Commit ed4aeef7 authored by Alex Leontiev

coding style

parent b6b81962
@@ -43,8 +43,8 @@
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "time.h"
#include <algorithm>
#include <limits.h>
#include<algorithm>
#include<limits.h>
#include "tld_tracker.hpp"
#include "opencv2/highgui.hpp"
@@ -57,6 +57,7 @@
#define SCALE_STEP 1.2
#define ENSEMBLE_THRESHOLD 0.5
#define VARIANCE_THRESHOLD 0.5
#define NEXPERT_THRESHOLD 0.2
#define GRIDSIZE 15
#define DOWNSCALE_MODE INTER_LINEAR
#define BLUR_AS_VADIM
@@ -73,11 +74,11 @@ using namespace tld;
* FIXME(issues)
* THETA_NN 0.5<->0.6 dramatic change vs video 6 !!
* TODO(features)
* benchmark: two streams of photos --> better video
* benchmark: two streams of photos -->better video
* (try inter_area for resize)
* TODO:
* fix pushbot ->pick commits -> compare_branches->all in 1->resubmit
* ||video(0.5<->0.6) --> debug if box size is less than 20
* fix pushbot->pick commits->compare_branches->all in 1->resubmit
* || video(0.5<->0.6) -->debug if box size is less than 20
* perfect PN
*
* vadim:
@@ -86,8 +87,19 @@ using namespace tld;
* 6. comment logical sections
* 11. group decls logically, order of statements
*
*
* ?10. all in one class
* todo: initializer lists
*
* format: if,while,for, , Assert
* format: methods
* --> compile
* ; ,
* --> compile
* -->commit-push -->notify
*
* ?( )
*
* ?vadim: for{1command} can omit {}; if( a != (b + c) ) vs ( a != ( b + c ) )
*/
/* design decisions:
@@ -103,7 +115,7 @@ public:
static void onMouse( int event, int x, int y, int, void* obj){
((MyMouseCallbackDEBUG*)obj)->onMouse(event,x,y);
}
MyMouseCallbackDEBUG& operator= (const MyMouseCallbackDEBUG& /*other*/){return *this;}
MyMouseCallbackDEBUG& operator = (const MyMouseCallbackDEBUG& /*other*/){return *this;}
private:
void onMouse( int event, int x, int y);
Mat& img_,imgBlurred_;
@@ -118,7 +130,7 @@ public:
bool confident;
bool failedLastTime;
int frameNum;
void printme(FILE* port=stdout);
void printme(FILE* port = stdout);
private:
double scale;
Size minSize;
@@ -126,9 +138,9 @@ private:
class TLDDetector {
public:
TLDDetector(const TrackerTLD::Params& params,Ptr<TrackerModel>model_in):model(model_in),params_(params){}
TLDDetector(const TrackerTLD::Params& params,Ptr<TrackerModel> model_in):model(model_in),params_(params){}
~TLDDetector(){}
static void generateScanGrid(int rows,int cols,Size initBox,std::vector<Rect2d>& res,bool withScaling=false);
static void generateScanGrid(int rows,int cols,Size initBox,std::vector<Rect2d>& res,bool withScaling = false);
struct LabeledPatch{
Rect2d rect;
bool isObject, shouldBeIntegrated;
@@ -143,12 +155,12 @@ protected:
};
template <class T,class Tparams>
template<class T, class Tparams>
class TrackerProxyImpl : public TrackerProxy{
public:
TrackerProxyImpl(Tparams params=Tparams()):params_(params){}
TrackerProxyImpl(Tparams params = Tparams()):params_(params){}
bool init( const Mat& image, const Rect2d& boundingBox ){
trackerPtr=T::createTracker();
trackerPtr = T::createTracker();
return trackerPtr->init(image,boundingBox);
}
bool update( const Mat& image,Rect2d& boundingBox){
@@ -164,20 +176,20 @@ class TrackerTLDModel : public TrackerModel{
public:
TrackerTLDModel(TrackerTLD::Params params,const Mat& image, const Rect2d& boundingBox,Size minSize);
Rect2d getBoundingBox(){return boundingBox_;}
void setBoudingBox(Rect2d boundingBox){boundingBox_=boundingBox;}
void setBoudingBox(Rect2d boundingBox){boundingBox_ = boundingBox;}
double getOriginalVariance(){return originalVariance_;}
inline double ensembleClassifierNum(const uchar* data);
inline void prepareClassifiers(int rowstep){for(int i=0;i<(int)classifiers.size();i++)classifiers[i].prepareClassifier(rowstep);}
inline void prepareClassifiers(int rowstep){for(int i = 0;i < (int)classifiers.size();i++)classifiers[i].prepareClassifier(rowstep);}
double Sr(const Mat_<uchar>& patch);
double Sc(const Mat_<uchar>& patch);
void integrateRelabeled(Mat& img,Mat& imgBlurred,const std::vector<TLDDetector::LabeledPatch>& patches);
void integrateAdditional(const std::vector<Mat_<uchar> >& eForModel,const std::vector<Mat_<uchar> >& eForEnsemble,bool isPositive);
Size getMinSize(){return minSize_;}
void printme(FILE* port=stdout);
void printme(FILE* port = stdout);
protected:
Size minSize_;
unsigned int timeStampPositiveNext,timeStampNegativeNext;
int timeStampPositiveNext,timeStampNegativeNext;
TrackerTLD::Params params_;
void pushIntoModel(const Mat_<uchar>& example,bool positive);
void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){}
@@ -185,7 +197,7 @@ class TrackerTLDModel : public TrackerModel{
Rect2d boundingBox_;
double originalVariance_;
std::vector<Mat_<uchar> > positiveExamples,negativeExamples;
std::vector<unsigned int> timeStampsPositive,timeStampsNegative;
std::vector<int> timeStampsPositive,timeStampsNegative;
RNG rng;
std::vector<TLDEnsembleClassifier> classifiers;
};
@@ -217,7 +229,7 @@ class TrackerTLDImpl : public TrackerTLD
class Nexpert : public Pexpert{
public:
Nexpert(const Mat& img,Rect2d& resultBox,const TLDDetector* detector,TrackerTLD::Params params){
img_=img; resultBox_=resultBox; detector_=detector; params_=params;}
img_ = img; resultBox_ = resultBox; detector_ = detector; params_ = params;}
bool operator()(Rect2d box);
int additionalExamples(std::vector<Mat_<uchar> >& examplesForModel,std::vector<Mat_<uchar> >& examplesForEnsemble){
examplesForModel.clear();examplesForEnsemble.clear();return 0;}
@@ -246,11 +258,11 @@ Ptr<TrackerTLD> TrackerTLD::createTracker(const TrackerTLD::Params &parameters){
return Ptr<TrackerTLDImpl>(new TrackerTLDImpl(parameters));
}
TrackerTLDImpl::TrackerTLDImpl( const TrackerTLD::Params &parameters) :
TrackerTLDImpl::TrackerTLDImpl(const TrackerTLD::Params &parameters) :
params( parameters ){
isInit = false;
trackerProxy=Ptr<TrackerProxyImpl<TrackerMedianFlow,TrackerMedianFlow::Params> >(
new TrackerProxyImpl<TrackerMedianFlow,TrackerMedianFlow::Params>());
trackerProxy = Ptr<TrackerProxyImpl<TrackerMedianFlow,TrackerMedianFlow::Params> >
(new TrackerProxyImpl<TrackerMedianFlow,TrackerMedianFlow::Params>());
}
void TrackerTLDImpl::read( const cv::FileNode& fn )
@@ -267,22 +279,23 @@ bool TrackerTLDImpl::initImpl(const Mat& image, const Rect2d& boundingBox ){
Mat image_gray;
trackerProxy->init(image,boundingBox);
cvtColor( image, image_gray, COLOR_BGR2GRAY );
data=Ptr<Data>(new Data(boundingBox));
double scale=data->getScale();
Rect2d myBoundingBox=boundingBox;
if(scale>1.0){
data = Ptr<Data>(new Data(boundingBox));
double scale = data->getScale();
Rect2d myBoundingBox = boundingBox;
if(scale > 1.0)
{
Mat image_proxy;
resize(image_gray,image_proxy,Size(cvRound(image.cols*scale),cvRound(image.rows*scale)),0,0,DOWNSCALE_MODE);
resize(image_gray,image_proxy,Size(cvRound(image.cols * scale),cvRound(image.rows * scale)),0,0,DOWNSCALE_MODE);
image_proxy.copyTo(image_gray);
myBoundingBox.x*=scale;
myBoundingBox.y*=scale;
myBoundingBox.width*=scale;
myBoundingBox.height*=scale;
myBoundingBox.x *= scale;
myBoundingBox.y *= scale;
myBoundingBox.width *= scale;
myBoundingBox.height *= scale;
}
model=Ptr<TrackerTLDModel>(new TrackerTLDModel(params,image_gray,myBoundingBox,data->getMinSize()));
detector=Ptr<TLDDetector>(new TLDDetector(params,model));
data->confident=false;
data->failedLastTime=false;
model = Ptr<TrackerTLDModel>(new TrackerTLDModel(params,image_gray,myBoundingBox,data->getMinSize()));
detector = Ptr<TLDDetector>(new TLDDetector(params,model));
data->confident = false;
data->failedLastTime = false;
return true;
}
@@ -290,14 +303,14 @@ bool TrackerTLDImpl::initImpl(const Mat& image, const Rect2d& boundingBox ){
bool TrackerTLDImpl::updateImpl(const Mat& image, Rect2d& boundingBox){
Mat image_gray,image_blurred,imageForDetector;
cvtColor( image, image_gray, COLOR_BGR2GRAY );
double scale=data->getScale();
if(scale>1.0){
double scale = data->getScale();
if(scale > 1.0){
resize(image_gray,imageForDetector,Size(cvRound(image.cols*scale),cvRound(image.rows*scale)),0,0,DOWNSCALE_MODE);
}else{
imageForDetector=image_gray;
imageForDetector = image_gray;
}
GaussianBlur(imageForDetector,image_blurred,GaussBlurKernelSize,0.0);
TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(model));
TrackerTLDModel* tldModel = ((TrackerTLDModel*)static_cast<TrackerModel*>(model));
data->frameNum++;
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE);
std::vector<TLDDetector::LabeledPatch> detectorResults;
@@ -305,57 +318,60 @@ bool TrackerTLDImpl::updateImpl(const Mat& image, Rect2d& boundingBox){
std::vector<Rect2d> candidates;
std::vector<double> candidatesRes;
bool trackerNeedsReInit=false;
for(int i=0;i<2;i++){
Rect2d tmpCandid=boundingBox;
if(((i==0)&&!(data->failedLastTime)&&trackerProxy->update(image,tmpCandid)) ||
((i==1)&&(detector->detect(imageForDetector,image_blurred,tmpCandid,detectorResults)))){
bool trackerNeedsReInit = false;
for( int i = 0; i < 2; i++ )
{
Rect2d tmpCandid = boundingBox;
if(((i == 0)&& !(data->failedLastTime)&& trackerProxy->update(image,tmpCandid)) ||
((i == 1)&& (detector->detect(imageForDetector,image_blurred,tmpCandid,detectorResults)))){
candidates.push_back(tmpCandid);
if(i==0){
if(i == 0){
resample(image_gray,tmpCandid,standardPatch);
}else{
resample(imageForDetector,tmpCandid,standardPatch);
}
candidatesRes.push_back(tldModel->Sc(standardPatch));
}else{
if(i==0){
trackerNeedsReInit=true;
if(i == 0){
trackerNeedsReInit = true;
}
}
}
std::vector<double>::iterator it=std::max_element(candidatesRes.begin(),candidatesRes.end());
std::vector<double>::iterator it = std::max_element(candidatesRes.begin(),candidatesRes.end());
dfprintf((stdout,"scale=%f\n",log(1.0*boundingBox.width/(data->getMinSize()).width)/log(SCALE_STEP)));
for(int i=0;i<(int)candidatesRes.size();i++){
dprintf(("\tcandidatesRes[%d]=%f\n",i,candidatesRes[i]));
dfprintf((stdout,"scale = %f\n",log(1.0 * boundingBox.width / (data->getMinSize()).width) / log(SCALE_STEP)));
for(int i = 0;i < (int)candidatesRes.size();i++){
dprintf(("\tcandidatesRes[%d] = %f\n",i,candidatesRes[i]));
}
data->printme();
tldModel->printme(stdout);
if(it==candidatesRes.end()){
data->confident=false;
data->failedLastTime=true;
if(it == candidatesRes.end()){
data->confident = false;
data->failedLastTime = true;
return false;
}else{
boundingBox=candidates[it-candidatesRes.begin()];
data->failedLastTime=false;
if(trackerNeedsReInit || it!=candidatesRes.begin()){
boundingBox = candidates[it - candidatesRes.begin()];
data->failedLastTime = false;
if(trackerNeedsReInit || it != candidatesRes.begin()){
trackerProxy->init(image,boundingBox);
}
}
if(!false && it!=candidatesRes.end()){
resample(imageForDetector,candidates[it-candidatesRes.begin()],standardPatch);
#if 1
if(it != candidatesRes.end()){
resample(imageForDetector,candidates[it - candidatesRes.begin()],standardPatch);
dfprintf((stderr,"%d %f %f\n",data->frameNum,tldModel->Sc(standardPatch),tldModel->Sr(standardPatch)));
if(candidatesRes.size()==2 && it==(candidatesRes.begin()+1))
if(candidatesRes.size() == 2 && it == (candidatesRes.begin() + 1))
dfprintf((stderr,"detector WON\n"));
}else{
dfprintf((stderr,"%d x x\n",data->frameNum));
}
#endif
if(*it > CORE_THRESHOLD){
data->confident=true;
data->confident = true;
}
if(data->confident){
@@ -363,18 +379,18 @@ bool TrackerTLDImpl::updateImpl(const Mat& image, Rect2d& boundingBox){
Nexpert nExpert(imageForDetector,boundingBox,detector,params);
std::vector<Mat_<uchar> > examplesForModel,examplesForEnsemble;
examplesForModel.reserve(100);examplesForEnsemble.reserve(100);
int negRelabeled=0;
for(int i=0;i<(int)detectorResults.size();i++){
int negRelabeled = 0;
for(int i = 0;i < (int)detectorResults.size();i++){
bool expertResult;
if(detectorResults[i].isObject){
expertResult=nExpert(detectorResults[i].rect);
if(expertResult!=detectorResults[i].isObject){negRelabeled++;}
expertResult = nExpert(detectorResults[i].rect);
if(expertResult != detectorResults[i].isObject){negRelabeled++;}
}else{
expertResult=pExpert(detectorResults[i].rect);
expertResult = pExpert(detectorResults[i].rect);
}
detectorResults[i].shouldBeIntegrated=detectorResults[i].shouldBeIntegrated || (detectorResults[i].isObject!=expertResult);
detectorResults[i].isObject=expertResult;
detectorResults[i].shouldBeIntegrated = detectorResults[i].shouldBeIntegrated || (detectorResults[i].isObject != expertResult);
detectorResults[i].isObject = expertResult;
}
tldModel->integrateRelabeled(imageForDetector,image_blurred,detectorResults);
dprintf(("%d relabeled by nExpert\n",negRelabeled));
@@ -394,35 +410,36 @@ bool TrackerTLDImpl::updateImpl(const Mat& image, Rect2d& boundingBox){
TrackerTLDModel::TrackerTLDModel(TrackerTLD::Params params,const Mat& image, const Rect2d& boundingBox,Size minSize):minSize_(minSize),
timeStampPositiveNext(0),timeStampNegativeNext(0),params_(params),boundingBox_(boundingBox){
originalVariance_=variance(image(boundingBox));
originalVariance_ = variance(image(boundingBox));
std::vector<Rect2d> closest,scanGrid;
Mat scaledImg,blurredImg,image_blurred;
double scale=scaleAndBlur(image,cvRound(log(1.0*boundingBox.width/(minSize.width))/log(SCALE_STEP)),scaledImg,blurredImg,GaussBlurKernelSize);
double scale = scaleAndBlur(image,cvRound(log(1.0 * boundingBox.width / (minSize.width)) / log(SCALE_STEP)),
scaledImg,blurredImg,GaussBlurKernelSize,SCALE_STEP);
GaussianBlur(image,image_blurred,GaussBlurKernelSize,0.0);
TLDDetector::generateScanGrid(image.rows,image.cols,minSize,scanGrid);
getClosestN(scanGrid,Rect2d(boundingBox.x/scale,boundingBox.y/scale,boundingBox.width/scale,boundingBox.height/scale),10,closest);
getClosestN(scanGrid,Rect2d(boundingBox.x / scale,boundingBox.y / scale,boundingBox.width / scale,boundingBox.height / scale),10,closest);
Mat_<uchar> blurredPatch(minSize);
TLDEnsembleClassifier::makeClassifiers(minSize,MEASURES_PER_CLASSIFIER,GRIDSIZE,classifiers);
positiveExamples.reserve(200);
for(int i=0;i<(int)closest.size();i++){
for(int j=0;j<20;j++){
for(int i = 0;i < (int)closest.size();i++){
for(int j = 0;j < 20;j++){
Point2f center;
Size2f size;
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE);
center.x=(float)(closest[i].x+closest[i].width*(0.5+rng.uniform(-0.01,0.01)));
center.y=(float)(closest[i].y+closest[i].height*(0.5+rng.uniform(-0.01,0.01)));
size.width=(float)(closest[i].width*rng.uniform((double)0.99,(double)1.01));
size.height=(float)(closest[i].height*rng.uniform((double)0.99,(double)1.01));
float angle=(float)rng.uniform(-10.0,10.0);
center.x = (float)(closest[i].x + closest[i].width * (0.5 + rng.uniform(-0.01,0.01)));
center.y = (float)(closest[i].y + closest[i].height * (0.5 + rng.uniform(-0.01,0.01)));
size.width = (float)(closest[i].width * rng.uniform((double)0.99,(double)1.01));
size.height = (float)(closest[i].height * rng.uniform((double)0.99,(double)1.01));
float angle = (float)rng.uniform(-10.0,10.0);
resample(scaledImg,RotatedRect(center,size,angle),standardPatch);
for(int y=0;y<standardPatch.rows;y++){
for(int x=0;x<standardPatch.cols;x++){
standardPatch(x,y)+=(uchar)rng.gaussian(5.0);
for(int y = 0;y < standardPatch.rows;y++){
for(int x = 0;x < standardPatch.cols;x++){
standardPatch(x,y) += (uchar)rng.gaussian(5.0);
}
}
@@ -433,7 +450,7 @@ timeStampPositiveNext(0),timeStampNegativeNext(0),params_(params),boundingBox_(b
resample(blurredImg,RotatedRect(center,size,angle),blurredPatch);
#endif
pushIntoModel(standardPatch,true);
for(int k=0;k<(int)classifiers.size();k++){
for(int k = 0;k < (int)classifiers.size();k++){
classifiers[k].integrate(blurredPatch,true);
}
}
@@ -444,15 +461,15 @@ timeStampPositiveNext(0),timeStampNegativeNext(0),params_(params),boundingBox_(b
negativeExamples.reserve(NEG_EXAMPLES_IN_INIT_MODEL);
std::vector<int> indices;
indices.reserve(NEG_EXAMPLES_IN_INIT_MODEL);
while(negativeExamples.size()<NEG_EXAMPLES_IN_INIT_MODEL){
int i=rng.uniform((int)0,(int)scanGrid.size());
if(std::find(indices.begin(),indices.end(),i)==indices.end() && overlap(boundingBox,scanGrid[i])<0.2){
while(negativeExamples.size() < NEG_EXAMPLES_IN_INIT_MODEL){
int i = rng.uniform((int)0,(int)scanGrid.size());
if(std::find(indices.begin(),indices.end(),i) == indices.end() && overlap(boundingBox,scanGrid[i]) < 0.2){
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE);
resample(image,scanGrid[i],standardPatch);
pushIntoModel(standardPatch,false);
resample(image_blurred,scanGrid[i],blurredPatch);
for(int k=0;k<(int)classifiers.size();k++){
for(int k = 0;k < (int)classifiers.size();k++){
classifiers[k].integrate(blurredPatch,false);
}
}
@@ -463,23 +480,30 @@ timeStampPositiveNext(0),timeStampNegativeNext(0),params_(params),boundingBox_(b
void TLDDetector::generateScanGrid(int rows,int cols,Size initBox,std::vector<Rect2d>& res,bool withScaling){
res.clear();
//scales step: SCALE_STEP; hor step: 10% of width; verstep: 10% of height; minsize: 20pix
for(double h=initBox.height, w=initBox.width;h<cols && w<rows;){
for(double x=0;(x+w+1.0)<=cols;x+=(0.1*w)){
for(double y=0;(y+h+1.0)<=rows;y+=(0.1*h)){
for(double h = initBox.height, w = initBox.width;h < cols && w < rows;){
for( double x = 0; (x + w + 1.0) <= cols; x += (0.1 * w))
{
for(double y = 0; (y + h + 1.0) <= rows; y += (0.1 * h))
res.push_back(Rect2d(x,y,w,h));
}
if( withScaling )
{
if( h <= initBox.height )
{
h /= SCALE_STEP; w /= SCALE_STEP;
if( h < 20 || w < 20 )
{
h = initBox.height * SCALE_STEP; w = initBox.width * SCALE_STEP;
CV_Assert(h > initBox.height || w > initBox.width);
}
if(withScaling){
if(h<=initBox.height){
h/=SCALE_STEP; w/=SCALE_STEP;
if(h<20 || w<20){
h=initBox.height*SCALE_STEP; w=initBox.width*SCALE_STEP;
CV_Assert(h>initBox.height || w>initBox.width);
}
}else{
h*=SCALE_STEP; w*=SCALE_STEP;
else
{
h *= SCALE_STEP; w *= SCALE_STEP;
}
}else{
}
else
{
break;
}
}
@@ -487,46 +511,47 @@ void TLDDetector::generateScanGrid(int rows,int cols,Size initBox,std::vector<Re
}
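For a feel of the grid density, a minimal illustrative sketch (not part of this commit; countWindowsAtScale and the frame/box sizes below are hypothetical) that counts the windows a single scale level of this grid produces:
#include <opencv2/core.hpp>
// Counts the sliding windows one scale level generates, using the same
// 10%-of-window-size step in x and y as generateScanGrid() above.
static int countWindowsAtScale(int rows, int cols, cv::Size2d win)
{
    int n = 0;
    for( double x = 0; (x + win.width + 1.0) <= cols; x += (0.1 * win.width) )
        for( double y = 0; (y + win.height + 1.0) <= rows; y += (0.1 * win.height) )
            n++;
    return n;
}
// Example: countWindowsAtScale(480, 640, cv::Size2d(64, 48)) is roughly 90 * 90 = 8100
// candidate boxes for one level; withScaling adds further levels spaced by SCALE_STEP = 1.2.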
bool TLDDetector::detect(const Mat& img,const Mat& imgBlurred,Rect2d& res,std::vector<LabeledPatch>& patches){
TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(model));
Size initSize=tldModel->getMinSize();
TrackerTLDModel* tldModel = ((TrackerTLDModel*)static_cast<TrackerModel*>(model));
Size initSize = tldModel->getMinSize();
patches.clear();
Mat resized_img,blurred_img;
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE);
img.copyTo(resized_img);
imgBlurred.copyTo(blurred_img);
double originalVariance=tldModel->getOriginalVariance();;
int dx=initSize.width/10,dy=initSize.height/10;
Size2d size=img.size();
double scale=1.0;
int total=0,pass=0;
int npos=0,nneg=0;
double tmp=0,maxSc=-5.0;
double originalVariance = tldModel->getOriginalVariance();;
int dx = initSize.width / 10, dy = initSize.height / 10;
Size2d size = img.size();
double scale = 1.0;
int total = 0,pass = 0;
int npos = 0,nneg = 0;
double tmp = 0,maxSc = -5.0;
Rect2d maxScRect;
START_TICK("detector");
do{
do
{
Mat_<double> intImgP,intImgP2;
computeIntegralImages(resized_img,intImgP,intImgP2);
tldModel->prepareClassifiers((int)blurred_img.step[0]);
for(int i=0,imax=cvFloor((0.0+resized_img.cols-initSize.width)/dx);i<imax;i++){
for(int j=0,jmax=cvFloor((0.0+resized_img.rows-initSize.height)/dy);j<jmax;j++){
for(int i = 0,imax = cvFloor((0.0 + resized_img.cols - initSize.width) / dx);i < imax;i++){
for(int j = 0,jmax = cvFloor((0.0 + resized_img.rows - initSize.height) / dy);j < jmax;j++){
LabeledPatch labPatch;
total++;
if(!patchVariance(intImgP,intImgP2,originalVariance,Point(dx*i,dy*j),initSize)){
if(!patchVariance(intImgP,intImgP2,originalVariance,Point(dx * i,dy * j),initSize)){
continue;
}
if(!(tldModel->ensembleClassifierNum(&blurred_img.at<uchar>(dy*j,dx*i))>ENSEMBLE_THRESHOLD)){
if(!(tldModel->ensembleClassifierNum(&blurred_img.at<uchar>(dy * j,dx * i)) > ENSEMBLE_THRESHOLD)){
continue;
}
pass++;
labPatch.rect=Rect2d(dx*i*scale,dy*j*scale,initSize.width*scale,initSize.height*scale);
resample(resized_img,Rect2d(Point(dx*i,dy*j),initSize),standardPatch);
tmp=tldModel->Sr(standardPatch);
labPatch.isObject=tmp>THETA_NN;
labPatch.shouldBeIntegrated=abs(tmp-THETA_NN)<0.1;
labPatch.rect = Rect2d(dx * i * scale,dy * j * scale,initSize.width * scale,initSize.height * scale);
resample(resized_img,Rect2d(Point(dx * i,dy * j),initSize),standardPatch);
tmp = tldModel->Sr(standardPatch);
labPatch.isObject = tmp > THETA_NN;
labPatch.shouldBeIntegrated = abs(tmp - THETA_NN) < 0.1;
patches.push_back(labPatch);
if(!labPatch.isObject){
@@ -535,27 +560,28 @@ bool TLDDetector::detect(const Mat& img,const Mat& imgBlurred,Rect2d& res,std::v
}else{
npos++;
}
tmp=tldModel->Sc(standardPatch);
if(tmp>maxSc){
maxSc=tmp;
maxScRect=labPatch.rect;
tmp = tldModel->Sc(standardPatch);
if(tmp > maxSc){
maxSc = tmp;
maxScRect = labPatch.rect;
}
}
}
size.width/=SCALE_STEP;
size.height/=SCALE_STEP;
scale*=SCALE_STEP;
size.width /= SCALE_STEP;
size.height /= SCALE_STEP;
scale *= SCALE_STEP;
resize(img,resized_img,size,0,0,DOWNSCALE_MODE);
GaussianBlur(resized_img,blurred_img,GaussBlurKernelSize,0.0f);
}while(size.width>=initSize.width && size.height>=initSize.height);
}
while( size.width >= initSize.width && size.height >= initSize.height );
END_TICK("detector");
dfprintf((stdout,"after NCC: nneg=%d npos=%d\n",nneg,npos));
dfprintf((stdout,"after NCC: nneg = %d npos = %d\n",nneg,npos));
#if !0
std::vector<Rect2d> poss,negs;
for(int i=0;i<(int)patches.size();i++){
for(int i = 0;i < (int)patches.size();i++){
if(patches[i].isObject)
poss.push_back(patches[i].rect);
else
@@ -566,184 +592,198 @@ bool TLDDetector::detect(const Mat& img,const Mat& imgBlurred,Rect2d& res,std::v
#endif
dfprintf((stdout,"%d after ensemble\n",pass));
if(maxSc<0){
if(maxSc < 0){
return false;
}
res=maxScRect;
res = maxScRect;
return true;
}
/** Computes the variance of subimage given by box, with the help of two integral
* images intImgP and intImgP2 (sum of squares), which should be also provided.*/
bool TLDDetector::patchVariance(Mat_<double>& intImgP,Mat_<double>& intImgP2,double originalVariance,Point pt,Size size){
int x=(pt.x),y=(pt.y),width=(size.width),height=(size.height);
CV_Assert(0<=x && (x+width)<intImgP.cols && (x+width)<intImgP2.cols);
CV_Assert(0<=y && (y+height)<intImgP.rows && (y+height)<intImgP2.rows);
double p=0,p2=0;
int x = (pt.x),y = (pt.y),width = (size.width),height = (size.height);
CV_Assert( 0 <= x && (x + width) < intImgP.cols && (x + width) < intImgP2.cols );
CV_Assert( 0 <= y && (y + height) < intImgP.rows && (y + height) < intImgP2.rows );
double p = 0,p2 = 0;
double A,B,C,D;
A=intImgP(y,x);
B=intImgP(y,x+width);
C=intImgP(y+height,x);
D=intImgP(y+height,x+width);
p=(0.0+A+D-B-C)/(width*height);
A = intImgP(y,x);
B = intImgP(y,x + width);
C = intImgP(y + height,x);
D = intImgP(y + height,x + width);
p = (A + D - B - C) / (width * height);
A=intImgP2(y,x);
B=intImgP2(y,x+width);
C=intImgP2(y+height,x);
D=intImgP2(y+height,x+width);
p2=(0.0+(D-B)-(C-A))/(width*height);
A = intImgP2(y,x);
B = intImgP2(y,x + width);
C = intImgP2(y + height,x);
D = intImgP2(y + height,x + width);
p2 = (A + D - B - C) / (width * height);
return ((p2-p*p)>VARIANCE_THRESHOLD*originalVariance);
return ((p2 - p * p) > VARIANCE_THRESHOLD * originalVariance);
}
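For reference, a standalone sketch (not part of this commit; boxVariance is a hypothetical helper) of the same identity Var = E[X^2] - (E[X])^2 that patchVariance() evaluates, here with the integral images built by cv::integral:
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
// Variance of a rectangular patch from integral images of the sums and squared sums.
static double boxVariance(const cv::Mat& gray, cv::Rect box)
{
    CV_Assert(gray.type() == CV_8UC1);
    CV_Assert(box.x >= 0 && box.y >= 0 && box.x + box.width <= gray.cols && box.y + box.height <= gray.rows);
    cv::Mat sum, sqsum;                                   // (rows+1) x (cols+1) integral images
    cv::integral(gray, sum, sqsum, CV_64F);
    double area = (double)box.width * box.height;
    // Sum over the box via the four integral-image corners.
    double s  = sum.at<double>(box.y, box.x) + sum.at<double>(box.y + box.height, box.x + box.width)
              - sum.at<double>(box.y, box.x + box.width) - sum.at<double>(box.y + box.height, box.x);
    double s2 = sqsum.at<double>(box.y, box.x) + sqsum.at<double>(box.y + box.height, box.x + box.width)
              - sqsum.at<double>(box.y, box.x + box.width) - sqsum.at<double>(box.y + box.height, box.x);
    double mean = s / area;
    return s2 / area - mean * mean;                       // E[X^2] - (E[X])^2
}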
double TrackerTLDModel::ensembleClassifierNum(const uchar* data){
double p=0;
for(int k=0;k<(int)classifiers.size();k++){
p+=classifiers[k].posteriorProbabilityFast(data);
}
p/=classifiers.size();
double TrackerTLDModel::ensembleClassifierNum(const uchar* data)
{
double p = 0;
for( int k = 0; k < (int)classifiers.size(); k++ )
p += classifiers[k].posteriorProbabilityFast(data);
p /= classifiers.size();
return p;
}
double TrackerTLDModel::Sr(const Mat_<uchar>& patch){
double splus=0.0, sminus=0.0;
for(int i=0;i<(int)positiveExamples.size();i++){
splus=std::max(splus,0.5*(NCC(positiveExamples[i],patch)+1.0));
}
for(int i=0;i<(int)negativeExamples.size();i++){
sminus=std::max(sminus,0.5*(NCC(negativeExamples[i],patch)+1.0));
}
if(splus+sminus==0.0){
double TrackerTLDModel::Sr(const Mat_<uchar>& patch)
{
double splus = 0.0, sminus = 0.0;
for( int i = 0; i < (int)positiveExamples.size(); i++ )
splus = std::max(splus, 0.5 * (NCC(positiveExamples[i],patch) + 1.0));
for( int i = 0; i < (int)negativeExamples.size(); i++ )
sminus = std::max(sminus, 0.5 * (NCC(negativeExamples[i],patch) + 1.0));
if( splus + sminus == 0.0)
return 0.0;
}
return splus/(sminus+splus);
return splus / (sminus + splus);
}
double TrackerTLDModel::Sc(const Mat_<uchar>& patch){
double splus=0.0,sminus=0.0;
int med=getMedian(timeStampsPositive);
for(int i=0;i<(int)positiveExamples.size();i++){
if((int)timeStampsPositive[i]<=med){
splus=std::max(splus,0.5*(NCC(positiveExamples[i],patch)+1.0));
}
}
for(int i=0;i<(int)negativeExamples.size();i++){
sminus=std::max(sminus,0.5*(NCC(negativeExamples[i],patch)+1.0));
}
if(splus+sminus==0.0){
double TrackerTLDModel::Sc(const Mat_<uchar>& patch)
{
double splus = 0.0,sminus = 0.0;
int med = getMedian(timeStampsPositive);
for( int i = 0; i < (int)positiveExamples.size(); i++ )
{
if( (int)timeStampsPositive[i] <= med )
splus = std::max(splus, 0.5 * (NCC(positiveExamples[i],patch) + 1.0));
}
for( int i = 0; i < (int)negativeExamples.size(); i++ )
sminus = std::max(sminus, 0.5 * (NCC(negativeExamples[i],patch) + 1.0));
if( splus + sminus == 0.0 )
return 0.0;
}
return splus/(sminus+splus);
return splus / (sminus + splus);
}
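A compact sketch (illustrative only; relativeSimilarity and its inputs are hypothetical) of the relative-similarity measure both Sr() and Sc() compute once NCC values in [-1,1] have been mapped to [0,1]; Sc() differs only in restricting the positive set to examples whose timestamp is at most the median timestamp:
// bestNccPositive / bestNccNegative: best NCC match against the positive and negative example sets.
static double relativeSimilarity(double bestNccPositive, double bestNccNegative)
{
    double splus  = 0.5 * (bestNccPositive + 1.0);   // map NCC from [-1,1] to [0,1]
    double sminus = 0.5 * (bestNccNegative + 1.0);
    if( splus + sminus == 0.0 )
        return 0.0;
    return splus / (splus + sminus);                 // close to 1.0 when the patch resembles the object
}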
void TrackerTLDModel::integrateRelabeled(Mat& img,Mat& imgBlurred,const std::vector<TLDDetector::LabeledPatch>& patches){
void TrackerTLDModel::integrateRelabeled(Mat& img,Mat& imgBlurred,const std::vector<TLDDetector::LabeledPatch>& patches)
{
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE),blurredPatch(minSize_);
int positiveIntoModel=0,negativeIntoModel=0,positiveIntoEnsemble=0,negativeIntoEnsemble=0;
for(int k=0;k<(int)patches.size();k++){
if(patches[k].shouldBeIntegrated){
int positiveIntoModel = 0,negativeIntoModel = 0,positiveIntoEnsemble = 0,negativeIntoEnsemble = 0;
for( int k = 0; k < (int)patches.size(); k++ )
{
if( patches[k].shouldBeIntegrated )
{
resample(img,patches[k].rect,standardPatch);
if(patches[k].isObject){
if( patches[k].isObject )
{
positiveIntoModel++;
pushIntoModel(standardPatch,true);
}else{
}
else
{
negativeIntoModel++;
pushIntoModel(standardPatch,false);
}
}
#ifdef CLOSED_LOOP
if(patches[k].shouldBeIntegrated || (patches[k].isPositive==false)){
if( patches[k].shouldBeIntegrated || ( patches[k].isPositive == false ) )
#else
if(patches[k].shouldBeIntegrated){
if( patches[k].shouldBeIntegrated )
#endif
{
resample(imgBlurred,patches[k].rect,blurredPatch);
if(patches[k].isObject){
if( patches[k].isObject )
positiveIntoEnsemble++;
}else{
else
negativeIntoEnsemble++;
}
for(int i=0;i<(int)classifiers.size();i++){
for( int i = 0; i < (int)classifiers.size(); i++ )
{
classifiers[i].integrate(blurredPatch,patches[k].isObject);
}
}
}
if(negativeIntoModel>0)
dfprintf((stdout,"negativeIntoModel=%d ",negativeIntoModel));
if(positiveIntoModel>0)
dfprintf((stdout,"positiveIntoModel=%d ",positiveIntoModel));
if(negativeIntoEnsemble>0)
dfprintf((stdout,"negativeIntoEnsemble=%d ",negativeIntoEnsemble));
if(positiveIntoEnsemble>0)
dfprintf((stdout,"positiveIntoEnsemble=%d ",positiveIntoEnsemble));
if( negativeIntoModel > 0 )
dfprintf((stdout,"negativeIntoModel = %d ",negativeIntoModel));
if( positiveIntoModel > 0)
dfprintf((stdout,"positiveIntoModel = %d ",positiveIntoModel));
if( negativeIntoEnsemble > 0 )
dfprintf((stdout,"negativeIntoEnsemble = %d ",negativeIntoEnsemble));
if( positiveIntoEnsemble > 0 )
dfprintf((stdout,"positiveIntoEnsemble = %d ",positiveIntoEnsemble));
dfprintf((stdout,"\n"));
}
void TrackerTLDModel::integrateAdditional(const std::vector<Mat_<uchar> >& eForModel,const std::vector<Mat_<uchar> >& eForEnsemble,bool isPositive){
int positiveIntoModel=0,negativeIntoModel=0,positiveIntoEnsemble=0,negativeIntoEnsemble=0;
for(int k=0;k<(int)eForModel.size();k++){
double sr=Sr(eForModel[k]);
if((sr>THETA_NN)!=isPositive){
if(isPositive){
void TrackerTLDModel::integrateAdditional(const std::vector<Mat_<uchar> >& eForModel,const std::vector<Mat_<uchar> >& eForEnsemble,bool isPositive)
{
int positiveIntoModel = 0,negativeIntoModel = 0,positiveIntoEnsemble = 0,negativeIntoEnsemble = 0;
for( int k = 0; k < (int)eForModel.size(); k++ )
{
double sr = Sr(eForModel[k]);
if( ( sr > THETA_NN ) != isPositive )
{
if( isPositive )
{
positiveIntoModel++;
pushIntoModel(eForModel[k],true);
}else{
}
else
{
negativeIntoModel++;
pushIntoModel(eForModel[k],false);
}
}
double p=0;
for(int i=0;i<(int)classifiers.size();i++){
p+=classifiers[i].posteriorProbability(eForEnsemble[k].data,(int)eForEnsemble[k].step[0]);
}
p/=classifiers.size();
if((p>ENSEMBLE_THRESHOLD)!=isPositive){
if(isPositive){
double p = 0;
for( int i = 0; i < (int)classifiers.size(); i++ )
p += classifiers[i].posteriorProbability(eForEnsemble[k].data,(int)eForEnsemble[k].step[0]);
p /= classifiers.size();
if( (p > ENSEMBLE_THRESHOLD) != isPositive )
{
if( isPositive )
positiveIntoEnsemble++;
}else{
else
negativeIntoEnsemble++;
}
for(int i=0;i<(int)classifiers.size();i++){
for( int i = 0; i < (int)classifiers.size(); i++ )
classifiers[i].integrate(eForEnsemble[k],isPositive);
}
}
}
if(negativeIntoModel>0)
dfprintf((stdout,"negativeIntoModel=%d ",negativeIntoModel));
if(positiveIntoModel>0)
dfprintf((stdout,"positiveIntoModel=%d ",positiveIntoModel));
if(negativeIntoEnsemble>0)
dfprintf((stdout,"negativeIntoEnsemble=%d ",negativeIntoEnsemble));
if(positiveIntoEnsemble>0)
dfprintf((stdout,"positiveIntoEnsemble=%d ",positiveIntoEnsemble));
if( negativeIntoModel > 0 )
dfprintf((stdout,"negativeIntoModel = %d ",negativeIntoModel));
if( positiveIntoModel > 0 )
dfprintf((stdout,"positiveIntoModel = %d ",positiveIntoModel));
if( negativeIntoEnsemble > 0 )
dfprintf((stdout,"negativeIntoEnsemble = %d ",negativeIntoEnsemble));
if( positiveIntoEnsemble > 0 )
dfprintf((stdout,"positiveIntoEnsemble = %d ",positiveIntoEnsemble));
dfprintf((stdout,"\n"));
}
int TrackerTLDImpl::Pexpert::additionalExamples(std::vector<Mat_<uchar> >& examplesForModel,std::vector<Mat_<uchar> >& examplesForEnsemble){
int TrackerTLDImpl::Pexpert::additionalExamples(std::vector<Mat_<uchar > >& examplesForModel,std::vector<Mat_<uchar > >& examplesForEnsemble)
{
examplesForModel.clear();examplesForEnsemble.clear();
examplesForModel.reserve(100);examplesForEnsemble.reserve(100);
std::vector<Rect2d> closest,scanGrid;
Mat scaledImg,blurredImg;
double scale=scaleAndBlur(img_,cvRound(log(1.0*resultBox_.width/(initSize_.width))/log(SCALE_STEP)),scaledImg,blurredImg,GaussBlurKernelSize);
double scale = scaleAndBlur(img_,cvRound(log(1.0 * resultBox_.width / (initSize_.width)) / log(SCALE_STEP)),
scaledImg,blurredImg,GaussBlurKernelSize,SCALE_STEP);
TLDDetector::generateScanGrid(img_.rows,img_.cols,initSize_,scanGrid);
getClosestN(scanGrid,Rect2d(resultBox_.x/scale,resultBox_.y/scale,resultBox_.width/scale,resultBox_.height/scale),10,closest);
getClosestN(scanGrid,Rect2d(resultBox_.x / scale,resultBox_.y / scale,resultBox_.width / scale,resultBox_.height / scale),10,closest);
for(int i=0;i<(int)closest.size();i++){
for(int j=0;j<10;j++){
for( int i = 0; i < (int)closest.size(); i++ )
{
for( int j = 0; j < 10; j++ )
{
Point2f center;
Size2f size;
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE),blurredPatch(initSize_);
center.x=(float)(closest[i].x+closest[i].width*(0.5+rng.uniform(-0.01,0.01)));
center.y=(float)(closest[i].y+closest[i].height*(0.5+rng.uniform(-0.01,0.01)));
size.width=(float)(closest[i].width*rng.uniform((double)0.99,(double)1.01));
size.height=(float)(closest[i].height*rng.uniform((double)0.99,(double)1.01));
float angle=(float)rng.uniform(-5.0,5.0);
center.x = (float)(closest[i].x + closest[i].width * (0.5 + rng.uniform(-0.01,0.01)));
center.y = (float)(closest[i].y + closest[i].height * (0.5 + rng.uniform(-0.01,0.01)));
size.width = (float)(closest[i].width * rng.uniform((double)0.99,(double)1.01));
size.height = (float)(closest[i].height * rng.uniform((double)0.99,(double)1.01));
float angle = (float)rng.uniform(-5.0,5.0);
for(int y=0;y<standardPatch.rows;y++){
for(int x=0;x<standardPatch.cols;x++){
standardPatch(x,y)+=(uchar)rng.gaussian(5.0);
for( int y = 0; y < standardPatch.rows; y++ )
{
for( int x = 0; x < standardPatch.cols; x++ )
{
standardPatch(x,y) += (uchar)rng.gaussian(5.0);
}
}
#ifdef BLUR_AS_VADIM
@@ -760,95 +800,112 @@ int TrackerTLDImpl::Pexpert::additionalExamples(std::vector<Mat_<uchar> >& examp
return 0;
}
bool TrackerTLDImpl::Nexpert::operator()(Rect2d box){
if(overlap(resultBox_,box)<0.2){
bool TrackerTLDImpl::Nexpert::operator()(Rect2d box)
{
if( overlap(resultBox_,box) < NEXPERT_THRESHOLD )
return false;
}
else
return true;
}
Data::Data(Rect2d initBox){
double minDim=std::min(initBox.width,initBox.height);
scale = 20.0/minDim;
minSize.width=(int)(initBox.width*20.0/minDim);
minSize.height=(int)(initBox.height*20.0/minDim);
frameNum=0;
dprintf(("minSize= %dx%d\n",minSize.width,minSize.height));
Data::Data(Rect2d initBox)
{
double minDim = std::min(initBox.width,initBox.height);
scale = 20.0 / minDim;
minSize.width = (int)(initBox.width * 20.0 / minDim);
minSize.height = (int)(initBox.height * 20.0 / minDim);
frameNum = 0;
dprintf(("minSize = %dx%d\n",minSize.width,minSize.height));
}
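A worked example (illustrative; Data::getScale() is assumed to return the scale computed here): for a 50x30 initial box, minDim = 30, so scale = 20/30, roughly 0.67, and minSize becomes 33x20. Only boxes whose smaller side is under 20 px give scale > 1.0 and trigger the upscaled detection image in initImpl()/updateImpl():
cv::Rect2d initBox(100.0, 100.0, 50.0, 30.0);              // hypothetical input box
double minDim = std::min(initBox.width, initBox.height);   // 30
double scale  = 20.0 / minDim;                             // ~0.667, i.e. no upscaling branch
cv::Size minSize((int)(initBox.width * 20.0 / minDim),     // 33
                 (int)(initBox.height * 20.0 / minDim));   // 20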
void Data::printme(FILE* port){
void Data::printme(FILE* port)
{
dfprintf((port,"Data:\n"));
dfprintf((port,"\tframeNum=%d\n",frameNum));
dfprintf((port,"\tconfident=%s\n",confident?"true":"false"));
dfprintf((port,"\tfailedLastTime=%s\n",failedLastTime?"true":"false"));
dfprintf((port,"\tminSize=%dx%d\n",minSize.width,minSize.height));
dfprintf((port,"\tframeNum = %d\n",frameNum));
dfprintf((port,"\tconfident = %s\n",confident?"true":"false"));
dfprintf((port,"\tfailedLastTime = %s\n",failedLastTime?"true":"false"));
dfprintf((port,"\tminSize = %dx%d\n",minSize.width,minSize.height));
}
void TrackerTLDModel::printme(FILE* port){
void TrackerTLDModel::printme(FILE* port)
{
dfprintf((port,"TrackerTLDModel:\n"));
dfprintf((port,"\tpositiveExamples.size()=%d\n",(int)positiveExamples.size()));
dfprintf((port,"\tnegativeExamples.size()=%d\n",(int)negativeExamples.size()));
dfprintf((port,"\tpositiveExamples.size() = %d\n",(int)positiveExamples.size()));
dfprintf((port,"\tnegativeExamples.size() = %d\n",(int)negativeExamples.size()));
}
void MyMouseCallbackDEBUG::onMouse( int event, int x, int y){
if(event== EVENT_LBUTTONDOWN){
void MyMouseCallbackDEBUG::onMouse( int event, int x, int y)
{
if( event == EVENT_LBUTTONDOWN )
{
Mat imgCanvas;
img_.copyTo(imgCanvas);
TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(detector_->model));
Size initSize=tldModel->getMinSize();
TrackerTLDModel* tldModel = ((TrackerTLDModel*)static_cast<TrackerModel*>(detector_->model));
Size initSize = tldModel->getMinSize();
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE,STANDARD_PATCH_SIZE);
double originalVariance=tldModel->getOriginalVariance();;
double originalVariance = tldModel->getOriginalVariance();;
double tmp;
Mat resized_img,blurred_img;
double scale=SCALE_STEP;
//double scale=SCALE_STEP*SCALE_STEP*SCALE_STEP*SCALE_STEP;
Size2d size(img_.cols/scale,img_.rows/scale);
double scale = SCALE_STEP;
//double scale = SCALE_STEP * SCALE_STEP * SCALE_STEP * SCALE_STEP;
Size2d size(img_.cols / scale,img_.rows / scale);
resize(img_,resized_img,size);
resize(imgBlurred_,blurred_img,size);
Mat_<double> intImgP,intImgP2;
detector_->computeIntegralImages(resized_img,intImgP,intImgP2);
int dx=initSize.width/10, dy=initSize.height/10,
i=(int)(x/scale/dx), j=(int)(y/scale/dy);
int dx = initSize.width / 10, dy = initSize.height / 10,
i = (int)(x / scale / dx), j = (int)(y / scale / dy);
dfprintf((stderr,"patchVariance=%s\n",(detector_->patchVariance(intImgP,intImgP2,originalVariance,Point(dx*i,dy*j),initSize))?"true":"false"));
dfprintf((stderr,"patchVariance = %s\n",(detector_->patchVariance(intImgP,intImgP2,originalVariance,
Point(dx * i,dy * j),initSize))?"true":"false"));
tldModel->prepareClassifiers((int)blurred_img.step[0]);
dfprintf((stderr,"p=%f\n",(tldModel->ensembleClassifierNum(&blurred_img.at<uchar>(dy*j,dx*i)))));
fprintf(stderr,"ensembleClassifier=%s\n",
(!(tldModel->ensembleClassifierNum(&blurred_img.at<uchar>(dy*j,dx*i))>ENSEMBLE_THRESHOLD))?"true":"false");
resample(resized_img,Rect2d(Point(dx*i,dy*j),initSize),standardPatch);
tmp=tldModel->Sr(standardPatch);
dfprintf((stderr,"Sr=%f\n",tmp));
dfprintf((stderr,"isObject=%s\n",(tmp>THETA_NN)?"true":"false"));
dfprintf((stderr,"shouldBeIntegrated=%s\n",(abs(tmp-THETA_NN)<0.1)?"true":"false"));
dfprintf((stderr,"Sc=%f\n",tldModel->Sc(standardPatch)));
rectangle(imgCanvas,Rect2d(Point2d(scale*dx*i,scale*dy*j),Size2d(initSize.width*scale,initSize.height*scale)), 0, 2, 1 );
dfprintf((stderr,"p = %f\n",(tldModel->ensembleClassifierNum(&blurred_img.at<uchar>(dy * j,dx * i)))));
fprintf(stderr,"ensembleClassifier = %s\n",
(!(tldModel->ensembleClassifierNum(&blurred_img.at<uchar>(dy * j,dx * i)) > ENSEMBLE_THRESHOLD))?"true":"false");
resample(resized_img,Rect2d(Point(dx * i,dy * j),initSize),standardPatch);
tmp = tldModel->Sr(standardPatch);
dfprintf((stderr,"Sr = %f\n",tmp));
dfprintf((stderr,"isObject = %s\n",(tmp > THETA_NN)?"true":"false"));
dfprintf((stderr,"shouldBeIntegrated = %s\n",(abs(tmp - THETA_NN) < 0.1)?"true":"false"));
dfprintf((stderr,"Sc = %f\n",tldModel->Sc(standardPatch)));
rectangle(imgCanvas,Rect2d(Point2d(scale * dx * i,scale * dy * j),Size2d(initSize.width * scale,initSize.height * scale)), 0, 2, 1 );
imshow("picker",imgCanvas);
waitKey();
}
}
void TrackerTLDModel::pushIntoModel(const Mat_<uchar>& example,bool positive){
void TrackerTLDModel::pushIntoModel(const Mat_<uchar>& example, bool positive)
{
std::vector<Mat_<uchar> >* proxyV;
unsigned int* proxyN;
std::vector<unsigned int>* proxyT;
if(positive){
proxyV=&positiveExamples;
proxyN=&timeStampPositiveNext;
proxyT=&timeStampsPositive;
}else{
proxyV=&negativeExamples;
proxyN=&timeStampNegativeNext;
proxyT=&timeStampsNegative;
int* proxyN;
std::vector<int>* proxyT;
if( positive )
{
proxyV = &positiveExamples;
proxyN = &timeStampPositiveNext;
proxyT = &timeStampsPositive;
}
else
{
proxyV = &negativeExamples;
proxyN = &timeStampNegativeNext;
proxyT = &timeStampsNegative;
}
if(proxyV->size()<MAX_EXAMPLES_IN_MODEL){
if( proxyV->size() < MAX_EXAMPLES_IN_MODEL )
{
proxyV->push_back(example);
proxyT->push_back(*proxyN);
}else{
int index=rng.uniform((int)0,(int)proxyV->size());
(*proxyV)[index]=example;
(*proxyT)[index]=(*proxyN);
}
else
{
int index = rng.uniform((int)0,(int)proxyV->size());
(*proxyV)[index] = example;
(*proxyT)[index] = (*proxyN);
}
(*proxyN)++;
}
@@ -42,8 +42,8 @@
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include <algorithm>
#include <limits.h>
#include<algorithm>
#include<limits.h>
namespace cv {namespace tld
{
@@ -58,22 +58,21 @@ namespace cv {namespace tld
#define dprintf(x)
#endif
#define MEASURE_TIME(a) {\
clock_t start;float milisec=0.0;\
start=clock();{a} milisec=1000.0*(clock()-start)/CLOCKS_PER_SEC;\
clock_t start;float milisec = 0.0;\
start = clock();{a} milisec = 1000.0 * (clock() - start) / CLOCKS_PER_SEC;\
dprintf(("%-90s took %f milis\n",#a,milisec)); }
#define HERE dprintf(("line %d\n",__LINE__));fflush(stderr);
#define START_TICK(name) { clock_t start;double milisec=0.0; start=clock();
#define END_TICK(name) milisec=1000.0*(clock()-start)/CLOCKS_PER_SEC;\
#define START_TICK(name) { clock_t start;double milisec = 0.0; start = clock();
#define END_TICK(name) milisec = 1000.0 * (clock() - start) / CLOCKS_PER_SEC;\
dprintf(("%s took %f milis\n",name,milisec)); }
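A usage sketch (illustrative) for the timing helpers above; START_TICK opens a scope that the matching END_TICK closes, and MEASURE_TIME pastes its argument into a block, so statements containing top-level commas need extra parentheses:
MEASURE_TIME( myassert(img); )        // times a single statement block, reported via dprintf
START_TICK("detector");
/* ... code to be timed ... */
END_TICK("detector");                 // prints "detector took ... milis" when dprintf is enabled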
extern Rect2d etalon;
void myassert(const Mat& img);
void printPatch(const Mat_<uchar>& standardPatch);
std::string type2str(const Mat& mat);
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,Rect2d whiteOne=Rect2d(-1.0,-1.0,-1.0,-1.0));
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,std::vector<Rect2d>& whiteOnes,String fileName="");
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,Rect2d whiteOne = Rect2d(-1.0,-1.0,-1.0,-1.0));
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,std::vector<Rect2d>& whiteOnes,String fileName = "");
//aux functions and variables
//#define CLIP(x,a,b) std::min(std::max((x),(a)),(b))
template<typename T> inline T CLIP(T x,T a,T b){return std::min(std::max(x,a),b);}
/** Computes overlap between the two given rectangles. Overlap is computed as ratio of rectangles' intersection to that
* of their union.*/
@@ -88,8 +87,8 @@ double variance(const Mat& img);
* of the same size).*/
double NCC(const Mat_<uchar>& patch1,const Mat_<uchar>& patch2);
void getClosestN(std::vector<Rect2d>& scanGrid,Rect2d bBox,int n,std::vector<Rect2d>& res);
double scaleAndBlur(const Mat& originalImg,int scale,Mat& scaledImg,Mat& blurredImg,Size GaussBlurKernelSize);
unsigned int getMedian(const std::vector<unsigned int>& values, int size=-1);
double scaleAndBlur(const Mat& originalImg,int scale,Mat& scaledImg,Mat& blurredImg,Size GaussBlurKernelSize, double scaleStep);
int getMedian(const std::vector<int>& values, int size = -1);
class TLDEnsembleClassifier{
public:
@@ -99,8 +98,8 @@ public:
double posteriorProbabilityFast(const uchar* data)const;
void prepareClassifier(int rowstep);
private:
TLDEnsembleClassifier(std::vector<Vec4b> meas,int beg,int end);
static void stepPrefSuff(std::vector<Vec4b>& arr,int pos,int len,int gridSize);
TLDEnsembleClassifier(const std::vector<Vec4b>& meas,int beg,int end);
static void stepPrefSuff(std::vector<Vec4b> & arr,int pos,int len,int gridSize);
int code(const uchar* data,int rowstep)const;
int codeFast(const uchar* data)const;
std::vector<Point2i> posAndNeg;
@@ -111,8 +110,8 @@ private:
class TrackerProxy{
public:
virtual bool init( const Mat& image, const Rect2d& boundingBox)=0;
virtual bool update(const Mat& image, Rect2d& boundingBox)=0;
virtual bool init( const Mat& image, const Rect2d& boundingBox) = 0;
virtual bool update(const Mat& image, Rect2d& boundingBox) = 0;
virtual ~TrackerProxy(){}
};
@@ -43,10 +43,10 @@
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "time.h"
#include <algorithm>
#include <limits.h>
#include <math.h>
#include <opencv2/highgui.hpp>
#include<algorithm>
#include<limits.h>
#include<math.h>
#include<opencv2/highgui.hpp>
#include "tld_tracker.hpp"
namespace cv {namespace tld
@@ -57,26 +57,26 @@ Rect2d etalon(14.0,110.0,20.0,20.0);
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,Rect2d whiteOne){
Mat image;
img.copyTo(image);
if(whiteOne.width>=0){
if(whiteOne.width >= 0){
rectangle( image,whiteOne, 255, 1, 1 );
}
for(int i=0;i<(int)blackOnes.size();i++){
for(int i = 0;i < (int)blackOnes.size();i++){
rectangle( image,blackOnes[i], 0, 1, 1 );
}
imshow("img",image);
}
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,std::vector<Rect2d>& whiteOnes,String filename){
Mat image;
static int frameCounter=1;
static int frameCounter = 1;
img.copyTo(image);
for(int i=0;i<(int)whiteOnes.size();i++){
for(int i = 0;i < (int)whiteOnes.size();i++){
rectangle( image,whiteOnes[i], 255, 1, 1 );
}
for(int i=0;i<(int)blackOnes.size();i++){
for(int i = 0;i < (int)blackOnes.size();i++){
rectangle( image,blackOnes[i], 0, 1, 1 );
}
imshow("img",image);
if(filename.length()>0){
if(filename.length() > 0){
char inbuf[100];
sprintf(inbuf,"%s%d.jpg",filename.c_str(),frameCounter);
imwrite(inbuf,image);
@@ -84,20 +84,20 @@ void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,std::vector<Rec
}
}
void myassert(const Mat& img){
int count=0;
for(int i=0;i<img.rows;i++){
for(int j=0;j<img.cols;j++){
if(img.at<uchar>(i,j)==0){
int count = 0;
for(int i = 0;i < img.rows;i++){
for(int j = 0;j < img.cols;j++){
if(img.at<uchar>(i,j) == 0){
count++;
}
}
}
dprintf(("black: %d out of %d (%f)\n",count,img.rows*img.cols,1.0*count/img.rows/img.cols));
dprintf(("black: %d out of %d (%f)\n",count,img.rows * img.cols,1.0 * count / img.rows / img.cols));
}
void printPatch(const Mat_<uchar>& standardPatch){
for(int i=0;i<standardPatch.rows;i++){
for(int j=0;j<standardPatch.cols;j++){
for(int i = 0;i < standardPatch.rows;i++){
for(int j = 0;j < standardPatch.cols;j++){
dprintf(("%5.2f, ",(double)standardPatch(i,j)));
}
dprintf(("\n"));
@@ -105,11 +105,11 @@ void printPatch(const Mat_<uchar>& standardPatch){
}
std::string type2str(const Mat& mat){
int type=mat.type();
int type = mat.type();
std::string r;
uchar depth = type & CV_MAT_DEPTH_MASK;
uchar chans =(uchar)( 1 + (type >> CV_CN_SHIFT));
uchar chans = (uchar)(1 + (type >> CV_CN_SHIFT));
switch ( depth ) {
case CV_8U: r = "8U"; break;
@@ -123,31 +123,31 @@ std::string type2str(const Mat& mat){
}
r += "C";
r += (chans+'0');
r += (chans + '0');
return r;
}
//generic functions
double scaleAndBlur(const Mat& originalImg,int scale,Mat& scaledImg,Mat& blurredImg,Size GaussBlurKernelSize){
double dScale=1.0;
for(int i=0;i<scale;i++,dScale*=1.2);
Size2d size=originalImg.size();
size.height/=dScale;size.width/=dScale;
double scaleAndBlur(const Mat& originalImg,int scale,Mat& scaledImg,Mat& blurredImg,Size GaussBlurKernelSize, double scaleStep){
double dScale = 1.0;
for(int i = 0; i < scale; i++, dScale *= scaleStep);
Size2d size = originalImg.size();
size.height /= dScale; size.width /= dScale;
resize(originalImg,scaledImg,size);
GaussianBlur(scaledImg,blurredImg,GaussBlurKernelSize,0.0);
return dScale;
}
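Worked example (illustrative): with scale = 3 and scaleStep = 1.2 the loop gives dScale = 1.2^3 = 1.728, so a 640x480 frame is resized to roughly 370x278 before blurring, and callers divide box coordinates by the returned dScale to map them into the scaled image:
double dScale = 1.0;
for( int i = 0; i < 3; i++, dScale *= 1.2 );   // dScale == 1.728
// 640 / 1.728 is about 370, 480 / 1.728 is about 278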
void getClosestN(std::vector<Rect2d>& scanGrid,Rect2d bBox,int n,std::vector<Rect2d>& res){
if(n>=(int)scanGrid.size()){
if(n >= (int)scanGrid.size()){
res.assign(scanGrid.begin(),scanGrid.end());
return;
}
std::vector<double> overlaps;
overlaps.assign(n,0.0);
res.assign(scanGrid.begin(),scanGrid.begin()+n);
for(int i=0;i<n;i++){
overlaps[i]=overlap(res[i],bBox);
res.assign(scanGrid.begin(),scanGrid.begin() + n);
for(int i = 0;i < n;i++){
overlaps[i] = overlap(res[i],bBox);
}
double otmp;
Rect2d rtmp;
@@ -160,142 +160,144 @@ void getClosestN(std::vector<Rect2d>& scanGrid,Rect2d bBox,int n,std::vector<Rec
}
}
for(int i=n;i<(int)scanGrid.size();i++){
double o=0.0;
if((o=overlap(scanGrid[i],bBox))<=overlaps[0]){
for(int i = n;i < (int)scanGrid.size();i++){
double o = 0.0;
if((o = overlap(scanGrid[i],bBox)) <= overlaps[0]){
continue;
}
int j=0;
int j = 0;
while( j < n && overlaps[j] < o )
{
j++;
}
j--;
for(int k=0;k<j;overlaps[k]=overlaps[k+1],res[k]=res[k+1],k++);
overlaps[j]=o;res[j]=scanGrid[i];
for(int k = 0;k < j;overlaps[k] = overlaps[k + 1],res[k] = res[k + 1],k++);
overlaps[j] = o;res[j] = scanGrid[i];
}
}
double variance(const Mat& img){
double p=0,p2=0;
for(int i=0;i<img.rows;i++){
for(int j=0;j<img.cols;j++){
p+=img.at<uchar>(i,j);
p2+=img.at<uchar>(i,j)*img.at<uchar>(i,j);
double p = 0,p2 = 0;
for(int i = 0;i < img.rows;i++){
for(int j = 0;j < img.cols;j++){
p += img.at<uchar>(i,j);
p2 += img.at<uchar>(i,j) * img.at<uchar>(i,j);
}
}
p/=(img.cols*img.rows);
p2/=(img.cols*img.rows);
return p2-p*p;
p /= (img.cols * img.rows);
p2 /= (img.cols * img.rows);
return p2 - p * p;
}
double NCC(const Mat_<uchar>& patch1,const Mat_<uchar>& patch2){
CV_Assert(patch1.rows==patch2.rows);
CV_Assert(patch1.cols==patch2.cols);
CV_Assert(patch1.rows == patch2.rows);
CV_Assert(patch1.cols == patch2.cols);
int N=patch1.rows*patch1.cols;
int s1=0,s2=0,n1=0,n2=0,prod=0;
for(int i=0;i<patch1.rows;i++){
for(int j=0;j<patch1.cols;j++){
int p1=patch1(i,j), p2=patch2(i,j);
s1+=p1; s2+=p2;
n1+=(p1*p1); n2+=(p2*p2);
prod+=(p1*p2);
}
}
double sq1=sqrt(std::max(0.0,n1-1.0*s1*s1/N)),sq2=sqrt(std::max(0.0,n2-1.0*s2*s2/N));
double ares=(sq2==0)?sq1/abs(sq1):(prod-s1*s2/N)/sq1/sq2;
int N = patch1.rows * patch1.cols;
int s1 = 0,s2 = 0,n1 = 0,n2 = 0,prod = 0;
for(int i = 0;i < patch1.rows;i++){
for(int j = 0;j < patch1.cols;j++){
int p1 = patch1(i,j), p2 = patch2(i,j);
s1 += p1; s2 += p2;
n1 += (p1 * p1); n2 += (p2 * p2);
prod += (p1 * p2);
}
}
double sq1 = sqrt(std::max(0.0,n1 - 1.0 * s1 * s1 / N)),sq2 = sqrt(std::max(0.0,n2 - 1.0 * s2 * s2 / N));
double ares = (sq2 == 0) ? sq1 / abs(sq1) : (prod - s1 * s2 / N) / sq1 / sq2;
return ares;
}
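As a cross-check (illustrative, not part of this commit; nccViaMatchTemplate is a hypothetical helper): for two patches of identical size, cv::matchTemplate with TM_CCOEFF_NORMED produces a single value equal to the zero-mean normalized cross-correlation computed by NCC() above, while the hand-rolled version additionally handles the degenerate zero-variance case:
#include <opencv2/imgproc.hpp>
static double nccViaMatchTemplate(const cv::Mat_<uchar>& p1, const cv::Mat_<uchar>& p2)
{
    CV_Assert(p1.size() == p2.size());
    cv::Mat result;                                      // 1x1 output for equal-sized inputs
    cv::matchTemplate(p1, p2, result, cv::TM_CCOEFF_NORMED);
    return result.at<float>(0, 0);
}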
unsigned int getMedian(const std::vector<unsigned int>& values, int size){
if(size==-1){
size=(int)values.size();
int getMedian(const std::vector<int>& values, int size){
if(size == -1){
size = (int)values.size();
}
std::vector<int> copy(values.begin(),values.begin()+size);
std::vector<int> copy(values.begin(),values.begin() + size);
std::sort(copy.begin(),copy.end());
if(size%2==0){
return (copy[size/2-1]+copy[size/2])/2;
if(size % 2 == 0){
return (copy[size / 2 - 1] + copy[size / 2]) / 2;
}else{
return copy[(size-1)/2];
return copy[(size - 1) / 2];
}
}
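Worked example (illustrative): for values = {4, 1, 3, 2} the sorted copy is {1, 2, 3, 4}; the size is even, so the result is (2 + 3) / 2 = 2, note the integer division:
std::vector<int> v;
v.push_back(4); v.push_back(1); v.push_back(3); v.push_back(2);
int med = getMedian(v);   // == 2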
double overlap(const Rect2d& r1,const Rect2d& r2){
double a1=r1.area(), a2=r2.area(), a0=(r1&r2).area();
return a0/(a1+a2-a0);
double a1 = r1.area(), a2 = r2.area(), a0 = (r1&r2).area();
return a0 / (a1 + a2 - a0);
}
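Worked example (illustrative): two 10x10 boxes offset by (5, 5) intersect in a 5x5 region, so the overlap (intersection over union) is 25 / (100 + 100 - 25), about 0.143, which is below the 0.2 threshold used by Nexpert and by the negative-example sampling:
double o = overlap(Rect2d(0.0, 0.0, 10.0, 10.0), Rect2d(5.0, 5.0, 10.0, 10.0));   // ~0.1429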
void resample(const Mat& img,const RotatedRect& r2,Mat_<uchar>& samples){
Mat_<float> M(2,3),R(2,2),Si(2,2),s(2,1),o(2,1);
R(0,0)=(float)cos(r2.angle*CV_PI/180);R(0,1)=(float)(-sin(r2.angle*CV_PI/180));
R(1,0)=(float)sin(r2.angle*CV_PI/180);R(1,1)=(float)cos(r2.angle*CV_PI/180);
Si(0,0)=(float)(samples.cols/r2.size.width); Si(0,1)=0.0f;
Si(1,0)=0.0f; Si(1,1)=(float)(samples.rows/r2.size.height);
s(0,0)=(float)samples.cols; s(1,0)=(float)samples.rows;
o(0,0)=r2.center.x;o(1,0)=r2.center.y;
R(0,0) = (float)cos(r2.angle * CV_PI / 180);R(0,1) = (float)(-sin(r2.angle * CV_PI / 180));
R(1,0) = (float)sin(r2.angle * CV_PI / 180);R(1,1) = (float)cos(r2.angle * CV_PI / 180);
Si(0,0) = (float)(samples.cols / r2.size.width); Si(0,1) = 0.0f;
Si(1,0) = 0.0f; Si(1,1) = (float)(samples.rows / r2.size.height);
s(0,0) = (float)samples.cols; s(1,0) = (float)samples.rows;
o(0,0) = r2.center.x;o(1,0) = r2.center.y;
Mat_<float> A(2,2),b(2,1);
A=Si*R;
b=s/2.0-Si*R*o;
A = Si * R;
b = s / 2.0 - Si * R * o;
A.copyTo(M.colRange(Range(0,2)));
b.copyTo(M.colRange(Range(2,3)));
warpAffine(img,samples,M,samples.size());
}
void resample(const Mat& img,const Rect2d& r2,Mat_<uchar>& samples){
Mat_<float> M(2,3);
M(0,0)=(float)(samples.cols/r2.width); M(0,1)=0.0f; M(0,2)=(float)(-r2.x*samples.cols/r2.width);
M(1,0)=0.0f; M(1,1)=(float)(samples.rows/r2.height); M(1,2)=(float)(-r2.y*samples.rows/r2.height);
M(0,0) = (float)(samples.cols / r2.width); M(0,1) = 0.0f; M(0,2) = (float)(-r2.x * samples.cols / r2.width);
M(1,0) = 0.0f; M(1,1) = (float)(samples.rows / r2.height); M(1,2) = (float)(-r2.y * samples.rows / r2.height);
warpAffine(img,samples,M,samples.size());
}
//other stuff
void TLDEnsembleClassifier::stepPrefSuff(std::vector<Vec4b>& arr,int pos,int len,int gridSize){
#if 0
int step=len/(gridSize-1), pref=(len-step*(gridSize-1))/2;
for(int i=0;i<(int)(sizeof(x1)/sizeof(x1[0]));i++){
arr[i]=pref+arr[i]*step;
int step = len / (gridSize - 1), pref = (len - step * (gridSize - 1)) / 2;
for(int i = 0;i < (int)(sizeof(x1) / sizeof(x1[0]));i++){
arr[i] = pref + arr[i] * step;
}
#else
int total=len-gridSize;
int quo=total/(gridSize-1),rem=total%(gridSize-1);
int smallStep=quo,bigStep=quo+1;
int bigOnes=rem,smallOnes=gridSize-bigOnes-1;
int bigOnes_front=bigOnes/2,bigOnes_back=bigOnes-bigOnes_front;
for(int i=0;i<(int)arr.size();i++){
if(arr[i].val[pos]<bigOnes_back){
arr[i].val[pos]=(uchar)(arr[i].val[pos]*bigStep+arr[i].val[pos]);
int total = len - gridSize;
int quo = total / (gridSize - 1),rem = total % (gridSize - 1);
int smallStep = quo,bigStep = quo + 1;
int bigOnes = rem,smallOnes = gridSize - bigOnes - 1;
int bigOnes_front = bigOnes / 2, bigOnes_back = bigOnes - bigOnes_front;
for(int i = 0;i < (int)arr.size();i++){
if(arr[i].val[pos] < bigOnes_back){
arr[i].val[pos] = (uchar)(arr[i].val[pos] * bigStep + arr[i].val[pos]);
continue;
}
if(arr[i].val[pos]<(bigOnes_front+smallOnes)){
arr[i].val[pos]=(uchar)(bigOnes_front*bigStep+(arr[i].val[pos]-bigOnes_front)*smallStep+arr[i].val[pos]);
if(arr[i].val[pos] < (bigOnes_front + smallOnes)){
arr[i].val[pos] = (uchar)(bigOnes_front * bigStep + (arr[i].val[pos] - bigOnes_front) * smallStep + arr[i].val[pos]);
continue;
}
if(arr[i].val[pos]<(bigOnes_front+smallOnes+bigOnes_back)){
arr[i].val[pos]=
(uchar)(bigOnes_front*bigStep+smallOnes*smallStep+(arr[i].val[pos]-(bigOnes_front+smallOnes))*bigStep+arr[i].val[pos]);
if(arr[i].val[pos] < (bigOnes_front + smallOnes + bigOnes_back)){
arr[i].val[pos] =
(uchar)(bigOnes_front * bigStep + smallOnes * smallStep +
(arr[i].val[pos] - (bigOnes_front + smallOnes)) * bigStep + arr[i].val[pos]);
continue;
}
arr[i].val[pos]=(uchar)(len-1);
arr[i].val[pos] = (uchar)(len - 1);
}
#endif
}
void TLDEnsembleClassifier::prepareClassifier(int rowstep){
if(lastStep_!=rowstep){
lastStep_=rowstep;
for(int i=0;i<(int)offset.size();i++){
offset[i].x=rowstep*measurements[i].val[0]+measurements[i].val[1];
offset[i].y=rowstep*measurements[i].val[2]+measurements[i].val[3];
if(lastStep_ != rowstep){
lastStep_ = rowstep;
for(int i = 0;i < (int)offset.size();i++){
offset[i].x = rowstep * measurements[i].val[0] + measurements[i].val[1];
offset[i].y = rowstep * measurements[i].val[2] + measurements[i].val[3];
}
}
}
TLDEnsembleClassifier::TLDEnsembleClassifier(std::vector<Vec4b> meas,int beg,int end):lastStep_(-1){
int posSize=1,mpc=end-beg;
for(int i=0;i<mpc;i++)posSize*=2;
TLDEnsembleClassifier::TLDEnsembleClassifier(const std::vector<Vec4b>& meas,int beg,int end):lastStep_(-1){
int posSize = 1, mpc = end - beg;
for( int i = 0; i < mpc; i++ )
posSize *= 2;
posAndNeg.assign(posSize,Point2i(0,0));
measurements.assign(meas.begin()+beg,meas.begin()+end);
measurements.assign(meas.begin() + beg,meas.begin() + end);
offset.assign(mpc,Point2i(0,0));
}
void TLDEnsembleClassifier::integrate(const Mat_<uchar>& patch,bool isPositive){
int position=code(patch.data,(int)patch.step[0]);
int position = code(patch.data,(int)patch.step[0]);
if(isPositive){
posAndNeg[position].x++;
}else{
@@ -303,38 +305,40 @@ void TLDEnsembleClassifier::integrate(const Mat_<uchar>& patch,bool isPositive){
}
}
double TLDEnsembleClassifier::posteriorProbability(const uchar* data,int rowstep)const{
int position=code(data,rowstep);
double posNum=(double)posAndNeg[position].x, negNum=(double)posAndNeg[position].y;
if(posNum==0.0 && negNum==0.0){
int position = code(data,rowstep);
double posNum = (double)posAndNeg[position].x, negNum = (double)posAndNeg[position].y;
if(posNum == 0.0 && negNum == 0.0){
return 0.0;
}else{
return posNum/(posNum+negNum);
return posNum / (posNum + negNum);
}
}
double TLDEnsembleClassifier::posteriorProbabilityFast(const uchar* data)const{
int position=codeFast(data);
double posNum=(double)posAndNeg[position].x, negNum=(double)posAndNeg[position].y;
if(posNum==0.0 && negNum==0.0){
int position = codeFast(data);
double posNum = (double)posAndNeg[position].x, negNum = (double)posAndNeg[position].y;
if(posNum == 0.0 && negNum == 0.0){
return 0.0;
}else{
return posNum/(posNum+negNum);
return posNum / (posNum + negNum);
}
}
int TLDEnsembleClassifier::codeFast(const uchar* data)const{
int position=0;
for(int i=0;i<(int)measurements.size();i++){
position=position<<1;
if(data[offset[i].x]<data[offset[i].y]){
int position = 0;
for(int i = 0;i < (int)measurements.size();i++){
position = position << 1;
if(data[offset[i].x] < data[offset[i].y]){
position++;
}
}
return position;
}
int TLDEnsembleClassifier::code(const uchar* data,int rowstep)const{
int position=0;
for(int i=0;i<(int)measurements.size();i++){
position=position<<1;
if(*(data+rowstep*measurements[i].val[0]+measurements[i].val[1])<*(data+rowstep*measurements[i].val[2]+measurements[i].val[3])){
int position = 0;
for(int i = 0;i < (int)measurements.size();i++){
position = position << 1;
if( *(data + rowstep * measurements[i].val[0] + measurements[i].val[1]) <
*(data + rowstep * measurements[i].val[2] + measurements[i].val[3]) )
{
position++;
}
}
@@ -345,15 +349,15 @@ int TLDEnsembleClassifier::makeClassifiers(Size size,int measurePerClassifier,in
std::vector<Vec4b> measurements;
for(int i=0;i<gridSize;i++){
for(int j=0;j<gridSize;j++){
for(int k=0;k<j;k++){
for(int i = 0;i < gridSize;i++){
for(int j = 0;j < gridSize;j++){
for(int k = 0;k < j;k++){
Vec4b m;
m.val[0]=m.val[2]=i;
m.val[1]=j;m.val[3]=k;
m.val[0] = m.val[2] = i;
m.val[1] = j;m.val[3] = k;
measurements.push_back(m);
m.val[1]=m.val[3]=i;
m.val[0]=j;m.val[2]=k;
m.val[1] = m.val[3] = i;
m.val[0] = j;m.val[2] = k;
measurements.push_back(m);
}
}
@@ -365,8 +369,8 @@ int TLDEnsembleClassifier::makeClassifiers(Size size,int measurePerClassifier,in
stepPrefSuff(measurements,2,size.height,gridSize);
stepPrefSuff(measurements,3,size.height,gridSize);
for(int i=0,howMany=measurements.size()/measurePerClassifier;i<howMany;i++){
classifiers.push_back(TLDEnsembleClassifier(measurements,i*measurePerClassifier,(i+1)*measurePerClassifier));
for(int i = 0,howMany = measurements.size() / measurePerClassifier;i < howMany;i++){
classifiers.push_back(TLDEnsembleClassifier(measurements,i * measurePerClassifier, (i + 1) * measurePerClassifier));
}
return (int)classifiers.size();
}
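For intuition, an illustrative sketch (not part of this commit; fernCode is a hypothetical helper) of the fern indexing the ensemble relies on: each measurement is a pixel-pair comparison stored as Vec4b = (row1, col1, row2, col2), every comparison contributes one bit, and the resulting integer indexes the posAndNeg table of positive/negative counts (e.g. 13 measurements per classifier would give 2^13 = 8192 bins):
#include <vector>
#include <opencv2/core.hpp>
static int fernCode(const cv::Mat_<uchar>& patch, const std::vector<cv::Vec4b>& measurements)
{
    int position = 0;
    for( int i = 0; i < (int)measurements.size(); i++ )
    {
        position <<= 1;                                         // make room for the next bit
        const cv::Vec4b& m = measurements[i];                   // (row1, col1, row2, col2)
        if( patch(m.val[0], m.val[1]) < patch(m.val[2], m.val[3]) )
            position++;                                         // bit set when the first pixel is darker
    }
    return position;   // index into posAndNeg; the posterior is pos / (pos + neg) for that bin
}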