Commit eafd7a18 authored by Vadim Pisarevsky's avatar Vadim Pisarevsky

Merge pull request #29 from nailbiter/luca

Tracking-Learning-Detection and MedianFlow
parents f843851e 24cca8d7
set(the_description "Tracking API")
ocv_define_module(tracking opencv_imgproc opencv_optim)
ocv_define_module(tracking opencv_imgproc opencv_optim opencv_video opencv_highgui)
......@@ -39,8 +39,8 @@
//
//M*/
#ifndef __OPENCV_TRACKING_HPP__
#define __OPENCV_TRACKING_HPP__
#ifndef __OPENCV_TRACKING_LENLEN_HPP__
#define __OPENCV_TRACKING_LENLEN_HPP__
#include "opencv2/core/cvdef.h"
......@@ -50,5 +50,4 @@ CV_EXPORTS bool initModule_tracking(void);
}
#include "opencv2/tracking/tracker.hpp"
#endif //__OPENCV_TRACKING_HPP__
#endif //__OPENCV_TRACKING_LENLEN
......@@ -1005,6 +1005,38 @@ class CV_EXPORTS_W TrackerBoosting : public Tracker
BOILERPLATE_CODE("BOOSTING",TrackerBoosting);
};
/**
\brief Median Flow tracker implementation.
Implementation of a paper "Forward-Backward Error: Automatic Detection of Tracking Failures" by Z. Kalal, K. Mikolajczyk
and Jiri Matas.
*/
class CV_EXPORTS_W TrackerMedianFlow : public Tracker
{
public:
// Algorithm parameters; readable/writable through cv::FileStorage.
struct CV_EXPORTS Params
{
// Fills the parameters with default values.
Params();
// Square root of the number of keypoints sampled in the tracked box.
int pointsInGrid;
// Deserializes parameters from a file node.
void read( const FileNode& /*fn*/ );
// Serializes parameters to file storage.
void write( FileStorage& /*fs*/ ) const;
};
// Expands to the standard create()/read()/write() boilerplate shared by
// all trackers; registers this tracker under the name "MEDIANFLOW".
BOILERPLATE_CODE("MEDIANFLOW",TrackerMedianFlow);
};
// Tracking-Learning-Detection (TLD) tracker, after Kalal et al.
// Combines a frame-to-frame tracker with a detector and an online
// learning component (see the tld implementation files in this module).
class CV_EXPORTS_W TrackerTLD : public Tracker
{
public:
// Algorithm parameters; currently empty, kept for API symmetry with
// the other trackers and for forward compatibility.
struct CV_EXPORTS Params
{
Params();
// Deserializes parameters from a file node (no-op for now).
void read( const FileNode& /*fn*/ );
// Serializes parameters to file storage (no-op for now).
void write( FileStorage& /*fs*/ ) const;
};
// Registers this tracker under the name "TLD" and generates the
// standard create()/read()/write() boilerplate.
BOILERPLATE_CODE("TLD",TrackerTLD);
};
} /* namespace cv */
#endif
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <time.h>
#include <cstring>
#include <climits>
#define CMDLINEMAX 10
#define ASSESS_TILL 100
#define LINEMAX 40
using namespace std;
using namespace cv;
/* TODO:
do normalization ala Kalal's assessment protocol for TLD
*/
static Mat image;
static bool paused;
vector<Scalar> palette;
void print_table(char* videos[],int videoNum,char* algorithms[],int algNum,const vector<vector<char*> >& results,char* tableName);
// Parses one ground-truth line of the form "x1,y1,x2,y2" (two opposite
// corners, any order) into an axis-aligned Rect2d.
// The literal line "NaN,NaN,NaN,NaN\n" means "object absent" and is encoded
// as negative width/height. Returns 0 on success, -1 on a malformed line.
static int lineToRect(char* line,Rect2d& res){
char * ptr=line,*pos=ptr;
if(line==NULL || line[0]=='\0'){
return -1;
}
if(strcmp(line,"NaN,NaN,NaN,NaN\n")==0){
// negative size is the "no detection" marker used throughout this sample
res.height=res.width=-1.0;
return 0;
}
double nums[4]={0};
// strpbrk skips separators to the next number start; strtod advances pos
// past the parsed value, so the loop hops from number to number.
for(int i=0; i<4 && (ptr=strpbrk(ptr,"0123456789-"))!= NULL;i++,ptr=pos){
nums[i]=strtod(ptr,&pos);
if(pos==ptr){
// strtod consumed nothing -> the token was not a number
printf("lineToRect had problems with decoding line %s\n",line);
return -1;
}
}
// normalize: corners may come in any order
res.x=cv::min(nums[0],nums[2]);
res.y=cv::min(nums[1],nums[3]);
res.width=cv::abs(nums[0]-nums[2]);
res.height=cv::abs(nums[1]-nums[3]);
return 0;
}
// Jaccard overlap (intersection over union) of two rectangles.
// Returns -1.0 when either rectangle is "invalid": negative width/height is
// the no-detection marker produced by lineToRect() and by failed trackers.
static inline double overlap(Rect2d r1,Rect2d r2){
// Bug fix: the original tested r1.width twice and never tested r2.height,
// so an invalid r2 (height<0, width>=0) slipped through the guard.
if(r1.width<0 || r2.width<0 || r1.height<0 || r2.height<0)return -1.0;
double a1=r1.area(), a2=r2.area(), a0=(r1&r2).area();
return a0/(a1+a2-a0);
}
// Prints the usage text and hot-key list for this benchmark sample,
// then terminates the process with a success status (never returns).
static void help(){
    cout << "\nThis example shows the functionality of \"Long-term optical tracking API\""
            "-- pause video [p] and draw a bounding box around the target to start the tracker\n"
            "Example of <video_name> is in opencv_extra/testdata/cv/tracking/\n"
            "Call:\n"
            "./tracker [<keys and args>] <video_name> <ground_truth> <algorithm1> <init_box1> <algorithm2> <init_box2> ...\n";
    cout << endl;
    cout << "\n\nHot keys: \n";
    cout << "\tq - quit the program\n";
    cout << "\tp - pause video\n";
    exit(EXIT_SUCCESS);
}
// Splits argv into videos (paired with their ground-truth files), algorithm
// names, and per-video initial bounding boxes.
// Classification heuristic: a token containing any character that is neither
// an uppercase letter nor '.' is treated as a video path; all-uppercase
// tokens (e.g. "TLD", "MEDIANFLOW") are algorithm names.
// Expected layout: <video> <gt> ... <algo> <box_for_video1> ... <box_for_videoN>
// On exit *vc is the number of videos and *ac the number of algorithms.
static void parseCommandLineArgs(int argc, char** argv,char* videos[],char* gts[],
int* vc,char* algorithms[],char* initBoxes[][CMDLINEMAX],int* ac){
*ac=*vc=0;
for(int i=1;i<argc;i++){
if(argv[i][0]=='-'){
// option handling: only -h/--help act; other options and their
// arguments (either "-k=v" or "-k v") are consumed and ignored
char *key=(argv[i]+1),*argument=NULL;
if(std::strcmp("h",key)==0||std::strcmp("help",key)==0){
help();
}
if((argument=strchr(argv[i],'='))==NULL){
i++;
argument=argv[i];
}else{
argument++;
}
continue;
}
bool isVideo=false;
for(int j=0,len=(int)strlen(argv[i]);j<len;j++){
if(!('A'<=argv[i][j] && argv[i][j]<='Z') && argv[i][j]!='.'){
isVideo=true;
break;
}
}
if(isVideo){
// a video token is followed by its ground-truth file
videos[*vc]=argv[i];
i++;
gts[*vc]=(i<argc)?argv[i]:NULL;
(*vc)++;
}else{
// an algorithm token is followed by one init box per known video;
// NOTE(review): this assumes all videos appear before any algorithm
algorithms[*ac]=argv[i];
i++;
for(int j=0;j<*vc;j++,i++){
initBoxes[*ac][j]=(i<argc)?argv[i]:NULL;
}
i--;(*ac)++;
}
}
}
// Pretty-prints one assessment table: one row per video, one column per
// algorithm, with columns padded to the widest cell using a space buffer
// and printf's "%.*s" precision trick.
// results is indexed [video][algorithm]; tableName is printed as the header.
void print_table(char* videos[],int videoNum,char* algorithms[],int algNum,const vector<vector<char*> >& results,char* tableName){
printf("\n%s",tableName);
// grid[0] = width of the video-name column, grid[i+1] = width of column i
vector<int> grid(1+algNum,0);
char spaces[100];memset(spaces,' ',100);
for(int i=0;i<videoNum;i++){
grid[0]=std::max(grid[0],(int)strlen(videos[i]));
}
for(int i=0;i<algNum;i++){
grid[i+1]=(int)strlen(algorithms[i]);
for(int j=0;j<videoNum;j++)
grid[i+1]=std::max(grid[i+1],(int)strlen(results[j][i]));
}
// header row: blank corner cell, then algorithm names
printf("%.*s ",(int)grid[0],spaces);
for(int i=0;i<algNum;i++)
printf("%s%.*s",algorithms[i],(int)(grid[i+1]+1-strlen(algorithms[i])),spaces);
printf("\n");
for(int i=0;i<videoNum;i++){
printf("%s%.*s",videos[i],(int)(grid[0]+1-strlen(videos[i])),spaces);
for(int j=0;j<algNum;j++)
printf("%s%.*s",results[i][j],(int)(grid[j+1]+1-strlen(results[i][j])),spaces);
printf("\n");
}
printf("*************************************************************\n");
}
// Per-video assessment results: one list of metric objects per algorithm.
struct AssessmentRes{
// Abstract metric. Concrete metrics (CorrectFrames, AvgTime, PRF below)
// accumulate per-frame data via assess() and render via printf().
class Assessment{
public:
// Writes the metric value into buf; returns the number of chars written.
virtual int printf(char* buf)=0;
// Writes the metric's display name into buf.
virtual int printName(char* buf)=0;
// Consumes one frame: ground-truth box vs. tracker output box.
virtual void assess(const Rect2d& ethalon,const Rect2d& res)=0;
virtual ~Assessment(){}
};
AssessmentRes(int algnum);
// number of ground-truth lines (frames) for this video
int len;
// name of the assessed video file (not owned)
char* videoName;
// results[algorithm][metric]
vector<vector<Ptr<Assessment> > >results;
};
class CorrectFrames : public AssessmentRes::Assessment{
public:
CorrectFrames(double tol):tol_(tol),len_(1),correctFrames_(1){}
int printf(char* buf){return sprintf(buf,"%d/%d",correctFrames_,len_);}
int printName(char* buf){return sprintf(buf,(char*)"Num of correct frames (overlap>%g)\n",tol_);}
void assess(const Rect2d& ethalon,const Rect2d& res){len_++;if(overlap(ethalon,res)>tol_)correctFrames_++;}
private:
double tol_;
int len_;
int correctFrames_;
};
class AvgTime : public AssessmentRes::Assessment{
public:
AvgTime(double res):res_(res){}
int printf(char* buf){return sprintf(buf,"%gms",res_);}
int printName(char* buf){return sprintf(buf,(char*)"Average frame tracking time\n");}
void assess(const Rect2d& /*ethalon*/,const Rect2d&/* res*/){};
private:
double res_;
};
// Metric: precision / recall / F-measure of object presence.
// A box with non-negative height counts as "present"; a true response is a
// frame where both ground truth and the tracker report presence.
// NOTE(review): printf divides by responses_/occurences_ without a zero
// guard -- if no frame was assessed this prints inf/nan; confirm callers
// always assess at least one frame.
class PRF : public AssessmentRes::Assessment{
public:
PRF():occurences_(0),responses_(0),true_responses_(0){};
int printName(char* buf){return sprintf(buf,(char*)"PRF\n");}
// precision / recall / F-measure, in that order
int printf(char* buf){return sprintf(buf,"%g/%g/%g",(1.0*true_responses_)/responses_,(1.0*true_responses_)/occurences_,
(2.0*true_responses_)/(responses_+occurences_));}
void assess(const Rect2d& ethalon,const Rect2d& res){
if(res.height>=0)responses_++;
if(ethalon.height>=0)occurences_++;
if(ethalon.height>=0 && res.height>=0)true_responses_++;
}
private:
// occurences_: frames where the object is present in ground truth;
// responses_: frames where the tracker reported a box;
// true_responses_: frames where both hold.
int occurences_,responses_,true_responses_;
};
// Creates the per-algorithm metric lists: two CorrectFrames counters (any
// overlap, and overlap > 0.5) plus a PRF metric for each algorithm.
// An AvgTime metric is appended later by assessment(), once timing is known.
AssessmentRes::AssessmentRes(int algnum):len(0),results(algnum){
for(int i=0;i<(int)results.size();i++){
results[i].push_back(Ptr<Assessment>(new CorrectFrames(0.0)));
results[i].push_back(Ptr<Assessment>(new CorrectFrames(0.5)));
results[i].push_back(Ptr<Assessment>(new PRF()));
}
}
// Runs all algorithms on one video, comparing every frame against the ground
// truth file, and returns the accumulated metrics.
// initBoxes_str is a flattened [algorithm][video] table; entry for algorithm
// i is at index CMDLINEMAX*i because the caller passes (base + video_index).
// Exits the process on any unrecoverable error (missing files, bad boxes).
static AssessmentRes assessment(char* video,char* gt_str, char* algorithms[],char* initBoxes_str[],int algnum){
char buf[200];
int start_frame=0;
int linecount=0;
Rect2d boundingBox;
vector<double> averageMillisPerFrame(algnum,0.0);
FILE* gt=fopen(gt_str,"r");
if(gt==NULL){
printf("cannot open the ground truth file %s\n",gt_str);
exit(EXIT_FAILURE);
}
// first pass: count ground-truth lines (= number of annotated frames)
for(linecount=0;fgets(buf,sizeof(buf),gt)!=NULL;linecount++);
if(linecount==0){
printf("ground truth file %s has no lines\n",gt_str);
exit(EXIT_FAILURE);
}
// rewind and read the first line (initial ground-truth box) into buf
fseek(gt,0,SEEK_SET);
if(fgets(buf,sizeof(buf),gt)==NULL){
printf("ground truth file %s has no lines\n",gt_str);
exit(EXIT_FAILURE);
}
// parse the command-line initial box for every algorithm
std::vector<Rect2d> initBoxes(algnum);
for(int i=0;i<algnum;i++){
printf("%s %s\n",algorithms[i],initBoxes_str[CMDLINEMAX*i]);
if(lineToRect(initBoxes_str[CMDLINEMAX*i],boundingBox)<0){
printf("please, specify bounding box for video %s, algorithm %s\n",video,algorithms[i]);
printf("FYI, initial bounding box in ground truth is %s\n",buf);
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}else{
initBoxes[i].x=boundingBox.x;
initBoxes[i].y=boundingBox.y;
initBoxes[i].width=boundingBox.width;
initBoxes[i].height=boundingBox.height;
}
}
VideoCapture cap;
cap.open( String(video) );
cap.set( CAP_PROP_POS_FRAMES, start_frame );
if( !cap.isOpened() ){
printf("cannot open video %s\n",video);
help();
}
Mat frame;
namedWindow( "Tracking API", 1 );
// instantiate one tracker per algorithm name
std::vector<Ptr<Tracker> >trackers(algnum);
for(int i=0;i<algnum;i++){
trackers[i] = Tracker::create( algorithms[i] );
if( trackers[i] == NULL ){
printf("error in the instantiation of the tracker %s\n",algorithms[i]);
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
}
// first frame: draw ground truth (palette[0]) and init every tracker
cap >> frame;
frame.copyTo( image );
if(lineToRect(buf,boundingBox)<0){
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
rectangle( image, boundingBox,palette[0], 2, 1 );
for(int i=0;i<(int)trackers.size();i++){
rectangle(image,initBoxes[i],palette[i+1], 2, 1 );
if( !trackers[i]->init( frame, initBoxes[i] ) ){
printf("could not initialize tracker %s with box %s at video %s\n",algorithms[i],initBoxes_str[i],video);
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
}
imshow( "Tracking API", image );
// main loop: one ground-truth line per video frame
int frameCounter = 0;
AssessmentRes res((int)trackers.size());
for ( ;; ){
if( !paused ){
cap >> frame;
if(frame.empty()){
break;
}
frame.copyTo( image );
if(fgets(buf,sizeof(buf),gt)==NULL){
printf("ground truth is over\n");
break;
}
if(lineToRect(buf,boundingBox)<0){
if(gt!=NULL){
fclose(gt);
}
exit(EXIT_FAILURE);
}
rectangle( image, boundingBox,palette[0], 2, 1 );
frameCounter++;
for(int i=0;i<(int)trackers.size();i++){
bool trackerRes=true;
// time the update call; accumulated for the AvgTime metric
clock_t start;start=clock();
trackerRes=trackers[i]->update( frame, initBoxes[i] );
start=clock()-start;
averageMillisPerFrame[i]+=1000.0*start/CLOCKS_PER_SEC;
if(trackerRes==false){
// mark "lost" with the negative-size convention
initBoxes[i].height=initBoxes[i].width=-1.0;
}else{
rectangle( image, initBoxes[i], palette[i+1], 2, 1 );
}
for(int j=0;j<(int)res.results[i].size();j++)
res.results[i][j]->assess(boundingBox,initBoxes[i]);
}
imshow( "Tracking API", image );
// cap the assessment length so long videos finish in bounded time
if((frameCounter+1)>=ASSESS_TILL){
break;
}
char c = (char) waitKey( 2 );
if( c == 'q' )
break;
if( c == 'p' )
paused = !paused;
}
}
if(gt!=NULL){
fclose(gt);
}
destroyWindow( "Tracking API");
res.len=linecount;
res.videoName=video;
// append the timing metric now that averages are known
for(int i=0;i<(int)res.results.size();i++)
res.results[i].push_back(Ptr<AssessmentRes::Assessment>(new AvgTime(averageMillisPerFrame[i]/res.len)));
return res;
}
// Entry point: parses videos/algorithms from the command line, runs the
// assessment for every video, and prints one comparison table per metric.
int main( int argc, char** argv ){
// palette[0] = ground truth (blue, BGR); the rest are per-algorithm colors
palette.push_back(Scalar(255,0,0));//BGR
palette.push_back(Scalar(0,0,255));
palette.push_back(Scalar(0,255,255));
int vcount=0,acount=0;
char* videos[CMDLINEMAX],*gts[CMDLINEMAX],*algorithms[CMDLINEMAX],*initBoxes[CMDLINEMAX][CMDLINEMAX];
parseCommandLineArgs(argc,argv,videos,gts,&vcount,algorithms,initBoxes,&acount);
CV_Assert(acount<CMDLINEMAX && vcount<CMDLINEMAX);
printf("videos and gts\n");
for(int i=0;i<vcount;i++){
printf("%s %s\n",videos[i],gts[i]);
}
printf("algorithms and boxes (%d)\n",acount);
for(int i=0;i<acount;i++){
printf("%s ",algorithms[i]);
for(int j=0;j<vcount;j++){
printf("%s ",initBoxes[i][j]);
}
printf("\n");
}
std::vector<AssessmentRes> results;
for(int i=0;i<vcount;i++){
// initBoxes is flattened to char** and offset by the video index;
// assessment() then strides by CMDLINEMAX per algorithm to recover
// initBoxes[algorithm][i]
results.push_back(assessment(videos[i],gts[i],algorithms,((char**)initBoxes)+i,acount));
}
CV_Assert(results[0].results[0].size()<CMDLINEMAX);
printf("\n\n");
// buf holds all table cells (LINEMAX chars each), buf2 the metric names
char buf[CMDLINEMAX*CMDLINEMAX*LINEMAX], buf2[CMDLINEMAX*40];
vector<vector<char*> > resultStrings(vcount);
vector<char*> nameStrings;
for(int i=0;i<vcount;i++){
for(int j=0;j<acount;j++){
resultStrings[i].push_back(buf+i*CMDLINEMAX*LINEMAX + j*40);
}
}
for(int i=0;i<(int)results[0].results[0].size();i++){
nameStrings.push_back(buf2+LINEMAX*i);
}
// one printed table per metric: render every cell, then print the grid
for(int tableCount=0;tableCount<(int)results[0].results[0].size();tableCount++){
CV_Assert(results[0].results[0][tableCount]->printName(nameStrings[tableCount])<LINEMAX);
for(int videoCount=0;videoCount<(int)results.size();videoCount++)
for(int algoCount=0;algoCount<(int)results[0].results.size();algoCount++){
(results[videoCount].results[algoCount][tableCount])->printf(resultStrings[videoCount][algoCount]);
}
print_table(videos,vcount,algorithms,acount,resultStrings,nameStrings[tableCount]);
}
return 0;
}
......@@ -2,6 +2,7 @@
#include <opencv2/tracking.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <cstring>
using namespace std;
using namespace cv;
......@@ -15,23 +16,9 @@ static bool startSelection = false;
static const char* keys =
{ "{@tracker_algorithm | | Tracker algorithm }"
"{@video_name | | video name }"
"{@start_frame |1| Start frame }"
"{@start_frame |0| Start frame }"
"{@bounding_frame |0,0,0,0| Initial bounding frame}"};
static void help()
{
cout << "\nThis example shows the functionality of \"Long-term optical tracking API\""
"-- pause video [p] and draw a bounding box around the target to start the tracker\n"
"Example of <video_name> is in opencv_extra/testdata/cv/tracking/\n"
"Call:\n"
"./tracker <tracker_algorithm> <video_name> <start_frame> [<bounding_frame>]\n"
<< endl;
cout << "\n\nHot keys: \n"
"\tq - quit the program\n"
"\tp - pause video\n";
}
static void onMouse( int event, int x, int y, int, void* )
{
if( !selectObject )
......@@ -58,7 +45,7 @@ static void onMouse( int event, int x, int y, int, void* )
//draw the bounding box
Mat currentFrame;
image.copyTo( currentFrame );
rectangle( currentFrame, Point( (int)boundingBox.x, (int)boundingBox.y ), Point( x, y ), Scalar( 255, 0, 0 ), 2, 1 );
rectangle( currentFrame, Point((int) boundingBox.x, (int)boundingBox.y ), Point( x, y ), Scalar( 255, 0, 0 ), 2, 1 );
imshow( "Tracking API", currentFrame );
}
break;
......@@ -66,8 +53,21 @@ static void onMouse( int event, int x, int y, int, void* )
}
}
int main( int argc, char** argv )
// Prints the usage text and hot-key list for this interactive sample.
// Unlike the benchmark sample's help(), this one returns to the caller.
static void help()
{
    cout << "\nThis example shows the functionality of \"Long-term optical tracking API\""
            "-- pause video [p] and draw a bounding box around the target to start the tracker\n"
            "Example of <video_name> is in opencv_extra/testdata/cv/tracking/\n"
            "Call:\n"
            "./tracker <tracker_algorithm> <video_name> <start_frame> [<bounding_frame>]\n";
    cout << endl;
    cout << "\n\nHot keys: \n";
    cout << "\tq - quit the program\n";
    cout << "\tp - pause video\n";
}
int main( int argc, char** argv ){
CommandLineParser parser( argc, argv, keys );
String tracker_algorithm = parser.get<String>( 0 );
......@@ -81,24 +81,19 @@ int main( int argc, char** argv )
}
int coords[4]={0,0,0,0};
bool initFrameWasGivenInCommandLine=false;
do
bool initBoxWasGivenInCommandLine=false;
{
String initBoundingBox=parser.get<String>(3);
for(size_t pos=0,ctr=0;ctr<4;ctr++)
{
size_t npos=initBoundingBox.find_first_of(',',pos);
if(npos==string::npos && ctr<3)
{
for(size_t npos=0,pos=0,ctr=0;ctr<4;ctr++){
npos=initBoundingBox.find_first_of(',',pos);
if(npos==string::npos && ctr<3){
printf("bounding box should be given in format \"x1,y1,x2,y2\",where x's and y's are integer cordinates of opposed corners of bdd box\n");
printf("got: %s\n",initBoundingBox.substr(pos,string::npos).c_str());
printf("manual selection of bounding box will be employed\n");
break;
}
int num=atoi(initBoundingBox.substr(pos,(ctr==3)?(string::npos):(npos-pos)).c_str());
if(num<=0)
{
if(num<=0){
printf("bounding box should be given in format \"x1,y1,x2,y2\",where x's and y's are integer cordinates of opposed corners of bdd box\n");
printf("got: %s\n",initBoundingBox.substr(pos,npos-pos).c_str());
printf("manual selection of bounding box will be employed\n");
......@@ -107,9 +102,10 @@ int main( int argc, char** argv )
coords[ctr]=num;
pos=npos+1;
}
if(coords[0]>0 && coords[1]>0 && coords[2]>0 && coords[3]>0)
initFrameWasGivenInCommandLine=true;
} while((void)0, 0);
if(coords[0]>0 && coords[1]>0 && coords[2]>0 && coords[3]>0){
initBoxWasGivenInCommandLine=true;
}
}
//open the capture
VideoCapture cap;
......@@ -141,7 +137,7 @@ int main( int argc, char** argv )
//get the first frame
cap >> frame;
frame.copyTo( image );
if(initFrameWasGivenInCommandLine){
if(initBoxWasGivenInCommandLine){
selectObject=true;
paused=false;
boundingBox.x = coords[0];
......@@ -160,15 +156,14 @@ int main( int argc, char** argv )
{
if( !paused )
{
cap >> frame;
if( frame.empty() )
{
break;
if(initialized){
cap >> frame;
if(frame.empty()){
break;
}
frame.copyTo( image );
}
frame.copyTo( image );
if( !initialized && selectObject )
{
//initializes the tracker
......
/*///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "time.h"
#include <algorithm>
#include <limits.h>
#include <math.h>
#include <opencv2/highgui.hpp>
#include "TLD.hpp"
namespace cv {namespace tld
{
//debug functions and variables
Rect2d etalon(14.0,110.0,20.0,20.0);
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,Rect2d whiteOne){
Mat image;
img.copyTo(image);
if(whiteOne.width>=0){
rectangle( image,whiteOne, 255, 1, 1 );
}
for(int i=0;i<(int)blackOnes.size();i++){
rectangle( image,blackOnes[i], 0, 1, 1 );
}
imshow("img",image);
}
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,std::vector<Rect2d>& whiteOnes){
Mat image;
img.copyTo(image);
for(int i=0;i<(int)whiteOnes.size();i++){
rectangle( image,whiteOnes[i], 255, 1, 1 );
}
for(int i=0;i<(int)blackOnes.size();i++){
rectangle( image,blackOnes[i], 0, 1, 1 );
}
imshow("img",image);
}
// Debug helper: reports (via the dprintf debug macro) how many pixels of an
// 8-bit single-channel image are exactly zero, as a count and a fraction.
// Despite the name it asserts nothing -- it only prints.
void myassert(const Mat& img){
int count=0;
for(int i=0;i<img.rows;i++){
for(int j=0;j<img.cols;j++){
if(img.at<uchar>(i,j)==0){
count++;
}
}
}
dprintf(("black: %d out of %d (%f)\n",count,img.rows*img.cols,1.0*count/img.rows/img.cols));
}
// Debug helper: dumps an 8-bit patch to the debug output, one formatted row
// per line (values printed as doubles for aligned columns).
void printPatch(const Mat_<uchar>& standardPatch){
for(int i=0;i<standardPatch.rows;i++){
for(int j=0;j<standardPatch.cols;j++){
dprintf(("%5.2f, ",(double)standardPatch(i,j)));
}
dprintf(("\n"));
}
}
std::string type2str(const Mat& mat){
int type=mat.type();
std::string r;
uchar depth = type & CV_MAT_DEPTH_MASK;
uchar chans =(uchar)( 1 + (type >> CV_CN_SHIFT));
switch ( depth ) {
case CV_8U: r = "8U"; break;
case CV_8S: r = "8S"; break;
case CV_16U: r = "16U"; break;
case CV_16S: r = "16S"; break;
case CV_32S: r = "32S"; break;
case CV_32F: r = "32F"; break;
case CV_64F: r = "64F"; break;
default: r = "User"; break;
}
r += "C";
r += (chans+'0');
return r;
}
//generic functions
// Downscales originalImg by a factor of 1.2^scale into scaledImg, then
// Gaussian-blurs the result into blurredImg. Returns the scale factor used,
// so callers can map coordinates back to the original image.
double scaleAndBlur(const Mat& originalImg,int scale,Mat& scaledImg,Mat& blurredImg,Size GaussBlurKernelSize){
double dScale=1.0;
// compute 1.2^scale iteratively to match the scale pyramid used elsewhere
for(int i=0;i<scale;i++,dScale*=1.2);
Size2d size=originalImg.size();
size.height/=dScale;size.width/=dScale;
resize(originalImg,scaledImg,size);
GaussianBlur(scaledImg,blurredImg,GaussBlurKernelSize,0.0);
return dScale;
}
// Selects the n rectangles from scanGrid with the highest overlap with bBox
// and stores them in res. If the grid has n or fewer entries, all of them
// are returned. The result is kept in ascending order of overlap using an
// insertion-sort-style bounded "top n" pass.
void getClosestN(std::vector<Rect2d>& scanGrid,Rect2d bBox,int n,std::vector<Rect2d>& res){
if(n>=(int)scanGrid.size()){
res.assign(scanGrid.begin(),scanGrid.end());
return;
}
// seed the candidate set with the first n grid entries
std::vector<double> overlaps(n,0.0);
res.assign(scanGrid.begin(),scanGrid.begin()+n);
for(int i=0;i<n;i++){
overlaps[i]=overlap(res[i],bBox);
}
// insertion sort of the seed set, ascending by overlap
double otmp;
Rect2d rtmp;
for (int i = 1; i < n; i++){
int j = i;
while (j > 0 && overlaps[j - 1] > overlaps[j]) {
otmp = overlaps[j];overlaps[j] = overlaps[j - 1];overlaps[j - 1] = otmp;
rtmp = res[j];res[j] = res[j - 1];res[j - 1] = rtmp;
j--;
}
}
// stream the remaining entries: if one beats the current minimum
// (overlaps[0]), shift the smaller elements down and insert it in place
double o=0.0;
for(int i=n;i<(int)scanGrid.size();i++){
if((o=overlap(scanGrid[i],bBox))<=overlaps[0]){
continue;
}
int j=0;
for(j=0;j<n && overlaps[j]<o;j++);
j--;
for(int k=0;k<j;overlaps[k]=overlaps[k+1],res[k]=res[k+1],k++);
overlaps[j]=o;res[j]=scanGrid[i];
}
}
// Pixel variance of an 8-bit single-channel image, computed as
// E[x^2] - (E[x])^2 over all pixels.
double variance(const Mat& img){
    double sum = 0.0, sumSq = 0.0;
    for(int row = 0; row < img.rows; row++){
        for(int col = 0; col < img.cols; col++){
            const double v = img.at<uchar>(row, col);
            sum += v;
            sumSq += v * v;
        }
    }
    const int total = img.cols * img.rows;
    const double mean = sum / total;
    return sumSq / total - mean * mean;
}
// Variance of the pixels inside box, computed in O(1) from precomputed
// integral images: intImgP holds sums, intImgP2 holds sums of squares.
// Uses the standard four-corner rectangle-sum identity A + D - B - C.
double variance(Mat_<double>& intImgP,Mat_<double>& intImgP2,Rect box){
int x=(box.x),y=(box.y),width=(box.width),height=(box.height);
// corners (x+width, y+height) are indexed directly, hence strict bounds
CV_Assert(0<=x && (x+width)<intImgP.cols && (x+width)<intImgP2.cols);
CV_Assert(0<=y && (y+height)<intImgP.rows && (y+height)<intImgP2.rows);
double p=0,p2=0;
double A,B,C,D;
// mean from the sum integral image
A=intImgP(y,x);
B=intImgP(y,x+width);
C=intImgP(y+height,x);
D=intImgP(y+height,x+width);
p=(0.0+A+D-B-C)/(width*height);
// mean of squares from the squared-sum integral image
A=intImgP2(y,x);
B=intImgP2(y,x+width);
C=intImgP2(y+height,x);
D=intImgP2(y+height,x+width);
p2=(0.0+(D-B)-(C-A))/(width*height);
return p2-p*p;
}
// Normalized cross-correlation of two equally-sized 8-bit patches.
// sq1/sq2 are the patches' standard-deviation terms (clamped at 0 to guard
// against rounding). When patch2 is constant (sq2==0) the result degrades
// to sq1/|sq1| = +/-1.
// NOTE(review): if BOTH patches are constant, sq1/abs(sq1) divides 0 by 0
// and yields NaN -- confirm callers never compare two flat patches.
double NCC(Mat_<uchar> patch1,Mat_<uchar> patch2){
CV_Assert(patch1.rows==patch2.rows);
CV_Assert(patch1.cols==patch2.cols);
int N=patch1.rows*patch1.cols;
double s1=sum(patch1)(0),s2=sum(patch2)(0);
double n1=norm(patch1),n2=norm(patch2);
double prod=patch1.dot(patch2);
double sq1=sqrt(MAX(0.0,n1*n1-s1*s1/N)),sq2=sqrt(MAX(0.0,n2*n2-s2*s2/N));
double ares=(sq2==0)?sq1/abs(sq1):(prod-s1*s2/N)/sq1/sq2;
return ares;
}
// Returns the median of the first `size` elements of `values` (all elements
// when size == -1, the declared default). For an even count the two middle
// values are averaged with integer division. The input is not modified.
// An empty selection returns 0 (the original indexed out of bounds there).
unsigned int getMedian(const std::vector<unsigned int>& values, int size){
    if(size==-1){
        size=(int)values.size();
    }
    if(size<=0){
        return 0; // nothing to take a median of
    }
    // Bug fix: the original copied into std::vector<int>, silently narrowing
    // any value above INT_MAX; keep the unsigned element type instead.
    std::vector<unsigned int> copy(values.begin(),values.begin()+size);
    std::sort(copy.begin(),copy.end());
    if(size%2==0){
        // lo + (hi-lo)/2 equals (lo+hi)/2 but cannot overflow
        unsigned int lo=copy[size/2-1], hi=copy[size/2];
        return lo + (hi-lo)/2;
    }else{
        return copy[(size-1)/2];
    }
}
double overlap(const Rect2d& r1,const Rect2d& r2){
double a1=r1.area(), a2=r2.area(), a0=(r1&r2).area();
return a0/(a1+a2-a0);
}
// Samples the rotated rectangle r2 out of img into the fixed-size patch
// `samples` via an affine warp: rotate by r2.angle, scale the box to the
// patch size, and translate its center to the patch center.
void resample(const Mat& img,const RotatedRect& r2,Mat_<uchar>& samples){
Mat_<float> M(2,3),R(2,2),Si(2,2),s(2,1),o(2,1);
// R: rotation by the box angle
R(0,0)=(float)cos(r2.angle*CV_PI/180);R(0,1)=(float)(-sin(r2.angle*CV_PI/180));
R(1,0)=(float)sin(r2.angle*CV_PI/180);R(1,1)=(float)cos(r2.angle*CV_PI/180);
// Si: scaling from box size to patch size
Si(0,0)=(float)(samples.cols/r2.size.width); Si(0,1)=0.0f;
Si(1,0)=0.0f; Si(1,1)=(float)(samples.rows/r2.size.height);
s(0,0)=(float)samples.cols; s(1,0)=(float)samples.rows;
o(0,0)=r2.center.x;o(1,0)=r2.center.y;
// affine map: p_patch = Si*R*p_img + (s/2 - Si*R*center)
Mat_<float> A(2,2),b(2,1);
A=Si*R;
b=s/2.0-Si*R*o;
A.copyTo(M.colRange(Range(0,2)));
b.copyTo(M.colRange(Range(2,3)));
warpAffine(img,samples,M,samples.size());
}
// Axis-aligned overload: samples the rectangle r2 out of img into the
// fixed-size patch `samples` by a scale-and-translate affine warp.
void resample(const Mat& img,const Rect2d& r2,Mat_<uchar>& samples){
Mat_<float> M(2,3);
// scale box -> patch and shift the box origin to (0,0)
M(0,0)=(float)(samples.cols/r2.width); M(0,1)=0.0f; M(0,2)=(float)(-r2.x*samples.cols/r2.width);
M(1,0)=0.0f; M(1,1)=(float)(samples.rows/r2.height); M(1,2)=(float)(-r2.y*samples.rows/r2.height);
warpAffine(img,samples,M,samples.size());
}
//other stuff
// Maps grid indices stored in arr (0..gridSize-1) to pixel coordinates in a
// dimension of length len, distributing the rounding remainder so that the
// grid spans the dimension as evenly as possible.
void TLDEnsembleClassifier::stepPrefSuff(std::vector<uchar>& arr,int len){
int gridSize=getGridSize();
#if 0
// simpler variant: uniform step with a centered prefix; kept for reference
int step=len/(gridSize-1), pref=(len-step*(gridSize-1))/2;
for(int i=0;i<(int)(sizeof(x1)/sizeof(x1[0]));i++){
arr[i]=pref+arr[i]*step;
}
#else
// distribute the remainder: some gaps get the bigger step (quo+1), the
// rest the smaller one (quo); bigger gaps are split between front and back
int total=len-gridSize;
int quo=total/(gridSize-1),rem=total%(gridSize-1);
int smallStep=quo,bigStep=quo+1;
int bigOnes=rem,smallOnes=gridSize-bigOnes-1;
int bigOnes_front=bigOnes/2,bigOnes_back=bigOnes-bigOnes_front;
for(int i=0;i<(int)arr.size();i++){
if(arr[i]<bigOnes_back){
// index falls inside the leading big-step region
arr[i]=(uchar)(arr[i]*bigStep+arr[i]);
continue;
}
if(arr[i]<(bigOnes_front+smallOnes)){
// index falls inside the middle small-step region
arr[i]=(uchar)(bigOnes_front*bigStep+(arr[i]-bigOnes_front)*smallStep+arr[i]);
continue;
}
if(arr[i]<(bigOnes_front+smallOnes+bigOnes_back)){
// index falls inside the trailing big-step region
arr[i]=(uchar)(bigOnes_front*bigStep+smallOnes*smallStep+(arr[i]-(bigOnes_front+smallOnes))*bigStep+arr[i]);
continue;
}
// past the last grid line: clamp to the final pixel
arr[i]=(uchar)(len-1);
}
#endif
}
// Builds one ensemble (fern) classifier: picks measurePerClassifier pixel
// comparison coordinates (seeded by `ordinal` via preinit, then spread over
// a grid scaled to `size`), and allocates positive/negative count tables
// with one slot per possible comparison bit pattern.
TLDEnsembleClassifier::TLDEnsembleClassifier(int ordinal,Size size,int measurePerClassifier){
    x1.assign(measurePerClassifier, 0);
    x2.assign(measurePerClassifier, 0);
    y1.assign(measurePerClassifier, 0);
    y2.assign(measurePerClassifier, 0);
    preinit(ordinal);
    stepPrefSuff(x1, size.width);
    stepPrefSuff(x2, size.width);
    stepPrefSuff(y1, size.height);
    stepPrefSuff(y2, size.height);
    // 2^measurePerClassifier bit patterns from code()
    int tableSize = 1;
    for(int i = 0; i < measurePerClassifier; i++){
        tableSize *= 2;
    }
    pos.assign(tableSize, 0);
    neg.assign(tableSize, 0);
}
// Folds one labelled patch into the classifier statistics: encodes the
// patch into its comparison bit pattern and bumps the matching counter.
void TLDEnsembleClassifier::integrate(Mat_<uchar> patch,bool isPositive){
    const unsigned short int bin = code(patch.data, (int)patch.step[0]);
    std::vector<unsigned int>& table = isPositive ? pos : neg;
    table[bin]++;
}
double TLDEnsembleClassifier::posteriorProbability(const uchar* data,int rowstep)const{
unsigned short int position=code(data,rowstep);
double posNum=(double)pos[position], negNum=(double)neg[position];
if(posNum==0.0 && negNum==0.0){
return 0.0;
}else{
return posNum/(posNum+negNum);
}
}
// Encodes a patch as a bit pattern: bit i is set iff the pixel at
// (x1[i],y1[i]) is darker than the pixel at (x2[i],y2[i]).
// `data` points at the patch's top-left pixel; rowstep is its row stride.
// Cleanup: the original carried an empty, dead `else` branch.
unsigned short int TLDEnsembleClassifier::code(const uchar* data,int rowstep)const{
unsigned short int position=0;
for(int i=0;i<(int)x1.size();i++){
position=position<<1;
if(*(data+rowstep*y1[i]+x1[i])<*(data+rowstep*y2[i]+x2[i])){
position++;
}
}
return position;
}
}}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include <algorithm>
#include <limits.h>
namespace cv {namespace tld
{
//debug functions and variables
#define ALEX_DEBUG
#ifdef ALEX_DEBUG
#define dfprintf(x) fprintf x
#define dprintf(x) printf x
#else
#define dfprintf(x)
#define dprintf(x)
#endif
#define MEASURE_TIME(a) {\
clock_t start;float milisec=0.0;\
start=clock();{a} milisec=1000.0*(clock()-start)/CLOCKS_PER_SEC;\
dprintf(("%-90s took %f milis\n",#a,milisec)); }
#define HERE dprintf(("%d\n",__LINE__));fflush(stderr);
#define START_TICK(name) { clock_t start;double milisec=0.0; start=clock();
#define END_TICK(name) milisec=1000.0*(clock()-start)/CLOCKS_PER_SEC;\
dprintf(("%s took %f milis\n",name,milisec)); }
extern Rect2d etalon;
void myassert(const Mat& img);
void printPatch(const Mat_<uchar>& standardPatch);
std::string type2str(const Mat& mat);
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,Rect2d whiteOne=Rect2d(-1.0,-1.0,-1.0,-1.0));
void drawWithRects(const Mat& img,std::vector<Rect2d>& blackOnes,std::vector<Rect2d>& whiteOnes);
//aux functions and variables
//#define CLIP(x,a,b) MIN(MAX((x),(a)),(b))
template<typename T> inline T CLIP(T x,T a,T b){return MIN(MAX(x,a),b);}
double overlap(const Rect2d& r1,const Rect2d& r2);
void resample(const Mat& img,const RotatedRect& r2,Mat_<uchar>& samples);
void resample(const Mat& img,const Rect2d& r2,Mat_<uchar>& samples);
double variance(const Mat& img);
double variance(Mat_<double>& intImgP,Mat_<double>& intImgP2,Rect box);
double NCC(Mat_<uchar> patch1,Mat_<uchar> patch2);
void getClosestN(std::vector<Rect2d>& scanGrid,Rect2d bBox,int n,std::vector<Rect2d>& res);
double scaleAndBlur(const Mat& originalImg,int scale,Mat& scaledImg,Mat& blurredImg,Size GaussBlurKernelSize);
unsigned int getMedian(const std::vector<unsigned int>& values, int size=-1);
// One fern of the TLD ensemble classifier: a fixed set of pairwise pixel
// comparisons that maps a patch to a bit pattern, plus per-pattern counts
// of positive and negative observations used for the posterior.
class TLDEnsembleClassifier{
public:
// ordinal selects this fern's comparison layout; size is the patch size;
// measurePerClassifier is the number of comparisons (= bits in the code).
TLDEnsembleClassifier(int ordinal,Size size,int measurePerClassifier);
// Accumulates one labelled patch into the pos/neg statistics.
void integrate(Mat_<uchar> patch,bool isPositive);
// P(positive | patch), from the counts for the patch's bit pattern.
double posteriorProbability(const uchar* data,int rowstep)const;
// Largest valid `ordinal` value accepted by the constructor.
static int getMaxOrdinal();
private:
static int getGridSize();
// Maps grid indices in arr to pixel coordinates along a dimension of length len.
inline void stepPrefSuff(std::vector<uchar>& arr,int len);
void preinit(int ordinal);
// Encodes a patch into its comparison bit pattern.
unsigned short int code(const uchar* data,int rowstep)const;
// per-pattern observation counts (indexed by the value of code())
std::vector<unsigned int> pos,neg;
// comparison endpoints: pixel (x1[i],y1[i]) is compared against (x2[i],y2[i])
std::vector<uchar> x1,y1,x2,y2;
};
// Minimal tracker interface used by TLD to delegate the frame-to-frame
// tracking step to an interchangeable short-term tracker.
class TrackerProxy{
public:
// Starts tracking the object inside boundingBox; returns success.
virtual bool init( const Mat& image, const Rect2d& boundingBox)=0;
// Advances one frame, updating boundingBox in place; returns success.
virtual bool update(const Mat& image, Rect2d& boundingBox)=0;
virtual ~TrackerProxy(){}
};
}}
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -78,7 +78,7 @@ bool Tracker::init( const Mat& image, const Rect2d& boundingBox )
//check if the model component is initialized
if( model == 0 )
{
CV_Error( -1, "The model are not initialized" );
CV_Error( -1, "The model is not initialized" );
return false;
}
......@@ -112,6 +112,8 @@ Ptr<Tracker> Tracker::create( const String& trackerType )
{
BOILERPLATE_CODE("MIL",TrackerMIL);
BOILERPLATE_CODE("BOOSTING",TrackerBoosting);
BOILERPLATE_CODE("MEDIANFLOW",TrackerMedianFlow);
BOILERPLATE_CODE("TLD",TrackerTLD);
return Ptr<Tracker>();
}
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include <algorithm>
#include <limits.h>
namespace cv
{
#undef ALEX_DEBUG
#ifdef ALEX_DEBUG
#define dfprintf(x) fprintf x
#define dprintf(x) printf x
#else
#define dfprintf(x)
#define dprintf(x)
#endif
/*
* TrackerMedianFlow
*/
/*
* TODO:
* add "non-detected" answer in algo --> test it with 2 rects --> frame-by-frame debug in TLD --> test it!!
* take all parameters out
* assessment framework
*
*
* FIXME:
* when patch is cut from image to compute NCC, there can be problem with size
* optimize (allocation<-->reallocation)
* optimize (remove vector.erase() calls)
* bring "out" all the parameters to TrackerMedianFlow::Param
*/
// Private implementation of the Median Flow tracker (Kalal et al.,
// "Forward-Backward Error: Automatic Detection of Tracking Failures").
// State between frames (previous image + box) lives in TrackerMedianFlowModel.
class TrackerMedianFlowImpl : public TrackerMedianFlow{
    public:
        // termcrit bounds the pyramidal LK iterations (20 iters / eps 0.3).
        TrackerMedianFlowImpl(TrackerMedianFlow::Params paramsIn):termcrit(TermCriteria::COUNT|TermCriteria::EPS,20,0.3){params=paramsIn;isInit=false;}
        void read( const FileNode& fn );
        void write( FileStorage& fs ) const;
    private:
        bool initImpl( const Mat& image, const Rect2d& boundingBox );
        bool updateImpl( const Mat& image, Rect2d& boundingBox );
        // One tracking step: oldImage -> newImage; updates oldBox in place and
        // returns false on tracking failure.
        bool medianFlowImpl(Mat oldImage,Mat newImage,Rect2d& oldBox);
        // Median-based vote for the new box; mD receives the median displacement.
        Rect2d vote(const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,const Rect2d& oldRect,Point2f& mD);
        //FIXME: this can be optimized: current method uses sort->select approach, there are O(n) selection algo for median; besides
        //it makes copy all the time
        template<typename T>
        T getMedian( std::vector<T>& values,int size=-1);
        float dist(Point2f p1,Point2f p2);
        // Debug helper: human-readable Mat type name.
        std::string type2str(int type);
        // Debug helper: prints a histogram of `data` (no-op unless ALEX_DEBUG).
        void computeStatistics(std::vector<float>& data,int size=-1);
        // Forward-backward error filter (initializes `status` when empty).
        void check_FB(const Mat& oldImage,const Mat& newImage,
                const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,std::vector<bool>& status);
        // Patch-NCC filter; ANDs its verdict into an already-sized `status`.
        void check_NCC(const Mat& oldImage,const Mat& newImage,
                const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,std::vector<bool>& status);
        inline double l2distance(Point2f p1,Point2f p2);
        TrackerMedianFlow::Params params;
        TermCriteria termcrit;
};
// Median Flow's entire inter-frame state: the previous frame and the previous
// bounding box. The TrackerModel estimation/update hooks are unused.
class TrackerMedianFlowModel : public TrackerModel{
    public:
        TrackerMedianFlowModel(TrackerMedianFlow::Params /*params*/){}
        Rect2d getBoundingBox(){return boundingBox_;}
        // NOTE: "Bouding" typo kept -- all call sites in this file use this name.
        void setBoudingBox(Rect2d boundingBox){boundingBox_=boundingBox;}
        Mat getImage(){return image_;}
        // Deep-copies the frame so the stored state outlives the caller's buffer.
        void setImage(const Mat& image){image.copyTo(image_);}
    protected:
        Rect2d boundingBox_;
        Mat image_;
        void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){}
        void modelUpdateImpl(){}
};
/*
* Parameters
*/
// Default parameters: sample a 10x10 lattice of points inside the bounding box.
TrackerMedianFlow::Params::Params() : pointsInGrid(10) {
}
// Deserializes parameters. NOTE(review): if "pointsInGrid" is absent from the
// file the FileNode conversion does not keep the default -- confirm desired
// behavior for partial parameter files.
void TrackerMedianFlow::Params::read( const cv::FileNode& fn ){
    pointsInGrid=fn["pointsInGrid"];
}
// Serializes parameters to file storage.
void TrackerMedianFlow::Params::write( cv::FileStorage& fs ) const{
    fs << "pointsInGrid" << pointsInGrid;
}
// (De)serialization simply delegates to the Params struct.
void TrackerMedianFlowImpl::read( const cv::FileNode& fn )
{
    params.read( fn );
}
void TrackerMedianFlowImpl::write( cv::FileStorage& fs ) const
{
    params.write( fs );
}
// Factory: instantiates the private TrackerMedianFlowImpl behind the public
// TrackerMedianFlow interface.
Ptr<TrackerMedianFlow> TrackerMedianFlow::createTracker(const TrackerMedianFlow::Params &parameters){
    TrackerMedianFlowImpl* impl = new TrackerMedianFlowImpl(parameters);
    return Ptr<TrackerMedianFlow>(impl);
}
// Creates the model and seeds it with the first frame and the initial box --
// that pair is the tracker's entire state.
bool TrackerMedianFlowImpl::initImpl( const Mat& image, const Rect2d& boundingBox ){
    model=Ptr<TrackerMedianFlowModel>(new TrackerMedianFlowModel(params));
    // `model` is stored as Ptr<TrackerModel>; downcast to reach the setters.
    ((TrackerMedianFlowModel*)static_cast<TrackerModel*>(model))->setImage(image);
    ((TrackerMedianFlowModel*)static_cast<TrackerModel*>(model))->setBoudingBox(boundingBox);
    return true;
}
bool TrackerMedianFlowImpl::updateImpl( const Mat& image, Rect2d& boundingBox ){
Mat oldImage=((TrackerMedianFlowModel*)static_cast<TrackerModel*>(model))->getImage();
Rect2d oldBox=((TrackerMedianFlowModel*)static_cast<TrackerModel*>(model))->getBoundingBox();
if(!medianFlowImpl(oldImage,image,oldBox)){
return false;
}
boundingBox=oldBox;
((TrackerMedianFlowModel*)static_cast<TrackerModel*>(model))->setImage(image);
((TrackerMedianFlowModel*)static_cast<TrackerModel*>(model))->setBoudingBox(oldBox);
return true;
}
std::string TrackerMedianFlowImpl::type2str(int type) {
std::string r;
uchar depth = type & CV_MAT_DEPTH_MASK;
uchar chans = (uchar)(1 + (type >> CV_CN_SHIFT));
switch ( depth ) {
case CV_8U: r = "8U"; break;
case CV_8S: r = "8S"; break;
case CV_16U: r = "16U"; break;
case CV_16S: r = "16S"; break;
case CV_32S: r = "32S"; break;
case CV_32F: r = "32F"; break;
case CV_64F: r = "64F"; break;
default: r = "User"; break;
}
r += "C";
r += (chans+'0');
return r;
}
// Core Median Flow step: tracks a lattice of points from oldImage to newImage
// with pyramidal LK, filters the points by forward-backward error and NCC, and
// votes for the new box position/scale. Returns false when tracking failed
// (no surviving points, or the residual point motion is too incoherent).
bool TrackerMedianFlowImpl::medianFlowImpl(Mat oldImage,Mat newImage,Rect2d& oldBox){
    std::vector<Point2f> pointsToTrackOld,pointsToTrackNew;
    Mat oldImage_gray,newImage_gray;
    cvtColor( oldImage, oldImage_gray, COLOR_BGR2GRAY );
    cvtColor( newImage, newImage_gray, COLOR_BGR2GRAY );

    // Seed a pointsInGrid x pointsInGrid lattice of cell centers inside oldBox.
    for(int i=0;i<params.pointsInGrid;i++){
        for(int j=0;j<params.pointsInGrid;j++){
            pointsToTrackOld.push_back(
                    Point2f((float)(oldBox.x+((1.0*oldBox.width)/params.pointsInGrid)*j+.5*oldBox.width/params.pointsInGrid),
                            (float)(oldBox.y+((1.0*oldBox.height)/params.pointsInGrid)*i+.5*oldBox.height/params.pointsInGrid)));
        }
    }

    std::vector<uchar> status(pointsToTrackOld.size());
    std::vector<float> errors(pointsToTrackOld.size());
    calcOpticalFlowPyrLK(oldImage_gray, newImage_gray,pointsToTrackOld,pointsToTrackNew,status,errors,Size(3,3),5,termcrit,0);
    dprintf(("\t%d after LK forward\n",(int)pointsToTrackOld.size()));

    // Raw displacements of the points LK managed to track; used at the end to
    // measure how coherent the motion is after removing the median displacement.
    std::vector<Point2f> di;
    for(int i=0;i<(int)pointsToTrackOld.size();i++){
        if(status[i]==1){
            di.push_back(pointsToTrackNew[i]-pointsToTrackOld[i]);
        }
    }

    std::vector<bool> filter_status;
    check_FB(oldImage_gray,newImage_gray,pointsToTrackOld,pointsToTrackNew,filter_status);
    check_NCC(oldImage_gray,newImage_gray,pointsToTrackOld,pointsToTrackNew,filter_status);

    // Compact the surviving points in place: O(n) instead of the previous
    // per-element vector::erase() loop, which was O(n^2)
    // (addresses the "remove vector.erase() calls" FIXME above).
    size_t kept=0;
    for(size_t i=0;i<pointsToTrackOld.size();i++){
        if(filter_status[i]){
            pointsToTrackOld[kept]=pointsToTrackOld[i];
            pointsToTrackNew[kept]=pointsToTrackNew[i];
            kept++;
        }
    }
    pointsToTrackOld.resize(kept);
    pointsToTrackNew.resize(kept);
    dprintf(("\t%d after LK backward\n",(int)pointsToTrackOld.size()));

    if(pointsToTrackOld.size()==0 || di.size()==0){
        return false;
    }

    Point2f mDisplacement;
    oldBox=vote(pointsToTrackOld,pointsToTrackNew,oldBox,mDisplacement);

    // Failure test: median residual displacement (after subtracting the median
    // motion) above 10 pixels means the point motions disagree too much.
    std::vector<double> displacements;
    for(int i=0;i<(int)di.size();i++){
        di[i]-=mDisplacement;
        displacements.push_back(sqrt(di[i].ddot(di[i])));
    }
    if(getMedian(displacements,(int)displacements.size())>10){
        return false;
    }
    return true;
}
// Estimates the new bounding box from tracked point pairs: the box center is
// shifted by the median per-point x/y displacement, and the box is rescaled by
// the median of pairwise-distance ratios. `mD` receives the median
// displacement so the caller can measure residual motion.
// Fixes vs. the previous revision:
//  * the second `oldPoints.size()==1` branch was unreachable (the first one
//    returns) and has been removed;
//  * `mD` is now set in the single-point path too -- it used to be left
//    uninitialized, and medianFlowImpl reads it unconditionally;
//  * the debug-only `static int iteration` counter is gone, per its own FIXME.
Rect2d TrackerMedianFlowImpl::vote(const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,const Rect2d& oldRect,Point2f& mD){
    Rect2d newRect;
    Point2d newCenter(oldRect.x+oldRect.width/2.0,oldRect.y+oldRect.height/2.0);
    int n=(int)oldPoints.size();
    std::vector<double> buf(std::max(n*(n-1)/2,3),0.0);

    // A single point defines no pairwise distance: translate only.
    if(oldPoints.size()==1){
        mD=newPoints[0]-oldPoints[0];
        newRect.x=oldRect.x+mD.x;
        newRect.y=oldRect.y+mD.y;
        newRect.width=oldRect.width;
        newRect.height=oldRect.height;
        return newRect;
    }

    double xshift=0,yshift=0;
    for(int i=0;i<n;i++){ buf[i]=newPoints[i].x-oldPoints[i].x; }
    xshift=getMedian(buf,n);
    newCenter.x+=xshift;
    for(int i=0;i<n;i++){ buf[i]=newPoints[i].y-oldPoints[i].y; }
    yshift=getMedian(buf,n);
    newCenter.y+=yshift;
    mD=Point2f((float)xshift,(float)yshift);

    // Scale: median over all point pairs of the new/old inter-point distance
    // ratio (degenerate zero-length old pairs contribute 0).
    double nd,od;
    for(int i=0,ctr=0;i<n;i++){
        for(int j=0;j<i;j++){
            nd=l2distance(newPoints[i],newPoints[j]);
            od=l2distance(oldPoints[i],oldPoints[j]);
            buf[ctr]=(od==0.0)?0.0:(nd/od);
            ctr++;
        }
    }
    double scale=getMedian(buf,n*(n-1)/2);
    dprintf(("shift: %f %f scale: %f\n",xshift,yshift,scale));
    newRect.x=newCenter.x-scale*oldRect.width/2.0;
    newRect.y=newCenter.y-scale*oldRect.height/2.0;
    newRect.width=scale*oldRect.width;
    newRect.height=scale*oldRect.height;
    dprintf(("rect old [%f %f %f %f]\n",oldRect.x,oldRect.y,oldRect.width,oldRect.height));
    dprintf(("rect [%f %f %f %f]\n",newRect.x,newRect.y,newRect.width,newRect.height));
    return newRect;
}
template<typename T>
T TrackerMedianFlowImpl::getMedian(std::vector<T>& values,int size){
if(size==-1){
size=(int)values.size();
}
std::vector<T> copy(values.begin(),values.begin()+size);
std::sort(copy.begin(),copy.end());
if(size%2==0){
return (copy[size/2-1]+copy[size/2])/((T)2.0);
}else{
return copy[(size-1)/2];
}
}
// Debug helper: prints a 10-bin histogram of the first `size` elements of
// `data` (whole vector when size==-1). Output goes through dprintf, so this is
// a no-op unless ALEX_DEBUG is defined.
// Fixed: an empty range (min_element on nothing) or all-equal data
// (division by maxi-mini == 0) no longer hits undefined behavior.
void TrackerMedianFlowImpl::computeStatistics(std::vector<float>& data,int size){
    int binnum=10;
    if(size==-1){
        size=(int)data.size();
    }
    if(size==0){
        return;
    }
    float mini=*std::min_element(data.begin(),data.begin()+size),maxi=*std::max_element(data.begin(),data.begin()+size);
    if(maxi==mini){
        dprintf(("all %d values equal %f\n",size,mini));
        return;
    }
    std::vector<int> bins(binnum,(int)0);
    for(int i=0;i<size;i++){
        bins[std::min((int)(binnum*(data[i]-mini)/(maxi-mini)),binnum-1)]++;
    }
    for(int i=0;i<binnum;i++){
        dprintf(("[%4f,%4f] -- %4d\n",mini+(maxi-mini)/binnum*i,mini+(maxi-mini)/binnum*(i+1),bins[i]));
    }
}
// Euclidean (L2) distance between two points.
double TrackerMedianFlowImpl::l2distance(Point2f p1,Point2f p2){
    const double dx = p1.x - p2.x;
    const double dy = p1.y - p2.y;
    return sqrt( dx*dx + dy*dy );
}
void TrackerMedianFlowImpl::check_FB(const Mat& oldImage,const Mat& newImage,
const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,std::vector<bool>& status){
if(status.size()==0){
status=std::vector<bool>(oldPoints.size(),true);
}
std::vector<uchar> LKstatus(oldPoints.size());
std::vector<float> errors(oldPoints.size());
std::vector<double> FBerror(oldPoints.size());
std::vector<Point2f> pointsToTrackReprojection;
calcOpticalFlowPyrLK(newImage, oldImage,newPoints,pointsToTrackReprojection,LKstatus,errors,Size(3,3),5,termcrit,0);
for(int i=0;i<(int)oldPoints.size();i++){
FBerror[i]=l2distance(oldPoints[i],pointsToTrackReprojection[i]);
}
double FBerrorMedian=getMedian(FBerror);
dprintf(("point median=%f\n",FBerrorMedian));
dprintf(("FBerrorMedian=%f\n",FBerrorMedian));
for(int i=0;i<(int)oldPoints.size();i++){
status[i]=(FBerror[i]<FBerrorMedian);
}
}
// Appearance filter: compares a 30x30 patch around each old point with the
// patch at its tracked position via normalized cross-correlation and keeps
// only points whose NCC is strictly above the median.
// NOTE(review): assumes `status` was already sized (check_FB runs first in
// medianFlowImpl) -- calling this with an empty status would index out of
// bounds; confirm the call order is preserved.
void TrackerMedianFlowImpl::check_NCC(const Mat& oldImage,const Mat& newImage,
        const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,std::vector<bool>& status){
    std::vector<float> NCC(oldPoints.size(),0.0);
    Size patch(30,30);
    Mat p1,p2;
    for (int i = 0; i < (int)oldPoints.size(); i++) {
        getRectSubPix( oldImage, patch, oldPoints[i],p1);
        getRectSubPix( newImage, patch, newPoints[i],p2);
        // N == patch area (30*30); below is the expanded NCC formula
        // (sum(p1*p2) - s1*s2/N) / (std1 * std2), with stds unnormalized by N.
        const int N=900;
        double s1=sum(p1)(0),s2=sum(p2)(0);
        double n1=norm(p1),n2=norm(p2);
        double prod=p1.dot(p2);
        double sq1=sqrt(n1*n1-s1*s1/N),sq2=sqrt(n2*n2-s2*s2/N);
        // Degenerate (constant) new patch: fall back to +/-1 via the sign of
        // sq1. NOTE(review): if sq1 is also 0 this divides 0 by 0 -- confirm
        // constant-vs-constant patches cannot reach this point.
        double ares=(sq2==0)?sq1/abs(sq1):(prod-s1*s2/N)/sq1/sq2;
        NCC[i] = (float)ares;
    }
    float median = getMedian(NCC);
    for(int i = 0; i < (int)oldPoints.size(); i++) {
        status[i] = status[i] && (NCC[i]>median);
    }
}
} /* namespace cv */
/*///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "time.h"
#include <algorithm>
#include <limits.h>
#include "TLD.hpp"
#include "opencv2/highgui.hpp"
#define THETA_NN 0.5
#define CORE_THRESHOLD 0.5
#define NEG_EXAMPLES_IN_INIT_MODEL 300
#define MAX_EXAMPLES_IN_MODEL 500
#define MEASURES_PER_CLASSIFIER 13
#undef BLUR_AS_VADIM
#undef CLOSED_LOOP
static const cv::Size GaussBlurKernelSize(3,3);
using namespace cv;
using namespace tld;
/*
* FIXME(optimize):
* no median
* direct formula in resamples
* FIXME(issues)
* THETA_NN 0.5<->0.6 dramatic change vs video 6 !!
* FIXME(features)
* benchmark: save photos --> two streams of photos --> better video
* TODO:
* schoolPC: codec, libopencv-dev
* fix pushbot ->pick commits -> compare_branches->all in 1->resubmit
* ||video(0.5<->0.6) --> debug if box size is less than 20 --> (remove ensemble self-loop) --> (try inter_area for resize)
* perfect PN
*
* vadim:
*
* blurred in TrackerTLDModel()
*
* warpAffine -- ?
*/
/* design decisions:
* blur --> resize (vs. resize-->blur) in detect(), ensembleClassifier stage
* no random gauss noise, when making examples for ensembleClassifier
*/
namespace cv
{
class TLDDetector;
// Developer-only debug helper: lets one click on a frame shown via imshow and
// probe the detector at that location. Only referenced from the
// preprocessor-disabled (#if !1) blocks in initImpl/updateImpl below.
class MyMouseCallbackDEBUG{
    public:
        MyMouseCallbackDEBUG( Mat& img, Mat& imgBlurred,TLDDetector* detector):img_(img),imgBlurred_(imgBlurred),detector_(detector){}
        // Trampoline matching cv::setMouseCallback's signature; forwards to the
        // member handler.
        static void onMouse( int event, int x, int y, int, void* obj){
            ((MyMouseCallbackDEBUG*)obj)->onMouse(event,x,y);
        }
        // Reference members suppress the implicit assignment operator; this
        // deliberate no-op keeps the class assignable for the compiler's sake.
        MyMouseCallbackDEBUG& operator= (const MyMouseCallbackDEBUG& /*other*/){return *this;}
    private:
        void onMouse( int event, int x, int y);
        Mat& img_,imgBlurred_;
        TLDDetector* detector_;
};
// Per-run bookkeeping for TLD: the upscale factor applied to incoming frames,
// the minimal scan-window size derived from the initial box, and the
// confidence/failure flags that drive the track-vs-detect logic in updateImpl.
class Data {
    public:
        Data(Rect2d initBox);
        Size getMinSize(){return minSize;}
        double getScale(){return scale;}
        bool confident;       // set once a candidate scored above CORE_THRESHOLD
        bool failedLastTime;  // set when no candidate was produced last frame
        int frameNum;         // frames processed (incremented in updateImpl)
        void printme(FILE* port=stdout);
    private:
        double scale;
        Size minSize;
};
class TrackerTLDModel;
// Sliding-window detector stage of TLD. Candidate windows pass through a
// three-stage cascade: variance test, fern ensemble, nearest-neighbor model
// (see detect()).
class TLDDetector {
    public:
        TLDDetector(const TrackerTLD::Params& params,Ptr<TrackerModel>model_in):model(model_in),params_(params){}
        ~TLDDetector(){}
        // Fills `res` with scan windows for a rows x cols image; when
        // withScaling is true the window is also swept over a 1.2-step pyramid.
        static void generateScanGrid(int rows,int cols,Size initBox,std::vector<Rect2d>& res,bool withScaling=false);
        // Runs the cascade on img; returns the best-scoring box in `res` plus,
        // for every window that reached the NN stage, its rect / object verdict
        // / should-be-integrated flag.
        bool detect(const Mat& img,const Mat& imgBlurred,Rect2d& res,std::vector<Rect2d>& rect,std::vector<bool>& isObject,
                std::vector<bool>& shouldBeIntegrated);
    protected:
        friend class MyMouseCallbackDEBUG;
        Ptr<TrackerModel> model;
        // Sum and squared-sum integral images used by the variance test.
        void computeIntegralImages(const Mat& img,Mat_<double>& intImgP,Mat_<double>& intImgP2){integral(img,intImgP,intImgP2,CV_64F);}
        // Stage 1: window variance must reach half the original patch variance.
        inline bool patchVariance(Mat_<double>& intImgP,Mat_<double>& intImgP2,double originalVariance,Point pt,Size size);
        // Stage 2: mean fern posterior must exceed 0.5.
        bool ensembleClassifier(const uchar* data,int rowstep){return ensembleClassifierNum(data,rowstep)>0.5;}
        double ensembleClassifierNum(const uchar* data,int rowstep);
        TrackerTLD::Params params_;
};
// P-expert of TLD's P-N learning: supplies additional positive training
// examples via additionalExamples() (warps around the trusted result box,
// judging from its inputs -- definition not in view). Its operator() always
// answers false, i.e. it never relabels a single detection as positive.
class Pexpert{
    public:
        Pexpert(const Mat& img,const Mat& imgBlurred,Rect2d& resultBox,const TLDDetector* detector,TrackerTLD::Params params,Size initSize):
            img_(img),imgBlurred_(imgBlurred),resultBox_(resultBox),detector_(detector),params_(params),initSize_(initSize){}
        bool operator()(Rect2d /*box*/){return false;}
        // Generates positive examples; returns how many were produced.
        int additionalExamples(std::vector<Mat_<uchar> >& examplesForModel,std::vector<Mat_<uchar> >& examplesForEnsemble);
    protected:
        Mat img_,imgBlurred_;
        Rect2d resultBox_;
        const TLDDetector* detector_;
        TrackerTLD::Params params_;
        RNG rng;
        Size initSize_;
};
// N-expert of TLD's P-N learning: operator() (defined elsewhere in this file)
// judges whether a detector window should stay labeled positive given the
// trusted result box. It contributes no extra training examples --
// additionalExamples() intentionally returns an empty set.
class Nexpert{
    public:
        Nexpert(const Mat& img,Rect2d& resultBox,const TLDDetector* detector,TrackerTLD::Params params):img_(img),resultBox_(resultBox),
            detector_(detector),params_(params){}
        bool operator()(Rect2d box);
        int additionalExamples(std::vector<Mat_<uchar> >& examplesForModel,std::vector<Mat_<uchar> >& examplesForEnsemble){
            examplesForModel.clear();examplesForEnsemble.clear();return 0;}
    protected:
        Mat img_;
        Rect2d resultBox_;
        const TLDDetector* detector_;
        TrackerTLD::Params params_;
};
// Adapter that lets TLD drive any Tracker implementation (TrackerMedianFlow by
// default) through the minimal TrackerProxy interface. init() may be called
// repeatedly: each call recreates the underlying tracker from scratch.
template <class T,class Tparams>
class TrackerProxyImpl : public TrackerProxy{
    public:
        TrackerProxyImpl(Tparams params=Tparams()):params_(params){}
        bool init( const Mat& image, const Rect2d& boundingBox ){
            // Fixed: forward the stored parameters to the factory -- previously
            // params_ was accepted in the constructor and silently ignored.
            trackerPtr=T::createTracker(params_);
            return trackerPtr->init(image,boundingBox);
        }
        bool update( const Mat& image,Rect2d& boundingBox){
            return trackerPtr->update(image,boundingBox);
        }
    private:
        Ptr<T> trackerPtr;
        Tparams params_;
        Rect2d boundingBox_;  // NOTE(review): never read or written -- kept for layout compatibility
};
// The learned TLD appearance model: sets of positive/negative 15x15 example
// patches (the nearest-neighbor model) plus the fern ensemble classifiers, and
// the variance of the original object patch used by the detector's first stage.
class TrackerTLDModel : public TrackerModel{
    public:
        TrackerTLDModel(TrackerTLD::Params params,const Mat& image, const Rect2d& boundingBox,Size minSize);
        Rect2d getBoundingBox(){return boundingBox_;}
        // NOTE: "Bouding" typo kept -- callers throughout the file use this name.
        void setBoudingBox(Rect2d boundingBox){boundingBox_=boundingBox;}
        double getOriginalVariance(){return originalVariance_;}
        std::vector<TLDEnsembleClassifier>* getClassifiers(){return &classifiers;}
        // Relative similarity: best-NCC match ratio against positives/negatives.
        double Sr(const Mat_<uchar> patch);
        // Conservative similarity: like Sr but (per its definition below) only
        // positives with timestamps up to the median are considered.
        double Sc(const Mat_<uchar> patch);
        // Feeds relabeled detector windows back into the model/ensemble.
        void integrateRelabeled(Mat& img,Mat& imgBlurred,const std::vector<Rect2d>& box,const std::vector<bool>& isPositive,
                const std::vector<bool>& alsoIntoModel);
        // Feeds expert-generated example patches into the model/ensemble.
        void integrateAdditional(const std::vector<Mat_<uchar> >& eForModel,const std::vector<Mat_<uchar> >& eForEnsemble,bool isPositive);
        Size getMinSize(){return minSize_;}
        void printme(FILE* port=stdout);
    protected:
        Size minSize_;
        // Monotone insertion timestamps; parallel to the example vectors.
        unsigned int timeStampPositiveNext,timeStampNegativeNext;
        TrackerTLD::Params params_;
        void pushIntoModel(const Mat_<uchar>& example,bool positive);
        void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){}
        void modelUpdateImpl(){}
        Rect2d boundingBox_;
        double originalVariance_;
        std::vector<Mat_<uchar> > positiveExamples,negativeExamples;
        std::vector<unsigned int> timeStampsPositive,timeStampsNegative;
        RNG rng;
        std::vector<TLDEnsembleClassifier> classifiers;
};
// Public TrackerTLD implementation: wires together the short-term tracker
// proxy, the sliding-window detector, and the per-run Data bookkeeping.
class TrackerTLDImpl : public TrackerTLD
{
    public:
        TrackerTLDImpl( const TrackerTLD::Params &parameters = TrackerTLD::Params() );
        void read( const FileNode& fn );
        void write( FileStorage& fs ) const;
    protected:
        bool initImpl( const Mat& image, const Rect2d& boundingBox );
        bool updateImpl( const Mat& image, Rect2d& boundingBox );
        TrackerTLD::Params params;
        Ptr<Data> data;               // scale / confidence bookkeeping
        Ptr<TrackerProxy> trackerProxy;  // short-term tracker (median flow)
        Ptr<TLDDetector> detector;
};
// TrackerTLD currently has no tunable parameters; the constructor and the
// (de)serialization hooks are intentionally empty so the interface stays
// uniform with the other trackers.
TrackerTLD::Params::Params(){
}
void TrackerTLD::Params::read( const cv::FileNode& /*fn*/ ){
}
void TrackerTLD::Params::write( cv::FileStorage& /*fs*/ ) const{
}
// Factory for the public TrackerTLD interface.
Ptr<TrackerTLD> TrackerTLD::createTracker(const TrackerTLD::Params &parameters){
    return Ptr<TrackerTLDImpl>(new TrackerTLDImpl(parameters));
}
// Stores parameters and wraps a median-flow tracker as the short-term tracking
// component; the model, detector and Data are created later in initImpl().
TrackerTLDImpl::TrackerTLDImpl( const TrackerTLD::Params &parameters) :
        params( parameters ){
    isInit = false;
    trackerProxy=Ptr<TrackerProxyImpl<TrackerMedianFlow,TrackerMedianFlow::Params> >(
            new TrackerProxyImpl<TrackerMedianFlow,TrackerMedianFlow::Params>());
}
// (De)serialization simply delegates to the (currently empty) Params struct.
void TrackerTLDImpl::read( const cv::FileNode& fn )
{
    params.read( fn );
}
void TrackerTLDImpl::write( cv::FileStorage& fs ) const
{
    params.write( fs );
}
// Prepares TLD for a new sequence: initializes the short-term tracker, decides
// an upscale factor for small objects (stored in Data), builds the appearance
// model from the first frame, and creates the detector. The #if !1 block is
// developer-only mouse-debug code and is compiled out.
bool TrackerTLDImpl::initImpl(const Mat& image, const Rect2d& boundingBox ){
    Mat image_gray;
    trackerProxy->init(image,boundingBox);
    cvtColor( image, image_gray, COLOR_BGR2GRAY );
    data=Ptr<Data>(new Data(boundingBox));
    double scale=data->getScale();
    Rect2d myBoundingBox=boundingBox;
    // Small object: work on an upscaled frame and scale the box to match.
    if(scale>1.0){
        Mat image_proxy;
        resize(image_gray,image_proxy,Size(cvRound(image.cols*scale),cvRound(image.rows*scale)));
        image_proxy.copyTo(image_gray);
        myBoundingBox.x*=scale;
        myBoundingBox.y*=scale;
        myBoundingBox.width*=scale;
        myBoundingBox.height*=scale;
    }
    model=Ptr<TrackerTLDModel>(new TrackerTLDModel(params,image_gray,myBoundingBox,data->getMinSize()));
    detector=Ptr<TLDDetector>(new TLDDetector(params,model));
    data->confident=false;
    data->failedLastTime=false;
#if !1
    // Disabled debug hook: click around the first frame to probe the detector.
    // Note: `callback` is allocated and never freed (debug-only code).
    dprintf(("here I am\n"));
    Mat image_blurred;
    GaussianBlur(image_gray,image_blurred,GaussBlurKernelSize,0.0);
    MyMouseCallbackDEBUG* callback=new MyMouseCallbackDEBUG(image_gray,image_blurred,detector);
    imshow("picker",image_gray);
    setMouseCallback( "picker", MyMouseCallbackDEBUG::onMouse, (void*)callback);
    waitKey();
#endif
    return true;
}
// One TLD frame step. Runs two candidate generators -- the short-term tracker
// (skipped if it failed last frame) and the sliding-window detector -- scores
// the candidates with the conservative similarity Sc, picks the best one, and,
// while confident, runs P-N expert learning to relabel detector windows and
// grow the model. Returns false when no candidate is available at all.
bool TrackerTLDImpl::updateImpl(const Mat& image, Rect2d& boundingBox){
    Mat image_gray,image_blurred,imageForDetector;
    cvtColor( image, image_gray, COLOR_BGR2GRAY );
    double scale=data->getScale();
    // Small objects are detected on an upscaled frame (scale fixed at init).
    if(scale>1.0){
        resize(image_gray,imageForDetector,Size(cvRound(image.cols*scale),cvRound(image.rows*scale)));
    }else{
        imageForDetector=image_gray;
    }
    GaussianBlur(imageForDetector,image_blurred,GaussBlurKernelSize,0.0);
    TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(model));
    data->frameNum++;
    Mat_<uchar> standardPatch(15,15);
    std::vector<Rect2d> detectorResults;
    std::vector<bool> isObject,shouldBeIntegrated;
    //best overlap around 92%
    Rect2d tmpCandid=boundingBox;
    std::vector<Rect2d> candidates;
    std::vector<double> candidatesRes;
    bool trackerNeedsReInit=false;
    // i==0: short-term tracker candidate; i==1: detector candidate.
    for(int i=0;i<2;i++){
        if(((i==0)&&!(data->failedLastTime)&&trackerProxy->update(image,tmpCandid)) ||
                ((i==1)&&(detector->detect(imageForDetector,image_blurred,tmpCandid,detectorResults,isObject,shouldBeIntegrated)))){
            candidates.push_back(tmpCandid);
            // The tracker runs on the original resolution, the detector on the
            // (possibly upscaled) frame -- resample from the matching image.
            if(i==0){
                resample(image_gray,tmpCandid,standardPatch);
            }else{
                resample(imageForDetector,tmpCandid,standardPatch);
            }
            candidatesRes.push_back(tldModel->Sc(standardPatch));
        }else{
            if(i==0){
                trackerNeedsReInit=true;
            }
        }
    }
    std::vector<double>::iterator it=std::max_element(candidatesRes.begin(),candidatesRes.end());
    // Everything below down to the #endif is debug tracing (no-ops unless
    // ALEX_DEBUG is defined; the #if !1 block is compiled out entirely).
    dfprintf((stdout,"scale=%f\n",log(1.0*boundingBox.width/(data->getMinSize()).width)/log(1.2)));
    for(int i=0;i<(int)candidatesRes.size();i++){
        dprintf(("\tcandidatesRes[%d]=%f\n",i,candidatesRes[i]));
    }
    data->printme();
    tldModel->printme(stdout);
#if !1
    if(data->frameNum==82){
        dprintf(("here I am\n"));
        MyMouseCallbackDEBUG* callback=new MyMouseCallbackDEBUG(imageForDetector,image_blurred,detector);
        imshow("picker",imageForDetector);
        setMouseCallback( "picker", MyMouseCallbackDEBUG::onMouse, (void*)callback);
        waitKey();
    }
#endif
    // No candidate at all: report failure and remember it for the next frame.
    if(it==candidatesRes.end()){
        data->confident=false;
        data->failedLastTime=true;
        return false;
    }else{
        boundingBox=candidates[it-candidatesRes.begin()];
        data->failedLastTime=false;
        // Re-seed the short-term tracker when it failed or was out-voted.
        if(trackerNeedsReInit || it!=candidatesRes.begin()){
            trackerProxy->init(image,boundingBox);
        }
    }
    // Debug trace of the winning candidate (`it` is valid here -- the
    // end-iterator case returned above; the !false is leftover debug plumbing).
    if(!false && it!=candidatesRes.end()){
        resample(imageForDetector,candidates[it-candidatesRes.begin()],standardPatch);
        dfprintf((stderr,"%d %f %f\n",data->frameNum,tldModel->Sc(standardPatch),tldModel->Sr(standardPatch)));
        if(candidatesRes.size()==2 && it==(candidatesRes.begin()+1))
            dfprintf((stderr,"detector WON\n"));
    }else{
        dfprintf((stderr,"%d x x\n",data->frameNum));
    }
    // Become confident once any candidate scores above the core threshold;
    // learning below only happens while confident.
    if(*it > CORE_THRESHOLD){
        data->confident=true;
    }
    if(data->confident){
        // P-N learning: the N-expert may demote positives, the P-expert may
        // promote negatives; changed labels are flagged for integration.
        Pexpert pExpert(imageForDetector,image_blurred,boundingBox,detector,params,data->getMinSize());
        Nexpert nExpert(imageForDetector,boundingBox,detector,params);
        bool expertResult;
        std::vector<Mat_<uchar> > examplesForModel,examplesForEnsemble;
        examplesForModel.reserve(100);examplesForEnsemble.reserve(100);
        int negRelabeled=0;
        for(int i=0;i<(int)detectorResults.size();i++){
            if(isObject[i]){
                expertResult=nExpert(detectorResults[i]);
                if(expertResult!=isObject[i]){negRelabeled++;}
            }else{
                expertResult=pExpert(detectorResults[i]);
            }
            shouldBeIntegrated[i]=shouldBeIntegrated[i] || (isObject[i]!=expertResult);
            isObject[i]=expertResult;
        }
        tldModel->integrateRelabeled(imageForDetector,image_blurred,detectorResults,isObject,shouldBeIntegrated);
        dprintf(("%d relabeled by nExpert\n",negRelabeled));
        // Extra examples: positives from the P-expert, negatives from the
        // N-expert (the latter currently produces none -- see Nexpert).
        pExpert.additionalExamples(examplesForModel,examplesForEnsemble);
        tldModel->integrateAdditional(examplesForModel,examplesForEnsemble,true);
        examplesForModel.clear();examplesForEnsemble.clear();
        nExpert.additionalExamples(examplesForModel,examplesForEnsemble);
        tldModel->integrateAdditional(examplesForModel,examplesForEnsemble,false);
    }else{
#ifdef CLOSED_LOOP
        tldModel->integrateRelabeled(imageForDetector,image_blurred,detectorResults,isObject,shouldBeIntegrated);
#endif
    }
    return true;
}
// Builds the initial appearance model from the first frame. Positives: 20
// randomly warped patches (small shift/scale/rotation plus pixel noise) around
// each of the 10 scan windows closest to the initial box. Negatives: random
// scan windows with <0.2 overlap with the object. Both sets also train the
// fern ensemble (on blurred patches).
TrackerTLDModel::TrackerTLDModel(TrackerTLD::Params params,const Mat& image, const Rect2d& boundingBox,Size minSize):minSize_(minSize),
        timeStampPositiveNext(0),timeStampNegativeNext(0),params_(params){
    boundingBox_=boundingBox;
    originalVariance_=variance(image(boundingBox));
    std::vector<Rect2d> closest(10),scanGrid;
    Mat scaledImg,blurredImg,image_blurred;
    // Rescale the frame so the object roughly matches minSize (1.2^k steps).
    double scale=scaleAndBlur(image,cvRound(log(1.0*boundingBox.width/(minSize.width))/log(1.2)),scaledImg,blurredImg,GaussBlurKernelSize);
    GaussianBlur(image,image_blurred,GaussBlurKernelSize,0.0);
    TLDDetector::generateScanGrid(image.rows,image.cols,minSize,scanGrid);
    getClosestN(scanGrid,Rect2d(boundingBox.x/scale,boundingBox.y/scale,boundingBox.width/scale,boundingBox.height/scale),10,closest);
    Mat_<uchar> blurredPatch(minSize);
    for(int i=0,howMany=TLDEnsembleClassifier::getMaxOrdinal();i<howMany;i++){
        classifiers.push_back(TLDEnsembleClassifier(i+1,minSize,MEASURES_PER_CLASSIFIER));
    }
    positiveExamples.reserve(200);
    Point2f center;
    Size2f size;
    for(int i=0;i<(int)closest.size();i++){
        for(int j=0;j<20;j++){
            Mat_<uchar> standardPatch(15,15);
            center.x=(float)(closest[i].x+closest[i].width*(0.5+rng.uniform(-0.01,0.01)));
            center.y=(float)(closest[i].y+closest[i].height*(0.5+rng.uniform(-0.01,0.01)));
            size.width=(float)(closest[i].width*rng.uniform((double)0.99,(double)1.01));
            size.height=(float)(closest[i].height*rng.uniform((double)0.99,(double)1.01));
            float angle=(float)rng.uniform(-10.0,10.0);
            resample(scaledImg,RotatedRect(center,size,angle),standardPatch);
            // Additive Gaussian pixel noise on the positive patch.
            for(int y=0;y<standardPatch.rows;y++){
                for(int x=0;x<standardPatch.cols;x++){
                    // NOTE(review): (x,y) is (row,col) for Mat_, so the indices
                    // look transposed; in-bounds only because the patch is
                    // square (15x15) -- confirm intent.
                    standardPatch(x,y)+=(uchar)rng.gaussian(5.0);
                }
            }
#ifdef BLUR_AS_VADIM
            GaussianBlur(standardPatch,blurredPatch,GaussBlurKernelSize,0.0);
#else
            resample(blurredImg,RotatedRect(center,size,angle),blurredPatch);
#endif
            pushIntoModel(standardPatch,true);
            for(int k=0;k<(int)classifiers.size();k++){
                classifiers[k].integrate(blurredPatch,true);
            }
        }
    }
    // Negative examples from random low-overlap scan windows (scaled grid).
    TLDDetector::generateScanGrid(image.rows,image.cols,minSize,scanGrid,true);
    negativeExamples.clear();
    negativeExamples.reserve(NEG_EXAMPLES_IN_INIT_MODEL);
    std::vector<int> indices;
    indices.reserve(NEG_EXAMPLES_IN_INIT_MODEL);
    while(negativeExamples.size()<NEG_EXAMPLES_IN_INIT_MODEL){
        int i=rng.uniform((int)0,(int)scanGrid.size());
        // NOTE(review): `indices` is never appended to, so the std::find
        // duplicate check is always false -- duplicate windows can be sampled.
        // Confirm whether a push_back(i) was intended here.
        if(std::find(indices.begin(),indices.end(),i)==indices.end() && overlap(boundingBox,scanGrid[i])<0.2){
            Mat_<uchar> standardPatch(15,15);
            resample(image,scanGrid[i],standardPatch);
            pushIntoModel(standardPatch,false);
            resample(image_blurred,scanGrid[i],blurredPatch);
            for(int k=0;k<(int)classifiers.size();k++){
                classifiers[k].integrate(blurredPatch,false);
            }
        }
    }
    dprintf(("positive patches: %d\nnegative patches: %d\n",(int)positiveExamples.size(),(int)negativeExamples.size()));
}
// Builds the list of scan windows for a rows x cols image by sliding a window
// of the initial box size in 10%-of-size steps; with `withScaling` the window
// is also swept over a 1.2x scale pyramid (shrinking first down to a 20px
// floor, then growing).
void TLDDetector::generateScanGrid(int rows,int cols,Size initBox,std::vector<Rect2d>& res,bool withScaling){
    res.clear();
    //scales step: 1.2; hor step: 10% of width; verstep: 10% of height; minsize: 20pix
    // NOTE(review): the loop guard compares height against cols and width
    // against rows -- this looks swapped (it only matters for strongly
    // non-square frames); confirm against upstream before changing.
    for(double h=initBox.height, w=initBox.width;h<cols && w<rows;){
        for(double x=0;(x+w)<=(cols-1.0);x+=(0.1*w)){
            for(double y=0;(y+h)<=(rows-1.0);y+=(0.1*h)){
                res.push_back(Rect2d(x,y,w,h));
            }
        }
        if(withScaling){
            // Shrink until the 20px floor is hit, then restart upward at 1.2x.
            if(h<=initBox.height){
                h/=1.2; w/=1.2;
                if(h<20 || w<20){
                    h=initBox.height*1.2; w=initBox.width*1.2;
                    CV_Assert(h>initBox.height || w>initBox.width);
                }
            }else{
                h*=1.2; w*=1.2;
            }
        }else{
            break;
        }
    }
    dprintf(("%d rects in res\n",(int)res.size()));
}
// Detector cascade over a 1.2x scale pyramid: (1) variance test on integral
// images, (2) fern ensemble on the blurred frame, (3) nearest-neighbor model.
// Every window reaching stage 3 is recorded in rect/isObject/
// shouldBeIntegrated for the learning step; the best Sc-scoring positive
// window is returned in `res`. Returns false when no window scores positively.
// The #if !0 / #if !1 sections are debug visualizations; only the drawWithRects
// one is compiled in.
bool TLDDetector::detect(const Mat& img,const Mat& imgBlurred,Rect2d& res,std::vector<Rect2d>& rect,std::vector<bool>& isObject,
        std::vector<bool>& shouldBeIntegrated){
    TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(model));
    Size initSize=tldModel->getMinSize();
    rect.clear();
    isObject.clear();
    shouldBeIntegrated.clear();
    Mat resized_img,blurred_img;
    Mat_<uchar> standardPatch(15,15);
    img.copyTo(resized_img);
    imgBlurred.copyTo(blurred_img);
    double originalVariance=tldModel->getOriginalVariance();;
    // Window stride: 10% of the minimal window size.
    int dx=initSize.width/10,dy=initSize.height/10;
    Size2d size=img.size();
    double scale=1.0;
    int total=0,pass=0;
    int npos=0,nneg=0;
    double tmp=0,maxSc=-5.0;
    Rect2d maxScRect;
    START_TICK("detector");
    do{
        Mat_<double> intImgP,intImgP2;
        computeIntegralImages(resized_img,intImgP,intImgP2);
        for(int i=0,imax=cvFloor((0.0+resized_img.cols-initSize.width)/dx);i<imax;i++){
            for(int j=0,jmax=cvFloor((0.0+resized_img.rows-initSize.height)/dy);j<jmax;j++){
                total++;
                // Stage 1: cheap variance rejection.
                if(!patchVariance(intImgP,intImgP2,originalVariance,Point(dx*i,dy*j),initSize)){
                    continue;
                }
                // Stage 2: fern ensemble on the blurred, resized frame.
                if(!ensembleClassifier(&blurred_img.at<uchar>(dy*j,dx*i),(int)blurred_img.step[0])){
                    continue;
                }
                pass++;
                // Record the window mapped back to original-image coordinates.
                rect.push_back(Rect2d(dx*i*scale,dy*j*scale,initSize.width*scale,initSize.height*scale));
                resample(resized_img,Rect2d(Point(dx*i,dy*j),initSize),standardPatch);
                // Stage 3: nearest-neighbor relative similarity; windows near
                // the decision boundary are flagged for learning.
                tmp=tldModel->Sr(standardPatch);
                isObject.push_back(tmp>THETA_NN);
                shouldBeIntegrated.push_back(abs(tmp-THETA_NN)<0.1);
                if(!isObject[isObject.size()-1]){
                    nneg++;
                    continue;
                }else{
                    npos++;
                }
                // Track the best window by conservative similarity.
                tmp=tldModel->Sc(standardPatch);
                if(tmp>maxSc){
                    maxSc=tmp;
                    maxScRect=rect[rect.size()-1];
                }
            }
        }
        // Next pyramid level: shrink the image, grow the effective window.
        size.width/=1.2;
        size.height/=1.2;
        scale*=1.2;
        resize(img,resized_img,size);
        GaussianBlur(resized_img,blurred_img,GaussBlurKernelSize,0.0f);
    }while(size.width>=initSize.width && size.height>=initSize.height);
    END_TICK("detector");
    dfprintf((stdout,"after NCC: nneg=%d npos=%d\n",nneg,npos));
#if !0
    // Debug visualization: draw negative and positive windows.
    std::vector<Rect2d> poss,negs;
    for(int i=0;i<(int)rect.size();i++){
        if(isObject[i])
            poss.push_back(rect[i]);
        else
            negs.push_back(rect[i]);
    }
    dfprintf((stdout,"%d pos and %d neg\n",(int)poss.size(),(int)negs.size()));
    drawWithRects(img,negs,poss);
#endif
#if !1
    // Disabled debug: exhaustive Sr scan plus best-window display.
    std::vector<Rect2d> scanGrid;
    generateScanGrid(img.rows,img.cols,initSize,scanGrid);
    std::vector<double> results;
    Mat_<uchar> standardPatch_inner(15,15);
    for(int i=0;i<(int)scanGrid.size();i++){
        resample(img,scanGrid[i],standardPatch_inner);
        results.push_back(tldModel->Sr(standardPatch_inner));
    }
    std::vector<double>::iterator it=std::max_element(results.begin(),results.end());
    Mat image;
    img.copyTo(image);
    rectangle( image,scanGrid[it-results.begin()], 255, 1, 1 );
    imshow("img",image);
    waitKey();
#endif
#if !1
    // Disabled debug: display all recorded windows plus the result box.
    Mat image;
    img.copyTo(image);
    rectangle( image,res, 255, 1, 1 );
    for(int i=0;i<(int)rect.size();i++){
        rectangle( image,rect[i], 0, 1, 1 );
    }
    imshow("img",image);
    waitKey();
#endif
    dfprintf((stdout,"%d after ensemble\n",pass));
    if(maxSc<0){
        return false;
    }
    res=maxScRect;
    return true;
}
bool TLDDetector::patchVariance(Mat_<double>& intImgP,Mat_<double>& intImgP2,double originalVariance,Point pt,Size size){
    // First cascade stage: keep a candidate window only if its gray-level
    // variance (computed from the integral images) reaches at least half of
    // the variance measured on the initial bounding box.
    const Rect candidate(pt.x, pt.y, size.width, size.height);
    const double candidateVariance = variance(intImgP, intImgP2, candidate);
    return candidateVariance >= 0.5 * originalVariance;
}
double TLDDetector::ensembleClassifierNum(const uchar* data,int rowstep){
TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(model));
std::vector<TLDEnsembleClassifier>* classifiers=tldModel->getClassifiers();
double p=0;
for(int k=0;k<(int)classifiers->size();k++){
p+=(*classifiers)[k].posteriorProbability(data,rowstep);
}
p/=classifiers->size();
return p;
}
double TrackerTLDModel::Sr(const Mat_<uchar> patch){
    // Relative similarity: ratio of the best NCC match among the positive
    // templates to the sum of the best positive and best negative matches.
    // NCC is mapped from [-1,1] to [0,1] via 0.5*(ncc+1).
    double bestPositive = 0.0;
    for(int i = 0; i < (int)positiveExamples.size(); i++){
        bestPositive = MAX(bestPositive, 0.5*(NCC(positiveExamples[i], patch) + 1.0));
    }
    double bestNegative = 0.0;
    for(int i = 0; i < (int)negativeExamples.size(); i++){
        bestNegative = MAX(bestNegative, 0.5*(NCC(negativeExamples[i], patch) + 1.0));
    }
    // An empty model (no matches at all) yields a neutral score of 0.
    if(bestPositive + bestNegative == 0.0){
        return 0.0;
    }
    return bestPositive/(bestNegative + bestPositive);
}
double TrackerTLDModel::Sc(const Mat_<uchar> patch){
    // Conservative similarity: like Sr(), but only positive templates whose
    // time stamp is at or below the median (i.e. the older half of the model)
    // are allowed to vote, which damps the effect of recently added examples.
    double bestPositive = 0.0;
    const int medianStamp = getMedian(timeStampsPositive);
    for(int i = 0; i < (int)positiveExamples.size(); i++){
        if((int)timeStampsPositive[i] <= medianStamp){
            bestPositive = MAX(bestPositive, 0.5*(NCC(positiveExamples[i], patch) + 1.0));
        }
    }
    double bestNegative = 0.0;
    for(int i = 0; i < (int)negativeExamples.size(); i++){
        bestNegative = MAX(bestNegative, 0.5*(NCC(negativeExamples[i], patch) + 1.0));
    }
    // An empty model (no matches at all) yields a neutral score of 0.
    if(bestPositive + bestNegative == 0.0){
        return 0.0;
    }
    return bestPositive/(bestNegative + bestPositive);
}
// Feeds relabeled detector windows back into the learning components.
// For every box flagged in alsoIntoModel, the 15x15 resampled patch is pushed
// into the NN model with the label from isPositive, and the blurred patch is
// integrated into every fern classifier of the ensemble.
//   img           - current frame
//   imgBlurred    - pre-blurred copy of the frame (input to the ensemble)
//   box           - candidate windows produced by the detector
//   isPositive    - label assigned to each window
//   alsoIntoModel - whether the window should also update the NN model
void TrackerTLDModel::integrateRelabeled(Mat& img,Mat& imgBlurred,const std::vector<Rect2d>& box,const std::vector<bool>& isPositive,
        const std::vector<bool>& alsoIntoModel){
    Mat_<uchar> standardPatch(15,15),blurredPatch(minSize_);
    // Counters are used only for the diagnostic printout at the end.
    int positiveIntoModel=0,negativeIntoModel=0,positiveIntoEnsemble=0,negativeIntoEnsemble=0;
    for(int k=0;k<(int)box.size();k++){
        if(alsoIntoModel[k]){
            resample(img,box[k],standardPatch);
            if(isPositive[k]){
                positiveIntoModel++;
                pushIntoModel(standardPatch,true);
            }else{
                negativeIntoModel++;
                pushIntoModel(standardPatch,false);
            }
        }
// With CLOSED_LOOP defined, the ensemble is additionally updated on every
// negative window, not only on those selected for the model.
#ifdef CLOSED_LOOP
        if(alsoIntoModel[k] || (isPositive[k]==false)){
#else
        if(alsoIntoModel[k]){
#endif
            resample(imgBlurred,box[k],blurredPatch);
            if(isPositive[k]){
                positiveIntoEnsemble++;
            }else{
                negativeIntoEnsemble++;
            }
            for(int i=0;i<(int)classifiers.size();i++){
                classifiers[i].integrate(blurredPatch,isPositive[k]);
            }
        }
    }
    // Debug summary of how many examples reached each component.
    if(negativeIntoModel>0)
        dfprintf((stdout,"negativeIntoModel=%d ",negativeIntoModel));
    if(positiveIntoModel>0)
        dfprintf((stdout,"positiveIntoModel=%d ",positiveIntoModel));
    if(negativeIntoEnsemble>0)
        dfprintf((stdout,"negativeIntoEnsemble=%d ",negativeIntoEnsemble));
    if(positiveIntoEnsemble>0)
        dfprintf((stdout,"positiveIntoEnsemble=%d ",positiveIntoEnsemble));
    dfprintf((stdout,"\n"));
}
void TrackerTLDModel::integrateAdditional(const std::vector<Mat_<uchar> >& eForModel,const std::vector<Mat_<uchar> >& eForEnsemble,bool isPositive){
    // P/N-expert learning step: an example is added to a component only when
    // that component currently disagrees with the supplied label, so the
    // model and the ensemble are corrected rather than merely reinforced.
    int positiveIntoModel = 0, negativeIntoModel = 0;
    int positiveIntoEnsemble = 0, negativeIntoEnsemble = 0;
    for(int k = 0; k < (int)eForModel.size(); k++){
        // NN model: integrate when the relative-similarity verdict is wrong.
        const double sr = Sr(eForModel[k]);
        if((sr > THETA_NN) != isPositive){
            if(isPositive){
                positiveIntoModel++;
                pushIntoModel(eForModel[k], true);
            }else{
                negativeIntoModel++;
                pushIntoModel(eForModel[k], false);
            }
        }
        // Ensemble: integrate when the averaged posterior is wrong.
        double posterior = 0;
        for(int i = 0; i < (int)classifiers.size(); i++){
            posterior += classifiers[i].posteriorProbability(eForEnsemble[k].data, (int)eForEnsemble[k].step[0]);
        }
        posterior /= classifiers.size();
        if((posterior > 0.5) != isPositive){
            if(isPositive){
                positiveIntoEnsemble++;
            }else{
                negativeIntoEnsemble++;
            }
            for(int i = 0; i < (int)classifiers.size(); i++){
                classifiers[i].integrate(eForEnsemble[k], isPositive);
            }
        }
    }
    // Debug summary of how many examples reached each component.
    if(negativeIntoModel>0)
        dfprintf((stdout,"negativeIntoModel=%d ",negativeIntoModel));
    if(positiveIntoModel>0)
        dfprintf((stdout,"positiveIntoModel=%d ",positiveIntoModel));
    if(negativeIntoEnsemble>0)
        dfprintf((stdout,"negativeIntoEnsemble=%d ",negativeIntoEnsemble));
    if(positiveIntoEnsemble>0)
        dfprintf((stdout,"positiveIntoEnsemble=%d ",positiveIntoEnsemble));
    dfprintf((stdout,"\n"));
}
// Generates ~100 synthetic positive examples by applying small random warps
// (+-1% shift and scale, +-5 degrees rotation) to patches around the 10
// scan-grid windows closest to the current result box, at the pyramid level
// whose window size matches initSize_.
//   examplesForModel    - receives 15x15 sharp patches for the NN model
//   examplesForEnsemble - receives blurred patches for the fern ensemble
// Returns 0 (the return value is not meaningful).
//
// Fix vs. original: the BLUR_AS_VADIM branch used to call GaussianBlur on
// standardPatch BEFORE standardPatch was filled by resample(), i.e. it
// blurred an uninitialized buffer. The sharp patch is now filled first.
// Also, Mat_::operator() takes (row,col), so the noise loop now indexes
// (y,x) instead of (x,y).
int Pexpert::additionalExamples(std::vector<Mat_<uchar> >& examplesForModel,std::vector<Mat_<uchar> >& examplesForEnsemble){
    examplesForModel.clear();examplesForEnsemble.clear();
    examplesForModel.reserve(100);examplesForEnsemble.reserve(100);
    std::vector<Rect2d> closest(10),scanGrid;
    Mat scaledImg,blurredImg;
    // Scale factor between the original frame and the chosen pyramid level.
    double scale=scaleAndBlur(img_,cvRound(log(1.0*resultBox_.width/(initSize_.width))/log(1.2)),scaledImg,blurredImg,GaussBlurKernelSize);
    TLDDetector::generateScanGrid(img_.rows,img_.cols,initSize_,scanGrid);
    getClosestN(scanGrid,Rect2d(resultBox_.x/scale,resultBox_.y/scale,resultBox_.width/scale,resultBox_.height/scale),10,closest);
    Point2f center;
    Size2f size;
    for(int i=0;i<(int)closest.size();i++){
        for(int j=0;j<10;j++){
            Mat_<uchar> standardPatch(15,15),blurredPatch(initSize_);
            // Randomly perturb the window: +-1% translation/scale, +-5 deg.
            center.x=(float)(closest[i].x+closest[i].width*(0.5+rng.uniform(-0.01,0.01)));
            center.y=(float)(closest[i].y+closest[i].height*(0.5+rng.uniform(-0.01,0.01)));
            size.width=(float)(closest[i].width*rng.uniform((double)0.99,(double)1.01));
            size.height=(float)(closest[i].height*rng.uniform((double)0.99,(double)1.01));
            float angle=(float)rng.uniform(-5.0,5.0);
            // Fill the sharp patch first; the BLUR_AS_VADIM branch derives
            // the blurred patch from it and needs it to be valid.
            resample(scaledImg,RotatedRect(center,size,angle),standardPatch);
#ifdef BLUR_AS_VADIM
            GaussianBlur(standardPatch,blurredPatch,GaussBlurKernelSize,0.0);
#else
            resample(blurredImg,RotatedRect(center,size,angle),blurredPatch);
#endif
            // Add mild Gaussian pixel noise to the model patch.
            for(int y=0;y<standardPatch.rows;y++){
                for(int x=0;x<standardPatch.cols;x++){
                    standardPatch(y,x)+=(uchar)rng.gaussian(5.0);
                }
            }
            examplesForModel.push_back(standardPatch);
            examplesForEnsemble.push_back(blurredPatch);
        }
    }
    return 0;
}
bool Nexpert::operator()(Rect2d box){
    // N-expert: a box is accepted as a negative example only if it overlaps
    // the tracker's confident result by at least 20%.
    return !(overlap(resultBox_, box) < 0.2);
}
Data::Data(Rect2d initBox){
    // Normalize the initial bounding box so that its smaller side becomes
    // 20 pixels; minSize is the tracking window at that canonical scale.
    const double smallerSide = MIN(initBox.width, initBox.height);
    scale = 20.0/smallerSide;
    minSize.width = (int)(initBox.width*20.0/smallerSide);
    minSize.height = (int)(initBox.height*20.0/smallerSide);
    frameNum = 0;
    dprintf(("minSize= %dx%d\n", minSize.width, minSize.height));
}
// Dumps the per-frame tracking state to the given stream for debugging.
// NOTE(review): dfprintf is presumably a debug macro that compiles out in
// release builds -- confirm against its definition earlier in the file.
void Data::printme(FILE* port){
    dfprintf((port,"Data:\n"));
    dfprintf((port,"\tframeNum=%d\n",frameNum));
    dfprintf((port,"\tconfident=%s\n",confident?"true":"false"));
    dfprintf((port,"\tfailedLastTime=%s\n",failedLastTime?"true":"false"));
    dfprintf((port,"\tminSize=%dx%d\n",minSize.width,minSize.height));
}
// Dumps the sizes of the positive/negative example stores for debugging.
void TrackerTLDModel::printme(FILE* port){
    dfprintf((port,"TrackerTLDModel:\n"));
    dfprintf((port,"\tpositiveExamples.size()=%d\n",(int)positiveExamples.size()));
    dfprintf((port,"\tnegativeExamples.size()=%d\n",(int)negativeExamples.size()));
}
// Debug mouse handler: on left click, re-runs every stage of the detection
// cascade (variance test, ensemble, Sr/Sc nearest-neighbor scores) for the
// single scan-grid window under the cursor and prints the intermediate
// results, then shows the window highlighted on a copy of the frame.
void MyMouseCallbackDEBUG::onMouse( int event, int x, int y){
    if(event== EVENT_LBUTTONDOWN){
        Mat imgCanvas;
        img_.copyTo(imgCanvas);
        TrackerTLDModel* tldModel=((TrackerTLDModel*)static_cast<TrackerModel*>(detector_->model));
        Size initSize=tldModel->getMinSize();
        Mat_<uchar> standardPatch(15,15);
        double originalVariance=tldModel->getOriginalVariance();;
        double tmp;
        Mat resized_img,blurred_img;
        // Inspect a single, hard-coded pyramid level (one 1.2x step down).
        double scale=1.2;
        //double scale=1.2*1.2*1.2*1.2;
        Size2d size(img_.cols/scale,img_.rows/scale);
        resize(img_,resized_img,size);
        resize(imgBlurred_,blurred_img,size);
        Mat_<double> intImgP,intImgP2;
        detector_->computeIntegralImages(resized_img,intImgP,intImgP2);
        // Map the clicked pixel to the scan-grid cell (i,j) it falls into;
        // the grid step is a tenth of the window size, as in the detector.
        int dx=initSize.width/10, dy=initSize.height/10,
            i=(int)(x/scale/dx), j=(int)(y/scale/dy);
        dfprintf((stderr,"patchVariance=%s\n",(detector_->patchVariance(intImgP,intImgP2,originalVariance,Point(dx*i,dy*j),initSize))?"true":"false"));
        dfprintf((stderr,"p=%f\n",(detector_->ensembleClassifierNum(&blurred_img.at<uchar>(dy*j,dx*i),(int)blurred_img.step[0]))));
        // NOTE(review): plain fprintf here while the rest uses dfprintf --
        // looks unintentional; confirm whether it should be dfprintf too.
        fprintf(stderr,"ensembleClassifier=%s\n",
                (detector_->ensembleClassifier(&blurred_img.at<uchar>(dy*j,dx*i),(int)blurred_img.step[0]))?"true":"false");
        resample(resized_img,Rect2d(Point(dx*i,dy*j),initSize),standardPatch);
        tmp=tldModel->Sr(standardPatch);
        dfprintf((stderr,"Sr=%f\n",tmp));
        dfprintf((stderr,"isObject=%s\n",(tmp>THETA_NN)?"true":"false"));
        dfprintf((stderr,"shouldBeIntegrated=%s\n",(abs(tmp-THETA_NN)<0.1)?"true":"false"));
        dfprintf((stderr,"Sc=%f\n",tldModel->Sc(standardPatch)));
        // Draw the inspected window back at original-frame coordinates.
        rectangle(imgCanvas,Rect2d(Point2d(scale*dx*i,scale*dy*j),Size2d(initSize.width*scale,initSize.height*scale)), 0, 2, 1 );
        imshow("picker",imgCanvas);
        waitKey();
    }
}
void TrackerTLDModel::pushIntoModel(const Mat_<uchar>& example,bool positive){
std::vector<Mat_<uchar> >* proxyV;
unsigned int* proxyN;
std::vector<unsigned int>* proxyT;
if(positive){
proxyV=&positiveExamples;
proxyN=&timeStampPositiveNext;
proxyT=&timeStampsPositive;
}else{
proxyV=&negativeExamples;
proxyN=&timeStampNegativeNext;
proxyT=&timeStampsNegative;
}
if(proxyV->size()<MAX_EXAMPLES_IN_MODEL){
proxyV->push_back(example);
proxyT->push_back(*proxyN);
}else{
int index=rng.uniform((int)0,(int)proxyV->size());
(*proxyV)[index]=example;
(*proxyT)[index]=(*proxyN);
}
(*proxyN)++;
}
} /* namespace cv */
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment