Commit 26f16e64 authored by Vladimir

Merge pull request #1 from Auron-X/TLD_fixes_&_optimizations

Tld fixes & optimizations
parents 844c30e8 1c70b5a0
@@ -306,6 +306,7 @@ The first argument is the name of the tracker and the second is a video source.
*/
-#include "opencv2/tracking/tracker.hpp"
+#include <opencv2/tracking/tracker.hpp>
+#include <opencv2/tracking/tldDataset.hpp>
#endif //__OPENCV_TRACKING_LENLEN
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_TLD_DATASET
#define OPENCV_TLD_DATASET
#include "opencv2/highgui.hpp"
namespace cv
{
namespace tld
{
CV_EXPORTS cv::Rect2d tld_InitDataset(int datasetInd, const char* rootPath = "TLD_dataset");
CV_EXPORTS cv::Mat tld_getNextDatasetFrame();
}
}
#endif
\ No newline at end of file
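For orientation, here is a minimal sketch of how this dataset helper can be driven (illustrative only: the window name is arbitrary and it assumes the TLD dataset folders such as "02_jumping" sit under ./TLD_dataset next to the executable):

#include <opencv2/tracking/tldDataset.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>

int main()
{
    // Returns the ground-truth bounding box of the first frame of sequence 2 ("02_jumping")
    cv::Rect2d initBox = cv::tld::tld_InitDataset(2, "TLD_dataset");
    std::cout << "Initial bounding box: " << initBox.x << " " << initBox.y << " "
              << initBox.width << " " << initBox.height << std::endl;
    for (;;)
    {
        cv::Mat frame = cv::tld::tld_getNextDatasetFrame();
        if (frame.empty())
            break;
        cv::imshow("TLD dataset", frame);
        if (cv::waitKey(30) == 27)
            break;
    }
    return 0;
}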
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
#define NUM_TEST_FRAMES 500
#define TEST_VIDEO_INDEX 1 //TLD Dataset Video Index from 1-10
//#define RECORD_VIDEO_FLG
static Mat image;
static Rect2d boundingBox;
static bool paused;
static bool selectObject = false;
static bool startSelection = false;
static void onMouse(int event, int x, int y, int, void*)
{
if (!selectObject)
{
switch (event)
{
case EVENT_LBUTTONDOWN:
//set origin of the bounding box
startSelection = true;
boundingBox.x = x;
boundingBox.y = y;
boundingBox.width = boundingBox.height = 0;
break;
case EVENT_LBUTTONUP:
//set width and height of the bounding box
boundingBox.width = std::abs(x - boundingBox.x);
boundingBox.height = std::abs(y - boundingBox.y);
paused = false;
selectObject = true;
break;
case EVENT_MOUSEMOVE:
if (startSelection && !selectObject)
{
//draw the bounding box
Mat currentFrame;
image.copyTo(currentFrame);
rectangle(currentFrame, Point((int)boundingBox.x, (int)boundingBox.y), Point(x, y), Scalar(255, 0, 0), 2, 1);
imshow("Tracking API", currentFrame);
}
break;
}
}
}
int main()
{
//
// "MIL", "BOOSTING", "MEDIANFLOW", "TLD"
//
string tracker_algorithm_name = "TLD";
Mat frame;
paused = false;
namedWindow("Tracking API", 0);
setMouseCallback("Tracking API", onMouse, 0);
Ptr<Tracker> tracker = Tracker::create(tracker_algorithm_name);
if (tracker == NULL)
{
cout << "***Error in the instantiation of the tracker...***\n";
getchar();
return 0;
}
//Get the first frame
////Open the capture
// VideoCapture cap(0);
// if( !cap.isOpened() )
// {
// cout << "Video stream error";
// return;
// }
//cap >> frame;
//From TLD dataset
selectObject = true;
boundingBox = tld::tld_InitDataset(TEST_VIDEO_INDEX, "D:/opencv/TLD_dataset");
frame = tld::tld_getNextDatasetFrame();
frame.copyTo(image);
// Setup output video
#ifdef RECORD_VIDEO_FLG
String outputFilename = "test.avi";
VideoWriter outputVideo;
outputVideo.open(outputFilename, -1, 30, Size(image.cols, image.rows));
if (!outputVideo.isOpened())
{
std::cout << "!!! Output video could not be opened" << std::endl;
getchar();
return -1;
}
#endif
rectangle(image, boundingBox, Scalar(255, 0, 0), 2, 1);
imshow("Tracking API", image);
bool initialized = false;
int frameCounter = 0;
//Time measurement
int64 e3 = getTickCount();
for (;;)
{
//Time measurement
int64 e1 = getTickCount();
//Frame num
frameCounter++;
if (frameCounter == NUM_TEST_FRAMES) break;
char c = (char)waitKey(2);
if (c == 'q' || c == 27)
break;
if (c == 'p')
paused = !paused;
if (!paused)
{
//cap >> frame;
frame = tld::tld_getNextDatasetFrame();
if (frame.empty())
{
break;
}
frame.copyTo(image);
if (selectObject)
{
if (!initialized)
{
//initializes the tracker
if (!tracker->init(frame, boundingBox))
{
cout << "***Could not initialize tracker...***\n";
return 0;
}
initialized = true;
rectangle(image, boundingBox, Scalar(255, 0, 0), 2, 1);
}
else
{
//updates the tracker
if (tracker->update(frame, boundingBox))
{
rectangle(image, boundingBox, Scalar(255, 0, 0), 2, 1);
}
}
}
imshow("Tracking API", image);
#ifdef RECORD_VIDEO_FLG
outputVideo << image;
#endif
//Time measurement
int64 e2 = getTickCount();
double t1 = (e2 - e1) / getTickFrequency();
cout << frameCounter << "\tframe : " << t1 * 1000.0 << "ms" << endl;
//waitKey(0);
}
}
//Time measurement
int64 e4 = getTickCount();
double t2 = (e4 - e3) / getTickFrequency();
cout << "Average Time for Frame: " << t2 * 1000.0 / frameCounter << "ms" << endl;
cout << "Average FPS: " << 1.0 / t2*frameCounter << endl;
waitKey(0);
return 0;
}
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/tracking/tldDataset.hpp"
namespace cv
{
namespace tld
{
char tldRootPath[100];
int frameNum = 0;
bool flagPNG = false;
cv::Rect2d tld_InitDataset(int datasetInd,const char* rootPath)
{
char* folderName = (char *)"";
int x, y, w, h;
flagPNG = false;
frameNum = 1;
if (datasetInd == 1) {
folderName = (char *)"01_david";
x = 165, y = 83;
w = 51; h = 54;
frameNum = 100;
}
if (datasetInd == 2) {
folderName = (char *)"02_jumping";
x = 147, y = 110;
w = 33; h = 32;
}
if (datasetInd == 3) {
folderName = (char *)"03_pedestrian1";
x = 47, y = 51;
w = 21; h = 36;
}
if (datasetInd == 4) {
folderName = (char *)"04_pedestrian2";
x = 130, y = 134;
w = 21; h = 53;
}
if (datasetInd == 5) {
folderName = (char *)"05_pedestrian3";
x = 154, y = 102;
w = 24; h = 52;
}
if (datasetInd == 6) {
folderName = (char *)"06_car";
x = 142, y = 125;
w = 90; h = 39;
}
if (datasetInd == 7) {
folderName = (char *)"07_motocross";
x = 290, y = 43;
w = 23; h = 40;
flagPNG = true;
}
if (datasetInd == 8) {
folderName = (char *)"08_volkswagen";
x = 273, y = 77;
w = 27; h = 25;
}
if (datasetInd == 9) {
folderName = (char *)"09_carchase";
x = 145, y = 84;
w = 54; h = 37;
}
if (datasetInd == 10){
folderName = (char *)"10_panda";
x = 58, y = 100;
w = 27; h = 22;
}
strcpy(tldRootPath, rootPath);
strcat(tldRootPath, "\\");
strcat(tldRootPath, folderName);
return cv::Rect2d(x, y, w, h);
}
cv::Mat tld_getNextDatasetFrame()
{
char fullPath[100];
char numStr[10];
strcpy(fullPath, tldRootPath);
strcat(fullPath, "\\");
if (frameNum < 10) strcat(fullPath, "0000");
else if (frameNum < 100) strcat(fullPath, "000");
else if (frameNum < 1000) strcat(fullPath, "00");
else if (frameNum < 10000) strcat(fullPath, "0");
sprintf(numStr, "%d", frameNum);
strcat(fullPath, numStr);
if (flagPNG) strcat(fullPath, ".png");
else strcat(fullPath, ".jpg");
frameNum++;
return cv::imread(fullPath);
}
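// Example of the path this produces: with rootPath "TLD_dataset" and datasetInd 2 ("02_jumping"),
// frame 7 is read from "TLD_dataset\02_jumping\00007.jpg" (five-digit zero-padded index;
// ".png" instead of ".jpg" when flagPNG is set, as for the "07_motocross" sequence).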
}
}
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "tldDetector.hpp"
namespace cv
{
namespace tld
{
// Calculate offsets for classifiers
void TLDDetector::prepareClassifiers(int rowstep)
{
for (int i = 0; i < (int)classifiers.size(); i++)
classifiers[i].prepareClassifier(rowstep);
}
// Calculate the posterior probability that the patch belongs to the current EC model
double TLDDetector::ensembleClassifierNum(const uchar* data)
{
double p = 0;
for (int k = 0; k < (int)classifiers.size(); k++)
p += classifiers[k].posteriorProbabilityFast(data);
p /= classifiers.size();
return p;
}
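// i.e. the returned value is the mean of the per-fern posteriors, p = (1/N) * sum_k P_k(patch);
// detect() treats a patch as passing this stage only when p > ENSEMBLE_THRESHOLD.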
// Calculate Relative similarity of the patch (NN-Model)
double TLDDetector::Sr(const Mat_<uchar>& patch)
{
double splus = 0.0, sminus = 0.0;
for (int i = 0; i < (int)(*positiveExamples).size(); i++)
splus = std::max(splus, 0.5 * (NCC((*positiveExamples)[i], patch) + 1.0));
for (int i = 0; i < (int)(*negativeExamples).size(); i++)
sminus = std::max(sminus, 0.5 * (NCC((*negativeExamples)[i], patch) + 1.0));
if (splus + sminus == 0.0)
return 0.0;
return splus / (sminus + splus);
}
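// i.e. Sr = S+ / (S+ + S-), where S+ (S-) is the best NCC-based match against the positive
// (negative) examples, with each NCC value remapped from [-1, 1] to [0, 1] by 0.5 * (NCC + 1).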
// Calculate Conservative similarity of the patch (NN-Model)
double TLDDetector::Sc(const Mat_<uchar>& patch)
{
double splus = 0.0, sminus = 0.0;
int med = getMedian((*timeStampsPositive));
for (int i = 0; i < (int)(*positiveExamples).size(); i++)
{
if ((int)(*timeStampsPositive)[i] <= med)
splus = std::max(splus, 0.5 * (NCC((*positiveExamples)[i], patch) + 1.0));
}
for (int i = 0; i < (int)(*negativeExamples).size(); i++)
sminus = std::max(sminus, 0.5 * (NCC((*negativeExamples)[i], patch) + 1.0));
if (splus + sminus == 0.0)
return 0.0;
return splus / (sminus + splus);
}
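// Same ratio as Sr, except that S+ is taken only over the "early" positive examples
// (time stamps not later than the median), which makes the estimate more conservative.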
// Generate Search Windows for detector from aspect ratio of initial BBs
void TLDDetector::generateScanGrid(int rows, int cols, Size initBox, std::vector<Rect2d>& res, bool withScaling)
{
res.clear();
//Scales step: SCALE_STEP; Translation steps: 10% of width & 10% of height; minSize: 20pix
for (double h = initBox.height, w = initBox.width; h < cols && w < rows;)
{
for (double x = 0; (x + w + 1.0) <= cols; x += (0.1 * w))
{
for (double y = 0; (y + h + 1.0) <= rows; y += (0.1 * h))
res.push_back(Rect2d(x, y, w, h));
}
if (withScaling)
{
if (h <= initBox.height)
{
h /= SCALE_STEP; w /= SCALE_STEP;
if (h < 20 || w < 20)
{
h = initBox.height * SCALE_STEP; w = initBox.width * SCALE_STEP;
CV_Assert(h > initBox.height || w > initBox.width);
}
}
else
{
h *= SCALE_STEP; w *= SCALE_STEP;
}
}
else
{
break;
}
}
//dprintf(("%d rects in res\n", (int)res.size()));
}
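// Rough size estimate: for a 320x240 frame and a 51x54 initial box, the first scale alone yields
// about ((320 - 52) / 5.1) = 53 x positions and ((240 - 55) / 5.4) = 34 y positions, i.e. roughly
// 1800 windows, before further scales are added when withScaling is true.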
//Detection - returns most probable new target location (Max Sc)
bool TLDDetector::detect(const Mat& img, const Mat& imgBlurred, Rect2d& res, std::vector<LabeledPatch>& patches, Size initSize)
{
patches.clear();
Mat resized_img, blurred_img;
Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE, STANDARD_PATCH_SIZE);
img.copyTo(resized_img);
imgBlurred.copyTo(blurred_img);
int dx = initSize.width / 10, dy = initSize.height / 10;
Size2d size = img.size();
double scale = 1.0;
int total = 0, pass = 0;
int npos = 0, nneg = 0;
double tmp = 0, maxSc = -5.0;
Rect2d maxScRect;
//Detection part
//To fix: use precalculated BB
do
{
Mat_<double> intImgP, intImgP2;
computeIntegralImages(resized_img, intImgP, intImgP2);
prepareClassifiers((int)blurred_img.step[0]);
for (int i = 0, imax = cvFloor((0.0 + resized_img.cols - initSize.width) / dx); i < imax; i++)
{
for (int j = 0, jmax = cvFloor((0.0 + resized_img.rows - initSize.height) / dy); j < jmax; j++)
{
LabeledPatch labPatch;
total++;
if (!patchVariance(intImgP, intImgP2, originalVariancePtr, Point(dx * i, dy * j), initSize))
continue;
if (ensembleClassifierNum(&blurred_img.at<uchar>(dy * j, dx * i)) <= ENSEMBLE_THRESHOLD)
continue;
pass++;
labPatch.rect = Rect2d(dx * i * scale, dy * j * scale, initSize.width * scale, initSize.height * scale);
resample(resized_img, Rect2d(Point(dx * i, dy * j), initSize), standardPatch);
tmp = Sr(standardPatch);
////To fix: Check the paper; this probably causes wrong learning
//
labPatch.isObject = tmp > THETA_NN;
labPatch.shouldBeIntegrated = abs(tmp - THETA_NN) < 0.1;
patches.push_back(labPatch);
//
if (!labPatch.isObject)
{
nneg++;
continue;
}
else
{
npos++;
}
tmp = Sc(standardPatch);
if (tmp > maxSc)
{
maxSc = tmp;
maxScRect = labPatch.rect;
}
}
}
size.width /= SCALE_STEP;
size.height /= SCALE_STEP;
scale *= SCALE_STEP;
resize(img, resized_img, size, 0, 0, DOWNSCALE_MODE);
GaussianBlur(resized_img, blurred_img, GaussBlurKernelSize, 0.0f);
} while (size.width >= initSize.width && size.height >= initSize.height);
if (maxSc < 0)
return false;
res = maxScRect;
return true;
}
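// In short, every scanning window must pass three cascaded tests: (1) patch variance against
// 0.5 * originalVariance, (2) averaged fern posterior against ENSEMBLE_THRESHOLD, and
// (3) relative similarity Sr against THETA_NN; among the survivors, the window with the highest
// conservative similarity Sc is returned as the detection.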
// Computes the variance of the subimage given by box, using the two integral images
// intImgP (sum) and intImgP2 (sum of squares), which must also be provided.
bool TLDDetector::patchVariance(Mat_<double>& intImgP, Mat_<double>& intImgP2, double *originalVariance, Point pt, Size size)
{
int x = (pt.x), y = (pt.y), width = (size.width), height = (size.height);
CV_Assert(0 <= x && (x + width) < intImgP.cols && (x + width) < intImgP2.cols);
CV_Assert(0 <= y && (y + height) < intImgP.rows && (y + height) < intImgP2.rows);
double p = 0, p2 = 0;
double A, B, C, D;
A = intImgP(y, x);
B = intImgP(y, x + width);
C = intImgP(y + height, x);
D = intImgP(y + height, x + width);
p = (A + D - B - C) / (width * height);
A = intImgP2(y, x);
B = intImgP2(y, x + width);
C = intImgP2(y + height, x);
D = intImgP2(y + height, x + width);
p2 = (A + D - B - C) / (width * height);
return ((p2 - p * p) > VARIANCE_THRESHOLD * *originalVariance);
}
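// Standard integral-image trick: the corner combination A + D - B - C of intImgP gives the patch
// sum (so p is the mean), the same combination on intImgP2 gives the mean of squares (p2), and the
// variance follows as E[X^2] - E[X]^2 = p2 - p * p.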
}
}
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_TLD_DETECTOR
#define OPENCV_TLD_DETECTOR
#include "precomp.hpp"
#include "tldEnsembleClassifier.hpp"
#include "tldUtils.hpp"
namespace cv
{
namespace tld
{
const int STANDARD_PATCH_SIZE = 15;
const int NEG_EXAMPLES_IN_INIT_MODEL = 300;
const int MAX_EXAMPLES_IN_MODEL = 500;
const int MEASURES_PER_CLASSIFIER = 13;
const int GRIDSIZE = 15;
const int DOWNSCALE_MODE = cv::INTER_LINEAR;
const double THETA_NN = 0.50;
const double CORE_THRESHOLD = 0.5;
const double SCALE_STEP = 1.2;
const double ENSEMBLE_THRESHOLD = 0.5;
const double VARIANCE_THRESHOLD = 0.5;
const double NEXPERT_THRESHOLD = 0.2;
static const cv::Size GaussBlurKernelSize(3, 3);
class TLDDetector
{
public:
TLDDetector(){}
~TLDDetector(){}
inline double ensembleClassifierNum(const uchar* data);
inline void prepareClassifiers(int rowstep);
double Sr(const Mat_<uchar>& patch);
double Sc(const Mat_<uchar>& patch);
std::vector<TLDEnsembleClassifier> classifiers;
std::vector<Mat_<uchar> > *positiveExamples, *negativeExamples;
std::vector<int> *timeStampsPositive, *timeStampsNegative;
double *originalVariancePtr;
static void generateScanGrid(int rows, int cols, Size initBox, std::vector<Rect2d>& res, bool withScaling = false);
struct LabeledPatch
{
Rect2d rect;
bool isObject, shouldBeIntegrated;
};
bool detect(const Mat& img, const Mat& imgBlurred, Rect2d& res, std::vector<LabeledPatch>& patches, Size initSize);
protected:
friend class MyMouseCallbackDEBUG;
void computeIntegralImages(const Mat& img, Mat_<double>& intImgP, Mat_<double>& intImgP2){ integral(img, intImgP, intImgP2, CV_64F); }
inline bool patchVariance(Mat_<double>& intImgP, Mat_<double>& intImgP2, double *originalVariance, Point pt, Size size);
};
}
}
#endif
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "tldEnsembleClassifier.hpp"
namespace cv
{
namespace tld
{
// Constructor
TLDEnsembleClassifier::TLDEnsembleClassifier(const std::vector<Vec4b>& meas, int beg, int end) :lastStep_(-1)
{
int posSize = 1, mpc = end - beg;
for (int i = 0; i < mpc; i++)
posSize *= 2;
posAndNeg.assign(posSize, Point2i(0, 0));
measurements.assign(meas.begin() + beg, meas.begin() + end);
offset.assign(mpc, Point2i(0, 0));
}
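// posSize = 2^mpc: one (positive, negative) counter pair per possible fern code, e.g.
// 2^13 = 8192 bins when MEASURES_PER_CLASSIFIER = 13.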
// Calculate measure locations from 15x15 grid on minSize patches
void TLDEnsembleClassifier::stepPrefSuff(std::vector<Vec4b>& arr, int pos, int len, int gridSize)
{
#if 0
int step = len / (gridSize - 1), pref = (len - step * (gridSize - 1)) / 2;
for (int i = 0; i < (int)(sizeof(x1) / sizeof(x1[0])); i++)
arr[i] = pref + arr[i] * step;
#else
int total = len - gridSize;
int quo = total / (gridSize - 1), rem = total % (gridSize - 1);
int smallStep = quo, bigStep = quo + 1;
int bigOnes = rem, smallOnes = gridSize - bigOnes - 1;
int bigOnes_front = bigOnes / 2, bigOnes_back = bigOnes - bigOnes_front;
for (int i = 0; i < (int)arr.size(); i++)
{
if (arr[i].val[pos] < bigOnes_back)
{
arr[i].val[pos] = (uchar)(arr[i].val[pos] * bigStep + arr[i].val[pos]);
continue;
}
if (arr[i].val[pos] < (bigOnes_front + smallOnes))
{
arr[i].val[pos] = (uchar)(bigOnes_front * bigStep + (arr[i].val[pos] - bigOnes_front) * smallStep + arr[i].val[pos]);
continue;
}
if (arr[i].val[pos] < (bigOnes_front + smallOnes + bigOnes_back))
{
arr[i].val[pos] =
(uchar)(bigOnes_front * bigStep + smallOnes * smallStep +
(arr[i].val[pos] - (bigOnes_front + smallOnes)) * bigStep + arr[i].val[pos]);
continue;
}
arr[i].val[pos] = (uchar)(len - 1);
}
#endif
}
// Calculate offsets for classifier
void TLDEnsembleClassifier::prepareClassifier(int rowstep)
{
if (lastStep_ != rowstep)
{
lastStep_ = rowstep;
for (int i = 0; i < (int)offset.size(); i++)
{
offset[i].x = rowstep * measurements[i].val[2] + measurements[i].val[0];
offset[i].y = rowstep * measurements[i].val[3] + measurements[i].val[1];
}
}
}
// Integrate patch into the Ensemble Classifier model
void TLDEnsembleClassifier::integrate(const Mat_<uchar>& patch, bool isPositive)
{
int position = code(patch.data, (int)patch.step[0]);
if (isPositive)
posAndNeg[position].x++;
else
posAndNeg[position].y++;
}
// Calculate posterior probability on the patch
double TLDEnsembleClassifier::posteriorProbability(const uchar* data, int rowstep) const
{
int position = code(data, rowstep);
double posNum = (double)posAndNeg[position].x, negNum = (double)posAndNeg[position].y;
if (posNum == 0.0 && negNum == 0.0)
return 0.0;
else
return posNum / (posNum + negNum);
}
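// Worked example: if integrate() has mapped three positive patches and one negative patch to the
// same code, the posterior for that code is 3 / (3 + 1) = 0.75.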
double TLDEnsembleClassifier::posteriorProbabilityFast(const uchar* data) const
{
int position = codeFast(data);
double posNum = (double)posAndNeg[position].x, negNum = (double)posAndNeg[position].y;
if (posNum == 0.0 && negNum == 0.0)
return 0.0;
else
return posNum / (posNum + negNum);
}
// Calculate the 13-bit fern index
int TLDEnsembleClassifier::codeFast(const uchar* data) const
{
int position = 0;
for (int i = 0; i < (int)measurements.size(); i++)
{
position = position << 1;
if (data[offset[i].x] < data[offset[i].y])
position++;
}
return position;
}
int TLDEnsembleClassifier::code(const uchar* data, int rowstep) const
{
int position = 0;
for (int i = 0; i < (int)measurements.size(); i++)
{
position = position << 1;
if (*(data + rowstep * measurements[i].val[2] + measurements[i].val[0]) <
*(data + rowstep * measurements[i].val[3] + measurements[i].val[1]))
{
position++;
}
}
return position;
}
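// Each pixel-pair comparison contributes one bit, so with 13 measurements per classifier the code
// indexes one of the 2^13 posAndNeg bins maintained by integrate().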
// Create fern classifiers
int TLDEnsembleClassifier::makeClassifiers(Size size, int measurePerClassifier, int gridSize,
std::vector<TLDEnsembleClassifier>& classifiers)
{
std::vector<Vec4b> measurements;
//Generate random measures for 10 ferns x 13 measures
for (int i = 0; i < 10*measurePerClassifier; i++)
{
Vec4b m;
m.val[0] = rand() % 15;
m.val[1] = rand() % 15;
m.val[2] = rand() % 15;
m.val[3] = rand() % 15;
measurements.push_back(m);
}
//Warp measures to minSize patch coordinates
stepPrefSuff(measurements, 0, size.width, gridSize);
stepPrefSuff(measurements, 1, size.width, gridSize);
stepPrefSuff(measurements, 2, size.height, gridSize);
stepPrefSuff(measurements, 3, size.height, gridSize);
//Compile fern classifiers
for (int i = 0, howMany = (int)measurements.size() / measurePerClassifier; i < howMany; i++)
classifiers.push_back(TLDEnsembleClassifier(measurements, i * measurePerClassifier, (i + 1) * measurePerClassifier));
return (int)classifiers.size();
}
}
}
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <vector>
#include "precomp.hpp"
namespace cv
{
namespace tld
{
class TLDEnsembleClassifier
{
public:
static int makeClassifiers(Size size, int measurePerClassifier, int gridSize, std::vector<TLDEnsembleClassifier>& classifiers);
void integrate(const Mat_<uchar>& patch, bool isPositive);
double posteriorProbability(const uchar* data, int rowstep) const;
double posteriorProbabilityFast(const uchar* data) const;
void prepareClassifier(int rowstep);
private:
TLDEnsembleClassifier(const std::vector<Vec4b>& meas, int beg, int end);
static void stepPrefSuff(std::vector<Vec4b> & arr, int pos, int len, int gridSize);
int code(const uchar* data, int rowstep) const;
int codeFast(const uchar* data) const;
std::vector<Point2i> posAndNeg;
std::vector<Vec4b> measurements;
std::vector<Point2i> offset;
int lastStep_;
};
}
}
\ No newline at end of file
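A hedged sketch of how this factory is typically driven from inside the tracking module (the constants GRIDSIZE and MEASURES_PER_CLASSIFIER come from tldDetector.hpp; the 20x20 minimal patch size and the helper function name are illustrative):

#include "tldDetector.hpp" // brings in TLDEnsembleClassifier, GRIDSIZE, MEASURES_PER_CLASSIFIER

static void buildEnsemble()
{
    std::vector<cv::tld::TLDEnsembleClassifier> classifiers;
    // 10 * 13 random measurements are generated internally, so this builds 10 fern classifiers
    int numFerns = cv::tld::TLDEnsembleClassifier::makeClassifiers(
        cv::Size(20, 20), cv::tld::MEASURES_PER_CLASSIFIER, cv::tld::GRIDSIZE, classifiers);
    CV_Assert(numFerns == (int)classifiers.size());
}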
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_TLD_MODEL
#define OPENCV_TLD_MODEL
#include "precomp.hpp"
#include "tldDetector.hpp"
#include "tldUtils.hpp"
namespace cv
{
namespace tld
{
class TrackerTLDModel : public TrackerModel
{
public:
TrackerTLDModel(TrackerTLD::Params params, const Mat& image, const Rect2d& boundingBox, Size minSize);
Rect2d getBoundingBox(){ return boundingBox_; }
void setBoudingBox(Rect2d boundingBox){ boundingBox_ = boundingBox; }
void integrateRelabeled(Mat& img, Mat& imgBlurred, const std::vector<TLDDetector::LabeledPatch>& patches);
void integrateAdditional(const std::vector<Mat_<uchar> >& eForModel, const std::vector<Mat_<uchar> >& eForEnsemble, bool isPositive);
Size getMinSize(){ return minSize_; }
void printme(FILE* port = stdout);
Ptr<TLDDetector> detector;
std::vector<Mat_<uchar> > positiveExamples, negativeExamples;
std::vector<int> timeStampsPositive, timeStampsNegative;
int timeStampPositiveNext, timeStampNegativeNext;
double originalVariance_;
double getOriginalVariance(){ return originalVariance_; }
protected:
Size minSize_;
TrackerTLD::Params params_;
void pushIntoModel(const Mat_<uchar>& example, bool positive);
void modelEstimationImpl(const std::vector<Mat>& /*responses*/){}
void modelUpdateImpl(){}
Rect2d boundingBox_;
RNG rng;
};
}
}
#endif
\ No newline at end of file
@@ -39,87 +39,140 @@
//
//M*/
+#ifndef OPENCV_TLD_TRACKER
+#define OPENCV_TLD_TRACKER
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
+#include "tldModel.hpp"
#include<algorithm>
#include<limits.h>
-namespace cv {namespace tld
-{
-//debug functions and variables
-#define ALEX_DEBUG
-#ifdef ALEX_DEBUG
-#define dfprintf(x) fprintf x
-#define dprintf(x) printf x
-#else
-#define dfprintf(x)
-#define dprintf(x)
-#endif
-#define MEASURE_TIME(a)\
-{\
-clock_t start; float milisec = 0.0; \
-start = clock(); {a} milisec = 1000.0 * (clock() - start) / CLOCKS_PER_SEC; \
-dprintf(("%-90s took %f milis\n", #a, milisec));\
-}
-#define HERE dprintf(("line %d\n", __LINE__)); fflush(stderr);
-#define START_TICK(name)\
-{ \
-clock_t start; double milisec = 0.0; start = clock();
-#define END_TICK(name) milisec = 1000.0 * (clock() - start) / CLOCKS_PER_SEC; \
-dprintf(("%s took %f milis\n", name, milisec)); \
-}
-extern Rect2d etalon;
-void myassert(const Mat& img);
-void printPatch(const Mat_<uchar>& standardPatch);
-std::string type2str(const Mat& mat);
-void drawWithRects(const Mat& img, std::vector<Rect2d>& blackOnes, Rect2d whiteOne = Rect2d(-1.0, -1.0, -1.0, -1.0));
-void drawWithRects(const Mat& img, std::vector<Rect2d>& blackOnes, std::vector<Rect2d>& whiteOnes, String fileName = "");
-//aux functions and variables
-template<typename T> inline T CLIP(T x, T a, T b){ return std::min(std::max(x, a), b); }
-/** Computes overlap between the two given rectangles. Overlap is computed as ratio of rectangles' intersection to that
- * of their union.*/
-double overlap(const Rect2d& r1, const Rect2d& r2);
-/** Resamples the area surrounded by r2 in img so it matches the size of samples, where it is written.*/
-void resample(const Mat& img, const RotatedRect& r2, Mat_<uchar>& samples);
-/** Specialization of resample() for rectangles without retation for better performance and simplicity.*/
-void resample(const Mat& img, const Rect2d& r2, Mat_<uchar>& samples);
-/** Computes the variance of single given image.*/
-double variance(const Mat& img);
-/** Computes normalized corellation coefficient between the two patches (they should be
- * of the same size).*/
-double NCC(const Mat_<uchar>& patch1, const Mat_<uchar>& patch2);
-void getClosestN(std::vector<Rect2d>& scanGrid, Rect2d bBox, int n, std::vector<Rect2d>& res);
-double scaleAndBlur(const Mat& originalImg, int scale, Mat& scaledImg, Mat& blurredImg, Size GaussBlurKernelSize, double scaleStep);
-int getMedian(const std::vector<int>& values, int size = -1);
-class TLDEnsembleClassifier
-{
-public:
-static int makeClassifiers(Size size, int measurePerClassifier, int gridSize, std::vector<TLDEnsembleClassifier>& classifiers);
-void integrate(const Mat_<uchar>& patch, bool isPositive);
-double posteriorProbability(const uchar* data, int rowstep) const;
-double posteriorProbabilityFast(const uchar* data) const;
-void prepareClassifier(int rowstep);
-private:
-TLDEnsembleClassifier(const std::vector<Vec4b>& meas, int beg, int end);
-static void stepPrefSuff(std::vector<Vec4b> & arr, int pos, int len, int gridSize);
-int code(const uchar* data, int rowstep) const;
-int codeFast(const uchar* data) const;
-std::vector<Point2i> posAndNeg;
-std::vector<Vec4b> measurements;
-std::vector<Point2i> offset;
-int lastStep_;
-};
-class TrackerProxy
-{
-public:
-virtual bool init(const Mat& image, const Rect2d& boundingBox) = 0;
-virtual bool update(const Mat& image, Rect2d& boundingBox) = 0;
-virtual ~TrackerProxy(){}
-};
-}}
+namespace cv
+{
+TrackerTLD::Params::Params(){}
+void TrackerTLD::Params::read(const cv::FileNode& /*fn*/){}
+void TrackerTLD::Params::write(cv::FileStorage& /*fs*/) const {}
+namespace tld
+{
+class TrackerProxy
+{
+public:
+virtual bool init(const Mat& image, const Rect2d& boundingBox) = 0;
+virtual bool update(const Mat& image, Rect2d& boundingBox) = 0;
+virtual ~TrackerProxy(){}
+};
+class MyMouseCallbackDEBUG
+{
+public:
+MyMouseCallbackDEBUG(Mat& img, Mat& imgBlurred, TLDDetector* detector) :img_(img), imgBlurred_(imgBlurred), detector_(detector){}
+static void onMouse(int event, int x, int y, int, void* obj){ ((MyMouseCallbackDEBUG*)obj)->onMouse(event, x, y); }
+MyMouseCallbackDEBUG& operator = (const MyMouseCallbackDEBUG& /*other*/){ return *this; }
+private:
+void onMouse(int event, int x, int y);
+Mat& img_, imgBlurred_;
+TLDDetector* detector_;
+};
+class Data
+{
+public:
+Data(Rect2d initBox);
+Size getMinSize(){ return minSize; }
+double getScale(){ return scale; }
+bool confident;
+bool failedLastTime;
+int frameNum;
+void printme(FILE* port = stdout);
+private:
+double scale;
+Size minSize;
+};
+template<class T, class Tparams>
+class TrackerProxyImpl : public TrackerProxy
+{
+public:
+TrackerProxyImpl(Tparams params = Tparams()) :params_(params){}
+bool init(const Mat& image, const Rect2d& boundingBox)
+{
+trackerPtr = T::createTracker();
+return trackerPtr->init(image, boundingBox);
+}
+bool update(const Mat& image, Rect2d& boundingBox)
+{
+return trackerPtr->update(image, boundingBox);
+}
+private:
+Ptr<T> trackerPtr;
+Tparams params_;
+Rect2d boundingBox_;
+};
+#define BLUR_AS_VADIM
+#undef CLOSED_LOOP
+class TrackerTLDImpl : public TrackerTLD
+{
+public:
+TrackerTLDImpl(const TrackerTLD::Params &parameters = TrackerTLD::Params());
+void read(const FileNode& fn);
+void write(FileStorage& fs) const;
+protected:
+class Pexpert
+{
+public:
+Pexpert(const Mat& img_in, const Mat& imgBlurred_in, Rect2d& resultBox_in,
+const TLDDetector* detector_in, TrackerTLD::Params params_in, Size initSize_in) :
+img_(img_in), imgBlurred_(imgBlurred_in), resultBox_(resultBox_in), detector_(detector_in), params_(params_in), initSize_(initSize_in){}
+bool operator()(Rect2d /*box*/){ return false; }
+int additionalExamples(std::vector<Mat_<uchar> >& examplesForModel, std::vector<Mat_<uchar> >& examplesForEnsemble);
+protected:
+Pexpert(){}
+Mat img_, imgBlurred_;
+Rect2d resultBox_;
+const TLDDetector* detector_;
+TrackerTLD::Params params_;
+RNG rng;
+Size initSize_;
+};
+class Nexpert : public Pexpert
+{
+public:
+Nexpert(const Mat& img_in, Rect2d& resultBox_in, const TLDDetector* detector_in, TrackerTLD::Params params_in)
+{
+img_ = img_in; resultBox_ = resultBox_in; detector_ = detector_in; params_ = params_in;
+}
+bool operator()(Rect2d box);
+int additionalExamples(std::vector<Mat_<uchar> >& examplesForModel, std::vector<Mat_<uchar> >& examplesForEnsemble)
+{
+examplesForModel.clear(); examplesForEnsemble.clear(); return 0;
+}
+};
+bool initImpl(const Mat& image, const Rect2d& boundingBox);
+bool updateImpl(const Mat& image, Rect2d& boundingBox);
+TrackerTLD::Params params;
+Ptr<Data> data;
+Ptr<TrackerProxy> trackerProxy;
+};
+}
+}
+#endif
\ No newline at end of file
@@ -39,20 +39,15 @@
//
//M*/
-#include "precomp.hpp"
-#include "opencv2/video/tracking.hpp"
-#include "opencv2/imgproc.hpp"
-#include "time.h"
-#include<algorithm>
-#include<limits.h>
-#include<math.h>
-#include<opencv2/highgui.hpp>
-#include "tld_tracker.hpp"
-namespace cv {namespace tld
+#include "tldUtils.hpp"
+namespace cv
+{
+namespace tld
{
-//debug functions and variables
+//Debug functions and variables
Rect2d etalon(14.0, 110.0, 20.0, 20.0);
void drawWithRects(const Mat& img, std::vector<Rect2d>& blackOnes, Rect2d whiteOne)
{
@@ -95,7 +90,6 @@ void myassert(const Mat& img)
}
dprintf(("black: %d out of %d (%f)\n", count, img.rows * img.cols, 1.0 * count / img.rows / img.cols));
}
void printPatch(const Mat_<uchar>& standardPatch)
{
for( int i = 0; i < standardPatch.rows; i++ )
@@ -105,7 +99,6 @@ void printPatch(const Mat_<uchar>& standardPatch)
dprintf(("\n"));
}
}
std::string type2str(const Mat& mat)
{
int type = mat.type();
@@ -131,7 +124,7 @@ std::string type2str(const Mat& mat)
return r;
}
-//generic functions
+//Scale & Blur image using scale Indx
double scaleAndBlur(const Mat& originalImg, int scale, Mat& scaledImg, Mat& blurredImg, Size GaussBlurKernelSize, double scaleStep)
{
double dScale = 1.0;
@@ -142,6 +135,8 @@ double scaleAndBlur(const Mat& originalImg, int scale, Mat& scaledImg, Mat& blur
GaussianBlur(scaledImg, blurredImg, GaussBlurKernelSize, 0.0);
return dScale;
}
+//Find N-closest BB to the target
void getClosestN(std::vector<Rect2d>& scanGrid, Rect2d bBox, int n, std::vector<Rect2d>& res)
{
if( n >= (int)scanGrid.size() )
@@ -180,6 +175,7 @@ void getClosestN(std::vector<Rect2d>& scanGrid, Rect2d bBox, int n, std::vector<
}
}
+//Calculate patch variance
double variance(const Mat& img)
{
double p = 0, p2 = 0;
@@ -196,6 +192,7 @@ double variance(const Mat& img)
return p2 - p * p;
}
+//Normalized Correlation Coefficient
double NCC(const Mat_<uchar>& patch1, const Mat_<uchar>& patch2)
{
CV_Assert( patch1.rows == patch2.rows );
@@ -217,6 +214,7 @@ double NCC(const Mat_<uchar>& patch1, const Mat_<uchar>& patch2)
double ares = (sq2 == 0) ? sq1 / abs(sq1) : (prod - s1 * s2 / N) / sq1 / sq2;
return ares;
}
int getMedian(const std::vector<int>& values, int size)
{
if( size == -1 )
@@ -229,6 +227,7 @@ int getMedian(const std::vector<int>& values, int size)
return copy[(size - 1) / 2];
}
+//Overlap between two BB
double overlap(const Rect2d& r1, const Rect2d& r2)
{
double a1 = r1.area(), a2 = r2.area(), a0 = (r1&r2).area();
@@ -251,6 +250,7 @@ void resample(const Mat& img, const RotatedRect& r2, Mat_<uchar>& samples)
b.copyTo(M.colRange(Range(2, 3)));
warpAffine(img, samples, M, samples.size());
}
void resample(const Mat& img, const Rect2d& r2, Mat_<uchar>& samples)
{
Mat_<float> M(2, 3);
@@ -259,146 +259,5 @@ void resample(const Mat& img, const Rect2d& r2, Mat_<uchar>& samples)
warpAffine(img, samples, M, samples.size());
}
-//other stuff
void TLDEnsembleClassifier::stepPrefSuff(std::vector<Vec4b>& arr, int pos, int len, int gridSize)
{
#if 0
int step = len / (gridSize - 1), pref = (len - step * (gridSize - 1)) / 2;
for( int i = 0; i < (int)(sizeof(x1) / sizeof(x1[0])); i++ )
arr[i] = pref + arr[i] * step;
#else
int total = len - gridSize;
int quo = total / (gridSize - 1), rem = total % (gridSize - 1);
int smallStep = quo, bigStep = quo + 1;
int bigOnes = rem, smallOnes = gridSize - bigOnes - 1;
int bigOnes_front = bigOnes / 2, bigOnes_back = bigOnes - bigOnes_front;
for( int i = 0; i < (int)arr.size(); i++ )
{
if( arr[i].val[pos] < bigOnes_back )
{
arr[i].val[pos] = (uchar)(arr[i].val[pos] * bigStep + arr[i].val[pos]);
continue;
}
if( arr[i].val[pos] < (bigOnes_front + smallOnes) )
{
arr[i].val[pos] = (uchar)(bigOnes_front * bigStep + (arr[i].val[pos] - bigOnes_front) * smallStep + arr[i].val[pos]);
continue;
}
if( arr[i].val[pos] < (bigOnes_front + smallOnes + bigOnes_back) )
{
arr[i].val[pos] =
(uchar)(bigOnes_front * bigStep + smallOnes * smallStep +
(arr[i].val[pos] - (bigOnes_front + smallOnes)) * bigStep + arr[i].val[pos]);
continue;
}
arr[i].val[pos] = (uchar)(len - 1);
}
#endif
}
void TLDEnsembleClassifier::prepareClassifier(int rowstep)
{
if( lastStep_ != rowstep )
{
lastStep_ = rowstep;
for( int i = 0; i < (int)offset.size(); i++ )
{
offset[i].x = rowstep * measurements[i].val[2] + measurements[i].val[0];
offset[i].y = rowstep * measurements[i].val[3] + measurements[i].val[1];
}
}
}
TLDEnsembleClassifier::TLDEnsembleClassifier(const std::vector<Vec4b>& meas, int beg, int end):lastStep_(-1)
{
int posSize = 1, mpc = end - beg;
for( int i = 0; i < mpc; i++ )
posSize *= 2;
posAndNeg.assign(posSize, Point2i(0, 0));
measurements.assign(meas.begin() + beg, meas.begin() + end);
offset.assign(mpc, Point2i(0, 0));
}
void TLDEnsembleClassifier::integrate(const Mat_<uchar>& patch, bool isPositive)
{
int position = code(patch.data, (int)patch.step[0]);
if( isPositive )
posAndNeg[position].x++;
else
posAndNeg[position].y++;
}
double TLDEnsembleClassifier::posteriorProbability(const uchar* data, int rowstep) const
{
int position = code(data, rowstep);
double posNum = (double)posAndNeg[position].x, negNum = (double)posAndNeg[position].y;
if( posNum == 0.0 && negNum == 0.0 )
return 0.0;
else
return posNum / (posNum + negNum);
}
double TLDEnsembleClassifier::posteriorProbabilityFast(const uchar* data) const
{
int position = codeFast(data);
double posNum = (double)posAndNeg[position].x, negNum = (double)posAndNeg[position].y;
if( posNum == 0.0 && negNum == 0.0 )
return 0.0;
else
return posNum / (posNum + negNum);
}
int TLDEnsembleClassifier::codeFast(const uchar* data) const
{
int position = 0;
for( int i = 0; i < (int)measurements.size(); i++ )
{
position = position << 1;
if( data[offset[i].x] < data[offset[i].y] )
position++;
}
return position;
}
int TLDEnsembleClassifier::code(const uchar* data, int rowstep) const
{
int position = 0;
for( int i = 0; i < (int)measurements.size(); i++ )
{
position = position << 1;
if( *(data + rowstep * measurements[i].val[2] + measurements[i].val[0]) <
*(data + rowstep * measurements[i].val[3] + measurements[i].val[1]) )
{
position++;
}
}
return position;
}
int TLDEnsembleClassifier::makeClassifiers(Size size, int measurePerClassifier, int gridSize,
std::vector<TLDEnsembleClassifier>& classifiers)
{
std::vector<Vec4b> measurements;
for( int i = 0; i < gridSize; i++ )
{
for( int j = 0; j < gridSize; j++ )
{
for( int k = 0; k < j; k++ )
{
Vec4b m;
m.val[0] = m.val[2] = (uchar)i;
m.val[1] = (uchar)j; m.val[3] = (uchar)k;
measurements.push_back(m);
m.val[1] = m.val[3] = (uchar)i;
m.val[0] = (uchar)j; m.val[2] = (uchar)k;
measurements.push_back(m);
}
}
}
random_shuffle(measurements.begin(), measurements.end());
stepPrefSuff(measurements, 0, size.width, gridSize);
stepPrefSuff(measurements, 1, size.width, gridSize);
stepPrefSuff(measurements, 2, size.height, gridSize);
stepPrefSuff(measurements, 3, size.height, gridSize);
for( int i = 0, howMany = (int)measurements.size() / measurePerClassifier; i < howMany; i++ )
classifiers.push_back(TLDEnsembleClassifier(measurements, i * measurePerClassifier, (i + 1) * measurePerClassifier));
return (int)classifiers.size();
}
}}
#ifndef OPENCV_TLD_UTILS
#define OPENCV_TLD_UTILS
#include "precomp.hpp"
#include "opencv2/highgui.hpp"
namespace cv
{
namespace tld
{
//debug functions and variables
#define ALEX_DEBUG
#ifdef ALEX_DEBUG
#define dfprintf(x) fprintf x
#define dprintf(x) printf x
#else
#define dfprintf(x)
#define dprintf(x)
#endif
#define MEASURE_TIME(a)\
{\
clock_t start; float milisec = 0.0; \
start = clock(); {a} milisec = 1000.0 * (clock() - start) / CLOCKS_PER_SEC; \
dprintf(("%-90s took %f milis\n", #a, milisec));\
}
#define HERE dprintf(("line %d\n", __LINE__)); fflush(stderr);
#define START_TICK(name)\
{ \
clock_t start; double milisec = 0.0; start = clock();
#define END_TICK(name) milisec = 1000.0 * (clock() - start) / CLOCKS_PER_SEC; \
dprintf(("%s took %f milis\n", name, milisec)); \
}
extern Rect2d etalon;
void myassert(const Mat& img);
void printPatch(const Mat_<uchar>& standardPatch);
std::string type2str(const Mat& mat);
void drawWithRects(const Mat& img, std::vector<Rect2d>& blackOnes, Rect2d whiteOne = Rect2d(-1.0, -1.0, -1.0, -1.0));
void drawWithRects(const Mat& img, std::vector<Rect2d>& blackOnes, std::vector<Rect2d>& whiteOnes, String fileName = "");
//aux functions and variables
template<typename T> inline T CLIP(T x, T a, T b){ return std::min(std::max(x, a), b); }
/** Computes overlap between the two given rectangles. Overlap is computed as ratio of rectangles' intersection to that
* of their union.*/
double overlap(const Rect2d& r1, const Rect2d& r2);
/** Resamples the area surrounded by r2 in img so it matches the size of samples, where it is written.*/
void resample(const Mat& img, const RotatedRect& r2, Mat_<uchar>& samples);
/** Specialization of resample() for rectangles without rotation for better performance and simplicity.*/
void resample(const Mat& img, const Rect2d& r2, Mat_<uchar>& samples);
/** Computes the variance of single given image.*/
double variance(const Mat& img);
/** Computes normalized correlation coefficient between the two patches (they should be
* of the same size).*/
double NCC(const Mat_<uchar>& patch1, const Mat_<uchar>& patch2);
void getClosestN(std::vector<Rect2d>& scanGrid, Rect2d bBox, int n, std::vector<Rect2d>& res);
double scaleAndBlur(const Mat& originalImg, int scale, Mat& scaledImg, Mat& blurredImg, Size GaussBlurKernelSize, double scaleStep);
int getMedian(const std::vector<int>& values, int size = -1);
}
}
#endif
\ No newline at end of file
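For reference, the normalized correlation coefficient that NCC() computes for two equally sized patches $p_1$, $p_2$ with $N$ pixels (matching the main branch of the implementation in tldUtils.cpp above) is

$$\mathrm{NCC}(p_1, p_2) = \frac{\sum p_1 p_2 - \frac{1}{N}\sum p_1 \sum p_2}{\sqrt{\sum p_1^2 - \frac{(\sum p_1)^2}{N}}\;\sqrt{\sum p_2^2 - \frac{(\sum p_2)^2}{N}}},$$

and the nearest-neighbour similarities in tldDetector.cpp remap it from $[-1, 1]$ to $[0, 1]$ via $0.5\,(\mathrm{NCC} + 1)$.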