Commit 8d3470b5 authored by Vladimir

Added optimization to Multi-target TLD update

parent 7dc95a3a
@@ -565,7 +565,12 @@ class CV_EXPORTS_W Tracker : public virtual Algorithm
virtual void read( const FileNode& fn )=0;
virtual void write( FileStorage& fs ) const=0;
public:
Ptr<TrackerModel> getModel()
{
return model;
}
protected:
virtual bool initImpl( const Mat& image, const Rect2d& boundingBox ) = 0;
virtual bool updateImpl( const Mat& image, Rect2d& boundingBox ) = 0;
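A minimal sketch of how the new public accessor can be used from outside the Tracker hierarchy (assumes this branch's OpenCV 3.x Tracker::create/init API; the ROI is a placeholder):

#include <opencv2/core.hpp>
#include <opencv2/tracking.hpp>

void inspectModel(const cv::Mat& firstFrame)
{
    // Illustrative only: any registered tracker name works; "TLD" is what the
    // multi-target update further down relies on.
    cv::Ptr<cv::Tracker> t = cv::Tracker::create("TLD");
    t->init(firstFrame, cv::Rect2d(10, 10, 80, 60));
    // getModel() exposes the protected Ptr<TrackerModel>; the optimized
    // multi-target TLD update below uses it to reach each tracker's model.
    cv::Ptr<cv::TrackerModel> model = t->getModel();
    CV_Assert(!model.empty());
}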
@@ -1206,11 +1211,7 @@ class CV_EXPORTS_W TrackerKCF : public Tracker
- "GRAY" -- Use grayscale values as the feature
- "CN" -- Color-names feature
*/
enum MODE {
GRAY = (1u << 0),
CN = (1u << 1),
CUSTOM = (1u<<2)
};
enum MODE {GRAY, CN, CN2};
struct CV_EXPORTS Params
{
@@ -1240,156 +1241,96 @@ class CV_EXPORTS_W TrackerKCF : public Tracker
bool compress_feature; //!< activate the pca method to compress the features
int max_patch_size; //!< threshold for the ROI size
int compressed_size; //!< feature size after compression
unsigned int desc_pca; //!< compressed descriptors of TrackerKCF::MODE
unsigned int desc_npca; //!< non-compressed descriptors of TrackerKCF::MODE
MODE descriptor; //!< descriptor type
};
virtual void setFeatureExtractor(void (*)(const Mat, const Rect, Mat&), bool pca_func = false);
/** @brief Constructor
@param parameters KCF parameters TrackerKCF::Params
*/
BOILERPLATE_CODE("KCF",TrackerKCF);
};
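A hedged sketch of configuring these parameters, assuming the bit-flag MODE variant of this hunk (desc_pca/desc_npca) and assuming that BOILERPLATE_CODE("KCF",TrackerKCF) declares the usual createTracker() factory:

#include <opencv2/tracking.hpp>

cv::Ptr<cv::TrackerKCF> makeKCF()
{
    cv::TrackerKCF::Params p;
    p.desc_pca  = cv::TrackerKCF::GRAY | cv::TrackerKCF::CN; // descriptors routed through PCA compression
    p.desc_npca = 0;                                         // no non-compressed descriptors
    p.compress_feature = true;
    p.compressed_size  = 2;
    // createTracker() is assumed to come from the BOILERPLATE_CODE macro above.
    return cv::TrackerKCF::createTracker(p);
}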
/************************************ MultiTracker Class ************************************/
/** @brief This class is used to track multiple objects using the specified tracker algorithm.
* The MultiTracker is a naive implementation of multiple object tracking.
* It processes the tracked objects independently, without any optimization across the tracked objects.
*/
/************************************ Multi-Tracker Classes ************************************/
/** @brief Base abstract class for the long-term Multi Object Trackers:
@sa Tracker, MultiTrackerTLD
*/
class CV_EXPORTS_W MultiTracker
{
public:
/**
* \brief Constructor.
* If trackerType is given, it will be set as the default algorithm for all trackers.
* @param trackerType the name of the tracker algorithm to be used
*/
MultiTracker(const String& trackerType = "" );
/** @brief Constructor for the MultiTracker
*/
MultiTracker()
{
targetNum = 0;
}
/**
* \brief Destructor
*/
~MultiTracker();
/** @brief Add a new target to the tracking-list and initialize the tracker with a known bounding box surrounding the target
@param image The initial frame
@param boundingBox The initial bounding box of the target
@param tracker_algorithm_name Multi-tracker algorithm name
@return True if the new target was initialized successfully, false otherwise
*/
bool addTarget(const Mat& image, const Rect2d& boundingBox, String tracker_algorithm_name);
/**
* \brief Add a new object to be tracked.
* The defaultAlgorithm will be used for the newly added tracker.
* @param image input image
* @param boundingBox a rectangle representing the ROI of the tracked object
*/
bool add( const Mat& image, const Rect2d& boundingBox );
/**
* \brief Add a new object to be tracked.
* @param trackerType the name of the tracker algorithm to be used
* @param image input image
* @param boundingBox a rectangle representing the ROI of the tracked object
*/
bool add( const String& trackerType, const Mat& image, const Rect2d& boundingBox );
/** @brief Update all trackers from the tracking-list, find the new most likely bounding boxes for the targets
@param image The current frame

@return True means that all targets were located; false means that the tracker could not locate one of the targets in the
current frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed
missing from the frame (say, out of sight)
*/
bool update(const Mat& image);
/**
* \brief Add a set of objects to be tracked.
* @param trackerType the name of the tracker algorithm to be used
* @param image input image
* @param boundingBox list of the tracked objects
*/
bool add(const String& trackerType, const Mat& image, std::vector<Rect2d> boundingBox);
/**
* \brief Add a set of objects to be tracked using the defaultAlgorithm tracker.
* @param image input image
* @param boundingBox list of the tracked objects
*/
bool add(const Mat& image, std::vector<Rect2d> boundingBox);
/** @brief Current number of targets in the tracking-list
*/
int targetNum;
/**
* \brief Update the current tracking status.
* The result will be saved in the internal storage.
* @param image input image
*/
bool update( const Mat& image);
//!< storage for the tracked objects, each object corresponds to one tracker algorithm.
std::vector<Rect2d> objects;
/** @brief Trackers list for Multi-Object-Tracker
*/
std::vector <Ptr<Tracker> > trackers;
/**
* \brief Update the current tracking status.
* @param image input image
* @param boundingBox the tracking result, a list of ROIs of the tracked objects.
*/
bool update( const Mat& image, std::vector<Rect2d> & boundingBox );
/** @brief Bounding Boxes list for Multi-Object-Tracker
*/
std::vector <Rect2d> boundingBoxes;
/** @brief List of randomly generated colors for bounding boxes display
*/
std::vector<Scalar> colors;
protected:
//!< storage for the tracker algorithms.
std::vector< Ptr<Tracker> > trackerList;
//!< default algorithm for the tracking method.
String defaultAlgorithm;
};
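A minimal usage sketch of the add()/update() interface declared above (the tracker name "MEDIANFLOW" and the ROIs are placeholders; this uses the defaultAlgorithm side of the diff):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/tracking.hpp>

void runMultiTracker(cv::VideoCapture& cap)
{
    cv::Mat frame;
    cap >> frame;

    cv::MultiTracker trackers("MEDIANFLOW");          // default algorithm for every added object
    std::vector<cv::Rect2d> rois;                     // placeholder ROIs
    rois.push_back(cv::Rect2d(40, 40, 60, 60));
    rois.push_back(cv::Rect2d(200, 80, 50, 70));
    trackers.add(frame, rois);                        // one tracker instance per ROI

    while (cap.read(frame))
    {
        trackers.update(frame);                       // results are stored in trackers.objects
        for (size_t i = 0; i < trackers.objects.size(); i++)
            cv::rectangle(frame, trackers.objects[i], cv::Scalar(0, 255, 0), 2);
    }
}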
class ROISelector {
public:
Rect2d select(Mat img, bool fromCenter = true);
Rect2d select(const std::string& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);
void select(const std::string& windowName, Mat img, std::vector<Rect2d> & boundingBox, bool fromCenter = true);
struct handlerT{
// basic parameters
bool isDrawing;
Rect2d box;
Mat image;
// parameters for drawing from the center
bool drawFromCenter;
Point2f center;
// initializer list
handlerT(): isDrawing(false), drawFromCenter(true) {};
}selectorParams;
// to store the tracked objects
std::vector<handlerT> objects;
private:
static void mouseHandler(int event, int x, int y, int flags, void *param);
void opencv_mouse_callback( int event, int x, int y, int , void *param );
// save the keypressed character
int key;
};
Rect2d CV_EXPORTS_W selectROI(Mat img, bool fromCenter = true);
Rect2d CV_EXPORTS_W selectROI(const std::string& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);
void CV_EXPORTS_W selectROI(const std::string& windowName, Mat img, std::vector<Rect2d> & boundingBox, bool fromCenter = true);
/************************************ Multi-Tracker Classes ************************************/
class CV_EXPORTS_W MultiTracker_Alt
{
public:
bool addTarget(const Mat& image, const Rect2d& boundingBox, char* tracker_algorithm_name);
bool update(const Mat& image);
int targetNum = 0;
std::vector <Ptr<Tracker>> trackers;
std::vector <Rect2d> boundingBoxes;
std::vector<Scalar> colors;
};
/** @brief Multi Object Tracker for TLD. TLD is a novel tracking framework that explicitly decomposes
the long-term tracking task into tracking, learning and detection.

The tracker follows the object from frame to frame. The detector localizes all appearances that
have been observed so far and corrects the tracker if necessary. The learning estimates the detector's
errors and updates it to avoid these errors in the future. The implementation is based on @cite TLD .

The Median Flow algorithm (see cv::TrackerMedianFlow) was chosen as the tracking component in this
implementation, following the authors. The tracker is supposed to be able to handle rapid motions, partial
occlusions, object absence, etc.

@sa Tracker, MultiTracker, TrackerTLD
*/
class CV_EXPORTS_W MultiTrackerTLD : public MultiTracker
{
public:
bool update(const Mat& image);
/** @brief Update all trackers from the tracking-list, find new most likely bounding boxes for the targets with an
optimized update method that uses some techniques to speed up calculations specifically for MO TLD. The only limitation
is that all target bounding boxes should have approximately the same aspect ratio. The speed boost is around 20%.

@param image The current frame.

@return True means that all targets were located; false means that the tracker could not locate one of the targets in the
current frame. Note that the latter *does not* imply that the tracker has failed; maybe the target is indeed
missing from the frame (say, out of sight)
*/
bool update_opt(const Mat& image);
};
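A minimal usage sketch of MultiTrackerTLD with the optimized update (the video path and ROIs are placeholders; assumes the String overload of addTarget and the public members shown in this commit):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/tracking.hpp>

int main()
{
    cv::VideoCapture cap("video.avi");                      // placeholder path
    cv::Mat frame;
    cap >> frame;

    cv::MultiTrackerTLD mt;
    // Each addTarget() call appends a dedicated TLD tracker to mt.trackers.
    mt.addTarget(frame, cv::Rect2d(50, 50, 80, 80), "TLD");
    mt.addTarget(frame, cv::Rect2d(200, 120, 60, 90), "TLD");

    while (cap.read(frame))
    {
        // update_opt() shares the sliding-window/variance work across targets;
        // per-target results are written to mt.boundingBoxes.
        bool allFound = mt.update_opt(frame);
        for (int i = 0; i < mt.targetNum; i++)
            cv::rectangle(frame, mt.boundingBoxes[i], mt.colors[i], 2);
        (void)allFound; // false only means some target was not located in this frame
    }
    return 0;
}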
//! @}
} /* namespace cv */
//! @}
#endif
@@ -44,7 +44,7 @@
namespace cv
{
//Multitracker
bool MultiTracker::addTarget(const Mat& image, const Rect2d& boundingBox, char* tracker_algorithm_name)
bool MultiTracker::addTarget(const Mat& image, const Rect2d& boundingBox, String tracker_algorithm_name)
{
Ptr<Tracker> tracker = Tracker::create(tracker_algorithm_name);
if (tracker == NULL)
@@ -65,6 +65,8 @@ namespace cv
else
colors.push_back(Scalar(rand() % 256, rand() % 256, rand() % 256));
//Target counter
targetNum++;
@@ -73,8 +75,7 @@ namespace cv
bool MultiTracker::update(const Mat& image)
{
printf("Naive-Loop MO-TLD Update....\n");
for (int i = 0; i < trackers.size(); i++)
for (int i = 0; i < (int)trackers.size(); i++)
if (!trackers[i]->update(image, boundingBoxes[i]))
return false;
@@ -85,16 +86,12 @@ namespace cv
/*Optimized update method for TLD Multitracker */
bool MultiTrackerTLD::update_opt(const Mat& image)
{
printf("Optimized MO-TLD Update....\n");
//Get parameters from first object
//Set current target(tracker) parameters
Rect2d boundingBox = boundingBoxes[0];
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[0];
tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
Ptr<tld::Data> data = tracker->data;
double scale = data->getScale();
@@ -130,11 +127,11 @@ namespace cv
for (int k = 0; k < targetNum; k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
Ptr<tld::Data> data = tracker->data;
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
data = tracker->data;
data->frameNum++;
@@ -186,16 +183,7 @@ namespace cv
#if 1
if (it != candidatesRes[k].end())
{
tld::resample(imageForDetector, candidates[k][it - candidatesRes[k].begin()], standardPatch);
//dfprintf((stderr, "%d %f %f\n", data->frameNum, tldModel->Sc(standardPatch), tldModel->Sr(standardPatch)));
//if( candidatesRes.size() == 2 && it == (candidatesRes.begin() + 1) )
//dfprintf((stderr, "detector WON\n"));
}
else
{
//dfprintf((stderr, "%d x x\n", data->frameNum));
}
#endif
if (*it > tld::CORE_THRESHOLD)
@@ -226,7 +214,6 @@ namespace cv
detectorResults[k][i].isObject = expertResult;
}
tldModel->integrateRelabeled(imageForDetector, image_blurred, detectorResults[k]);
//dprintf(("%d relabeled by nExpert\n", negRelabeled));
pExpert.additionalExamples(examplesForModel, examplesForEnsemble);
if (ocl::haveOpenCL())
tldModel->ocl_integrateAdditional(examplesForModel, examplesForEnsemble, true);
@@ -249,14 +236,7 @@ namespace cv
}
//Debug display candidates after Variance Filter
////////////////////////////////////////////////
Mat tmpImg = image;
for (int i = 0; i < debugStack[0].size(); i++)
//rectangle(tmpImg, debugStack[0][i], Scalar(255, 255, 255), 1, 1, 0);
debugStack[0].clear();
tmpImg.copyTo(image);
////////////////////////////////////////////////
return true;
}
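In outline, the optimized path builds the image pyramid and runs the sliding-window variance pass once per frame, and only the per-target classification stays inside the loop, unlike the naive per-target loop above. A pseudocode sketch of that structure (helper names are illustrative, not the branch's functions):

// bool MultiTrackerTLD::update_opt(const Mat& image)      // structural sketch only
// {
//     buildBlurredPyramidAndIntegralImages(image);         // done ONCE for all targets
//     for (each sliding window w)
//         for (each target k)
//             if (variance(w) passes target k's threshold) keep w for target k;
//     for (each target k) runEnsembleClassifier(k);        // only on surviving windows
//     for (each target k) runNearestNeighbour(k);          // Sr/Sc scores -> detector result
//     for (each target k)
//         fuse tracker and detector candidates, run P/N experts,
//         integrate relabeled and additional examples into the model;
//     return allTargetsLocated;
// }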
@@ -267,10 +247,10 @@ namespace cv
Tracker* trackerPtr = trackers[0];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
Size initSize = tldModel->getMinSize();
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
patches[k].clear();
Mat_<uchar> standardPatch(tld::STANDARD_PATCH_SIZE, tld::STANDARD_PATCH_SIZE);
@@ -290,10 +270,6 @@ namespace cv
std::vector <Point> tmpP;
std::vector <int> tmpI;
//int64 e1, e2;
//double t;
//e1 = getTickCount();
//Detection part
//Generate windows and filter by variance
scaleID = 0;
@@ -329,13 +305,13 @@ namespace cv
double windowVar = p2 - p * p;
//Loop over all objects
for (int k=0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
//Optimized variance calculation
bool varPass = (windowVar > tld::VARIANCE_THRESHOLD * *tldModel->detector->originalVariancePtr);
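For reference, windowVar above is the per-window variance computed from integral images, E[x^2] - (E[x])^2. A standalone sketch of the same test (helper names and the 0.5 factor are stand-ins for the branch's tld helpers and tld::VARIANCE_THRESHOLD):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

static bool passesVarianceFilter(const cv::Mat& gray, cv::Rect win, double originalVariance)
{
    cv::Mat sum, sqsum;
    cv::integral(gray, sum, sqsum, CV_64F, CV_64F);
    const double area = (double)win.area();
    // Sum of a rectangle from an integral image.
    auto boxSum = [&](const cv::Mat& ii) {
        return ii.at<double>(win.y, win.x)
             + ii.at<double>(win.y + win.height, win.x + win.width)
             - ii.at<double>(win.y, win.x + win.width)
             - ii.at<double>(win.y + win.height, win.x);
    };
    double p  = boxSum(sum)   / area;   // E[x]
    double p2 = boxSum(sqsum) / area;   // E[x^2]
    double windowVar = p2 - p * p;
    // Same rule as in the diff: keep windows whose variance exceeds a fraction
    // of the original patch variance.
    return windowVar > 0.5 * originalVariance;
}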
@@ -344,10 +320,6 @@ namespace cv
continue;
varBuffer[k].push_back(Point(dx * i, dy * j));
varScaleIDs[k].push_back(scaleID);
//Debug display candidates after Variance Filter
double curScale = pow(tld::SCALE_STEP, scaleID);
debugStack[0].push_back(Rect2d(dx * i* curScale, dy * j*curScale, tldModel->getMinSize().width*curScale, tldModel->getMinSize().height*curScale));
}
}
}
@@ -361,23 +333,14 @@ namespace cv
blurred_imgs.push_back(tmp);
} while (size.width >= initSize.width && size.height >= initSize.height);
//e2 = getTickCount();
//t = (e2 - e1) / getTickFrequency()*1000.0;
//printf("Variance: %d\t%f\n", varBuffer.size(), t);
//printf("OrigVar 1: %f\n", *tldModel->detector->originalVariancePtr);
//Ensemble classification
//e1 = getTickCount();
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
for (int i = 0; i < (int)varBuffer[k].size(); i++)
@@ -410,36 +373,16 @@ namespace cv
ensBuffer[k].push_back(varBuffer[k][i]);
ensScaleIDs[k].push_back(varScaleIDs[k][i]);
}
/*
for (int i = 0; i < (int)varBuffer[k].size(); i++)
{
tldModel->detector->prepareClassifiers(static_cast<int> (blurred_imgs[varScaleIDs[k][i]].step[0]));
if (tldModel->detector->ensembleClassifierNum(&blurred_imgs[varScaleIDs[k][i]].at<uchar>(varBuffer[k][i].y, varBuffer[k][i].x)) <= tld::ENSEMBLE_THRESHOLD)
continue;
ensBuffer[k].push_back(varBuffer[k][i]);
ensScaleIDs[k].push_back(varScaleIDs[k][i]);
}
*/
}
//e2 = getTickCount();
//t = (e2 - e1) / getTickFrequency()*1000.0;
//printf("Ensemble: %d\t%f\n", ensBuffer.size(), t);
//printf("varBuffer 1: %d\n", varBuffer[0].size());
//printf("ensBuffer 1: %d\n", ensBuffer[0].size());
//printf("varBuffer 2: %d\n", varBuffer[1].size());
//printf("ensBuffer 2: %d\n", ensBuffer[1].size());
//NN classification
//e1 = getTickCount();
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
npos = 0;
nneg = 0;
@@ -477,7 +420,6 @@ namespace cv
maxSc = scValue;
maxScRect = labPatch.rect;
}
//printf("%d %f %f\n", k, srValue, scValue);
}
@@ -487,13 +429,9 @@ namespace cv
else
{
res[k] = maxScRect;
//printf("%f %f %f %f\n", maxScRect.x, maxScRect.y, maxScRect.width, maxScRect.height);
detect_flgs[k] = true;
}
}
//e2 = getTickCount();
//t = (e2 - e1) / getTickFrequency()*1000.0;
//printf("NN: %d\t%f\n", patches.size(), t);
}
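For background, the srValue and scValue scores used above are TLD's relative and conservative similarities. A minimal sketch of the relative similarity; the conservative variant restricts the positive set to the earliest, most reliable positive examples (names here are illustrative):

static double relativeSimilarity(double sPlus, double sMinus)
{
    // sPlus : highest NCC-based similarity of the patch to the positive examples
    // sMinus: highest NCC-based similarity of the patch to the negative examples
    if (sPlus + sMinus <= 0.0)
        return 0.0;
    return sPlus / (sPlus + sMinus); // in [0,1]; compared against tld::CORE_THRESHOLD above
}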
void ocl_detect_all(const Mat& img, const Mat& imgBlurred, std::vector<Rect2d>& res, std::vector < std::vector < tld::TLDDetector::LabeledPatch > > &patches, std::vector<bool> &detect_flgs,
@@ -503,10 +441,10 @@ namespace cv
Tracker* trackerPtr = trackers[0];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
Size initSize = tldModel->getMinSize();
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
patches[k].clear();
Mat_<uchar> standardPatch(tld::STANDARD_PATCH_SIZE, tld::STANDARD_PATCH_SIZE);
@@ -526,10 +464,6 @@ namespace cv
std::vector <Point> tmpP;
std::vector <int> tmpI;
//int64 e1, e2;
//double t;
//e1 = getTickCount();
//Detection part
//Generate windows and filter by variance
scaleID = 0;
@@ -565,13 +499,13 @@ namespace cv
double windowVar = p2 - p * p;
//Loop over all objects
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
//Optimized variance calculation
bool varPass = (windowVar > tld::VARIANCE_THRESHOLD * *tldModel->detector->originalVariancePtr);
@@ -580,10 +514,6 @@ namespace cv
continue;
varBuffer[k].push_back(Point(dx * i, dy * j));
varScaleIDs[k].push_back(scaleID);
//Debug display candidates after Variance Filter
double curScale = pow(tld::SCALE_STEP, scaleID);
debugStack[0].push_back(Rect2d(dx * i* curScale, dy * j*curScale, tldModel->getMinSize().width*curScale, tldModel->getMinSize().height*curScale));
}
}
}
@@ -597,23 +527,14 @@ namespace cv
blurred_imgs.push_back(tmp);
} while (size.width >= initSize.width && size.height >= initSize.height);
//e2 = getTickCount();
//t = (e2 - e1) / getTickFrequency()*1000.0;
//printf("Variance: %d\t%f\n", varBuffer.size(), t);
//printf("OrigVar 1: %f\n", *tldModel->detector->originalVariancePtr);
//Ensemble classification
//e1 = getTickCount();
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
for (int i = 0; i < (int)varBuffer[k].size(); i++)
@@ -646,36 +567,16 @@ namespace cv
ensBuffer[k].push_back(varBuffer[k][i]);
ensScaleIDs[k].push_back(varScaleIDs[k][i]);
}
/*
for (int i = 0; i < (int)varBuffer[k].size(); i++)
{
tldModel->detector->prepareClassifiers(static_cast<int> (blurred_imgs[varScaleIDs[k][i]].step[0]));
if (tldModel->detector->ensembleClassifierNum(&blurred_imgs[varScaleIDs[k][i]].at<uchar>(varBuffer[k][i].y, varBuffer[k][i].x)) <= tld::ENSEMBLE_THRESHOLD)
continue;
ensBuffer[k].push_back(varBuffer[k][i]);
ensScaleIDs[k].push_back(varScaleIDs[k][i]);
}
*/
}
//e2 = getTickCount();
//t = (e2 - e1) / getTickFrequency()*1000.0;
//printf("varBuffer 1: %d\n", varBuffer[0].size());
//printf("ensBuffer 1: %d\n", ensBuffer[0].size());
//printf("varBuffer 2: %d\n", varBuffer[1].size());
//printf("ensBuffer 2: %d\n", ensBuffer[1].size());
//NN classification
//e1 = getTickCount();
for (int k = 0; k < trackers.size(); k++)
for (int k = 0; k < (int)trackers.size(); k++)
{
//TLD Tracker data extraction
Tracker* trackerPtr = trackers[k];
cv::tld::TrackerTLDImpl* tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
trackerPtr = trackers[k];
tracker = static_cast<tld::TrackerTLDImpl*>(trackerPtr);
//TLD Model Extraction
tld::TrackerTLDModel* tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->model));
//Size InitSize = tldModel->getMinSize();
tldModel = ((tld::TrackerTLDModel*)static_cast<TrackerModel*>(tracker->getModel()));
npos = 0;
nneg = 0;
maxSc = -5.0;
@@ -730,7 +631,6 @@ namespace cv
maxSc = scValue;
maxScRect = labPatch.rect;
}
//printf("%d %f %f\n", k, srValue, scValue);
}
@@ -740,12 +640,9 @@ namespace cv
else
{
res[k] = maxScRect;
//printf("%f %f %f %f\n", maxScRect.x, maxScRect.y, maxScRect.width, maxScRect.height);
detect_flgs[k] = true;
}
}
//e2 = getTickCount();
//t = (e2 - e1) / getTickFrequency()*1000.0;
//printf("NN: %d\t%f\n", patches.size(), t);
}
}
\ No newline at end of file