Commit 6308be2c authored by Evgeny Talanin

Changed parallel_for to parallel_for_ in hog.cpp and cascadedetect.cpp

parent b8c185de
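For context: the diff below moves CascadeClassifierInvoker, HOGInvoker, and HOGConfInvoker from the old parallel_for/BlockedRange API (and the TBB-backed ConcurrentRectVector/ConcurrentDoubleVector containers) to the backend-neutral parallel_for_/ParallelLoopBody API, collecting results in plain std::vector instances guarded by a cv::Mutex. Below is a minimal sketch of that pattern, assuming OpenCV 2.4-era headers; the SquareCollector class and its loop body are illustrative and not code from this commit.

#include <opencv2/core/core.hpp>
#include <vector>

// Hypothetical ParallelLoopBody subclass illustrating the pattern used in this
// commit: each worker writes into a shared std::vector under a cv::Mutex,
// since std::vector::push_back is not thread-safe on its own.
class SquareCollector : public cv::ParallelLoopBody
{
public:
    SquareCollector(std::vector<int>& results, cv::Mutex* mtx)
        : results_(&results), mtx_(mtx) {}

    void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; i++)
        {
            int value = i * i;            // stand-in for real per-item work
            mtx_->lock();
            results_->push_back(value);   // guarded access to the shared vector
            mtx_->unlock();
        }
    }

private:
    std::vector<int>* results_;
    cv::Mutex* mtx_;
};

int main()
{
    std::vector<int> results;
    cv::Mutex mtx;
    // parallel_for_ splits the Range across whichever parallel backend
    // OpenCV was built with (TBB, OpenMP, GCD, plain threads, or serial).
    cv::parallel_for_(cv::Range(0, 100), SquareCollector(results, &mtx));
    return results.size() == 100 ? 0 : 1;
}

Compared with the previous ConcurrentRectVector approach, this keeps the containers standard and leaves the choice of threading backend to parallel_for_, at the cost of an explicit lock around each push_back.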
cascadedetect.cpp

@@ -943,10 +943,11 @@ void CascadeClassifier::setFaceDetectionMaskGenerator()
 #endif
 }
 
-struct CascadeClassifierInvoker
+class CascadeClassifierInvoker : public ParallelLoopBody
 {
+public:
     CascadeClassifierInvoker( CascadeClassifier& _cc, Size _sz1, int _stripSize, int _yStep, double _factor,
-        ConcurrentRectVector& _vec, vector<int>& _levels, vector<double>& _weights, bool outputLevels, const Mat& _mask)
+        vector<Rect>& _vec, vector<int>& _levels, vector<double>& _weights, bool outputLevels, const Mat& _mask, Mutex* _mtx)
     {
         classifier = &_cc;
         processingRectSize = _sz1;
@@ -954,19 +955,20 @@ struct CascadeClassifierInvoker
         yStep = _yStep;
         scalingFactor = _factor;
         rectangles = &_vec;
         rejectLevels = outputLevels ? &_levels : 0;
         levelWeights = outputLevels ? &_weights : 0;
-        mask=_mask;
+        mask = _mask;
+        mtx = _mtx;
     }
 
-    void operator()(const BlockedRange& range) const
+    void operator()(const Range& range) const
     {
         Ptr<FeatureEvaluator> evaluator = classifier->featureEvaluator->clone();
 
         Size winSize(cvRound(classifier->data.origWinSize.width * scalingFactor), cvRound(classifier->data.origWinSize.height * scalingFactor));
 
-        int y1 = range.begin() * stripSize;
-        int y2 = min(range.end() * stripSize, processingRectSize.height);
+        int y1 = range.start * stripSize;
+        int y2 = min(range.end * stripSize, processingRectSize.height);
         for( int y = y1; y < y2; y += yStep )
         {
             for( int x = 0; x < processingRectSize.width; x += yStep )
@@ -988,14 +990,20 @@ struct CascadeClassifierInvoker
                     result = -(int)classifier->data.stages.size();
                 if( classifier->data.stages.size() + result < 4 )
                 {
+                    mtx->lock();
                     rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor), winSize.width, winSize.height));
+                    mtx->unlock();
                     rejectLevels->push_back(-result);
                     levelWeights->push_back(gypWeight);
                 }
             }
             else if( result > 0 )
+            {
+                mtx->lock();
                 rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor),
                                            winSize.width, winSize.height));
+                mtx->unlock();
+            }
             if( result == 0 )
                 x += yStep;
         }
@@ -1003,13 +1011,14 @@ struct CascadeClassifierInvoker
     }
 
     CascadeClassifier* classifier;
-    ConcurrentRectVector* rectangles;
+    vector<Rect>* rectangles;
     Size processingRectSize;
     int stripSize, yStep;
     double scalingFactor;
     vector<int> *rejectLevels;
     vector<double> *levelWeights;
     Mat mask;
+    Mutex* mtx;
 };
 
 struct getRect { Rect operator ()(const CvAvgComp& e) const { return e.rect; } };
@@ -1031,22 +1040,23 @@ bool CascadeClassifier::detectSingleScale( const Mat& image, int stripCount, Siz
         currentMask=maskGenerator->generateMask(image);
     }
 
-    ConcurrentRectVector concurrentCandidates;
+    vector<Rect> candidatesVector;
     vector<int> rejectLevels;
     vector<double> levelWeights;
+    Mutex mtx;
     if( outputRejectLevels )
    {
-        parallel_for(BlockedRange(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
-            concurrentCandidates, rejectLevels, levelWeights, true, currentMask));
+        parallel_for_(Range(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
+            candidatesVector, rejectLevels, levelWeights, true, currentMask, &mtx));
         levels.insert( levels.end(), rejectLevels.begin(), rejectLevels.end() );
         weights.insert( weights.end(), levelWeights.begin(), levelWeights.end() );
     }
     else
     {
-        parallel_for(BlockedRange(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
-            concurrentCandidates, rejectLevels, levelWeights, false, currentMask));
+        parallel_for_(Range(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
+            candidatesVector, rejectLevels, levelWeights, false, currentMask, &mtx));
     }
-    candidates.insert( candidates.end(), concurrentCandidates.begin(), concurrentCandidates.end() );
+    candidates.insert( candidates.end(), candidatesVector.begin(), candidatesVector.end() );
 
 #if defined (LOG_CASCADE_STATISTIC)
     logger.write();
hog.cpp

@@ -939,12 +939,13 @@ void HOGDescriptor::detect(const Mat& img, vector<Point>& hits, double hitThresh
     detect(img, hits, weightsV, hitThreshold, winStride, padding, locations);
 }
 
-struct HOGInvoker
+class HOGInvoker : public ParallelLoopBody
 {
+public:
     HOGInvoker( const HOGDescriptor* _hog, const Mat& _img,
                 double _hitThreshold, Size _winStride, Size _padding,
-                const double* _levelScale, ConcurrentRectVector* _vec,
-                ConcurrentDoubleVector* _weights=0, ConcurrentDoubleVector* _scales=0 )
+                const double* _levelScale, std::vector<Rect> * _vec, Mutex* _mtx,
+                std::vector<double>* _weights=0, std::vector<double>* _scales=0 )
     {
         hog = _hog;
         img = _img;
@@ -955,11 +956,12 @@ struct HOGInvoker
         vec = _vec;
         weights = _weights;
         scales = _scales;
+        mtx = _mtx;
     }
 
-    void operator()( const BlockedRange& range ) const
+    void operator()( const Range& range ) const
     {
-        int i, i1 = range.begin(), i2 = range.end();
+        int i, i1 = range.start, i2 = range.end;
         double minScale = i1 > 0 ? levelScale[i1] : i2 > 1 ? levelScale[i1+1] : std::max(img.cols, img.rows);
         Size maxSz(cvCeil(img.cols/minScale), cvCeil(img.rows/minScale));
         Mat smallerImgBuf(maxSz, img.type());
@@ -977,23 +979,29 @@ struct HOGInvoker
                 resize(img, smallerImg, sz);
             hog->detect(smallerImg, locations, hitsWeights, hitThreshold, winStride, padding);
             Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale));
+            mtx->lock();
             for( size_t j = 0; j < locations.size(); j++ )
             {
                 vec->push_back(Rect(cvRound(locations[j].x*scale),
                                     cvRound(locations[j].y*scale),
                                     scaledWinSize.width, scaledWinSize.height));
-                if (scales) {
+                if (scales)
+                {
                     scales->push_back(scale);
                 }
             }
+            mtx->unlock();
 
             if (weights && (!hitsWeights.empty()))
             {
+                mtx->lock();
                 for (size_t j = 0; j < locations.size(); j++)
                 {
                     weights->push_back(hitsWeights[j]);
                 }
+                mtx->unlock();
             }
         }
     }
@@ -1003,9 +1011,10 @@ struct HOGInvoker
     Size winStride;
     Size padding;
     const double* levelScale;
-    ConcurrentRectVector* vec;
-    ConcurrentDoubleVector* weights;
-    ConcurrentDoubleVector* scales;
+    std::vector<Rect>* vec;
+    std::vector<double>* weights;
+    std::vector<double>* scales;
+    Mutex* mtx;
 };
@@ -1030,13 +1039,14 @@ void HOGDescriptor::detectMultiScale(
     levels = std::max(levels, 1);
     levelScale.resize(levels);
 
-    ConcurrentRectVector allCandidates;
-    ConcurrentDoubleVector tempScales;
-    ConcurrentDoubleVector tempWeights;
-    vector<double> foundScales;
+    std::vector<Rect> allCandidates;
+    std::vector<double> tempScales;
+    std::vector<double> tempWeights;
+    std::vector<double> foundScales;
+    Mutex mtx;
 
-    parallel_for(BlockedRange(0, (int)levelScale.size()),
-                 HOGInvoker(this, img, hitThreshold, winStride, padding, &levelScale[0], &allCandidates, &tempWeights, &tempScales));
+    parallel_for_(Range(0, (int)levelScale.size()),
+                  HOGInvoker(this, img, hitThreshold, winStride, padding, &levelScale[0], &allCandidates, &mtx, &tempWeights, &tempScales));
 
     std::copy(tempScales.begin(), tempScales.end(), back_inserter(foundScales));
     foundLocations.clear();
@@ -2382,12 +2392,13 @@ vector<float> HOGDescriptor::getDaimlerPeopleDetector()
     return vector<float>(detector, detector + sizeof(detector)/sizeof(detector[0]));
 }
 
-struct HOGConfInvoker
+class HOGConfInvoker : public ParallelLoopBody
 {
+public:
     HOGConfInvoker( const HOGDescriptor* _hog, const Mat& _img,
                     double _hitThreshold, Size _padding,
                     std::vector<DetectionROI>* locs,
-                    ConcurrentRectVector* _vec )
+                    std::vector<Rect>* _vec, Mutex* _mtx )
     {
         hog = _hog;
         img = _img;
@@ -2395,11 +2406,12 @@ struct HOGConfInvoker
         padding = _padding;
         locations = locs;
         vec = _vec;
+        mtx = _mtx;
     }
 
-    void operator()( const BlockedRange& range ) const
+    void operator()( const Range& range ) const
     {
-        int i, i1 = range.begin(), i2 = range.end();
+        int i, i1 = range.start, i2 = range.end;
         Size maxSz(cvCeil(img.cols/(*locations)[0].scale), cvCeil(img.rows/(*locations)[0].scale));
         Mat smallerImgBuf(maxSz, img.type());
@@ -2419,10 +2431,14 @@ struct HOGConfInvoker
             hog->detectROI(smallerImg, (*locations)[i].locations, dets, (*locations)[i].confidences, hitThreshold, Size(), padding);
             Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale));
+            mtx->lock();
             for( size_t j = 0; j < dets.size(); j++ )
+            {
                 vec->push_back(Rect(cvRound(dets[j].x*scale),
                                     cvRound(dets[j].y*scale),
                                     scaledWinSize.width, scaledWinSize.height));
+            }
+            mtx->unlock();
         }
     }
@@ -2431,7 +2447,8 @@ struct HOGConfInvoker
     double hitThreshold;
     std::vector<DetectionROI>* locations;
     Size padding;
-    ConcurrentRectVector* vec;
+    std::vector<Rect>* vec;
+    Mutex* mtx;
 };
 
 void HOGDescriptor::detectROI(const cv::Mat& img, const vector<cv::Point> &locations,
@@ -2516,10 +2533,11 @@ void HOGDescriptor::detectMultiScaleROI(const cv::Mat& img,
                                         double hitThreshold,
                                         int groupThreshold) const
 {
-    ConcurrentRectVector allCandidates;
+    std::vector<Rect> allCandidates;
+    Mutex mtx;
 
-    parallel_for(BlockedRange(0, (int)locations.size()),
-                 HOGConfInvoker(this, img, hitThreshold, Size(8, 8), &locations, &allCandidates));
+    parallel_for_(Range(0, (int)locations.size()),
+                  HOGConfInvoker(this, img, hitThreshold, Size(8, 8), &locations, &allCandidates, &mtx));
 
     foundLocations.resize(allCandidates.size());
     std::copy(allCandidates.begin(), allCandidates.end(), foundLocations.begin());