Commit d3630d0f authored by Maksim Shabunin

Cleanup old TBB-related guards

parent 2baed7e3
set(the_description "Object Detection")
#uncomment the following line to enable parallel computing
#add_definitions(-DHAVE_TBB)
ocv_define_module(dpm opencv_core opencv_imgproc opencv_objdetect OPTIONAL opencv_highgui WRAP python)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4512) # disable warning on Win64
@@ -97,16 +97,6 @@ int main( int argc, char** argv )
return -1;
}
#ifdef HAVE_TBB
cout << "Running with TBB" << endl;
#else
#ifdef _OPENMP
cout << "Running with OpenMP" << endl;
#else
cout << "Running without OpenMP and without TBB" << endl;
#endif
#endif
Mat frame;
namedWindow("DPM Cascade Detection", 1);
// the color of the rectangle
@@ -113,16 +113,6 @@ int main( int argc, char** argv )
if ( !readImageLists(image_list, imgFileList) )
return -1;
#ifdef HAVE_TBB
cout << "Running with TBB" << endl;
#else
#ifdef _OPENMP
cout << "Running with OpenMP" << endl;
#else
cout << "Running without OpenMP and without TBB" << endl;
#endif
#endif
cv::Ptr<DPMDetector> detector = \
DPMDetector::create(vector<string>(1, model_path));
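Note: the removed #ifdef blocks in the samples only reported, at compile time, which parallel framework the binary was built against. As a rough sketch (not part of this commit), comparable information can be queried at run time through OpenCV's core API, regardless of whether the library was built with TBB, OpenMP, or no parallel backend at all:

// Illustrative sketch only: a runtime alternative to the removed compile-time printout.
// cv::getNumThreads() reports the thread count of the active parallel backend,
// and cv::getBuildInformation() lists the framework OpenCV was built with.
#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    std::cout << "Parallel threads available: " << cv::getNumThreads() << std::endl;
    std::cout << cv::getBuildInformation() << std::endl;
    return 0;
}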
@@ -220,36 +220,12 @@ void DPMCascade::computeRootPCAScores(vector< vector< Mat > > &rootScores)
for (int comp = 0; comp < model.numComponents; comp++)
{
rootScores[comp].resize(nlevels);
#ifdef HAVE_TBB // parallel computing
ParalComputeRootPCAScores paralTask(pcaPyramid, model.rootPCAFilters[comp],
model.pcaDim, rootScores[comp]);
parallel_for_(Range(interval, nlevels), paralTask);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int level = interval; level < nlevels; level++)
{
Mat feat = pcaPyramid[level];
Mat filter = model.rootPCAFilters[comp];
// compute size of output
int height = feat.rows - filter.rows + 1;
int width = (feat.cols - filter.cols) / model.pcaDim + 1;
if (height < 1 || width < 1)
CV_Error(CV_StsBadArg,
"Invalid input, filter size should be smaller than feature size.");
Mat result = Mat::zeros(Size(width, height), CV_64F);
convolutionEngine.convolve(feat, filter, model.pcaDim, result);
rootScores[comp][level] = result;
}
#endif
}
}
#ifdef HAVE_TBB
ParalComputeRootPCAScores::ParalComputeRootPCAScores(
const vector< Mat > &pcaPyrad,
const Mat &f,
@@ -279,7 +255,6 @@ void ParalComputeRootPCAScores::operator() (const Range &range) const
scores[level] = result;
}
}
#endif
void DPMCascade::process( vector< vector<double> > &dets)
{
@@ -128,7 +128,6 @@ class DPMCascade
std::vector< std::vector<double> > detect(Mat &image);
};
#ifdef HAVE_TBB
/** @brief This class convolves root PCA feature pyramid
* and root PCA filters in parallel using Intel Threading
* Building Blocks (TBB)
@@ -151,7 +150,6 @@ class ParalComputeRootPCAScores : public ParallelLoopBody
int pcaDim;
std::vector< Mat > &scores;
};
#endif
} // namespace dpm
} // namespace cv
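The ParallelLoopBody pattern kept by this cleanup needs no HAVE_TBB guard: cv::parallel_for_ dispatches to whatever parallel framework OpenCV was built with (TBB, OpenMP, pthreads, and so on) and falls back to a plain serial loop when none is available. A minimal self-contained sketch of that pattern, illustrative only and not taken from this module:

#include <vector>
#include <opencv2/core.hpp>

// Illustrative body: squares each element of the vector chunk assigned by parallel_for_.
class SquareBody : public cv::ParallelLoopBody
{
public:
    SquareBody(std::vector<double> &d) : data(d) {}
    void operator()(const cv::Range &range) const
    {
        for (int i = range.start; i < range.end; i++)
            data[i] = data[i] * data[i];
    }
private:
    std::vector<double> &data;
};

int main()
{
    std::vector<double> values(1000, 2.0);
    // The range is split across the available backend, or processed serially
    // when OpenCV was built without any parallel framework.
    cv::parallel_for_(cv::Range(0, (int)values.size()), SquareBody(values));
    return 0;
}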
@@ -58,65 +58,11 @@ Feature::Feature (PyramidParameter p):params(p)
void Feature::computeFeaturePyramid(const Mat &imageM, vector< Mat > &pyramid)
{
#ifdef HAVE_TBB
ParalComputePyramid paralTask(imageM, pyramid, params);
paralTask.initialize();
// perform parallel computing
parallel_for_(Range(0, params.interval), paralTask);
#else
CV_Assert(params.interval > 0);
// scale factor between two levels
params.sfactor = pow(2.0, 1.0/params.interval);
const Size_<double> imSize = imageM.size();
params.maxScale = 1 + (int)floor(log(min(imSize.width, imSize.height)/
(float)(params.binSize*5.0))/log(params.sfactor));
if (params.maxScale < params.interval)
{
CV_Error(CV_StsBadArg, "The image is too small to create a pyramid");
return;
}
pyramid.resize(params.maxScale + params.interval);
params.scales.resize(params.maxScale + params.interval);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < params.interval; i++)
{
const double scale = (double)(1.0f/pow(params.sfactor, i));
Mat imScaled;
resize(imageM, imScaled, imSize * scale);
// First octave at twice the image resolution
computeHOG32D(imScaled, pyramid[i], params.binSize/2,
params.padx + 1, params.pady + 1);
params.scales[i] = 2*scale;
// Second octave at the original resolution
if (i + params.interval <= params.maxScale)
computeHOG32D(imScaled, pyramid[i+params.interval],
params.binSize, params.padx + 1, params.pady + 1);
params.scales[i+params.interval] = scale;
// Remaining octaves
for ( int j = i + params.interval; j < params.maxScale; j += params.interval)
{
Mat imScaled2;
Size_<double> imScaledSize = imScaled.size();
resize(imScaled, imScaled2, imScaledSize*0.5);
imScaled = imScaled2;
computeHOG32D(imScaled2, pyramid[j+params.interval],
params.binSize, params.padx + 1, params.pady + 1);
params.scales[j+params.interval] = params.scales[j]*0.5;
}
}
#endif
}
#ifdef HAVE_TBB
ParalComputePyramid::ParalComputePyramid(const Mat &inputImage, \
vector< Mat > &outputPyramid,\
PyramidParameter &p):
@@ -177,7 +123,6 @@ void ParalComputePyramid::operator() (const Range &range) const
}
}
}
#endif
void Feature::computeHOG32D(const Mat &imageM, Mat &featM, const int sbin, const int pad_x, const int pad_y)
{
@@ -137,7 +137,6 @@ class Feature
};
#ifdef HAVE_TBB
/** @brief This class computes feature pyramid in parallel
* using Intel Threading Building Blocks (TBB)
*/
@@ -165,7 +164,6 @@ class ParalComputePyramid : public ParallelLoopBody
// pyramid parameters
PyramidParameter &params;
};
#endif
} // namespace dpm
} // namespace cv
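ParalComputePyramid follows the same ParallelLoopBody pattern shown above. As a side note, and assuming a newer OpenCV release built with C++11 support (this commit does not rely on it), parallel_for_ also accepts a lambda, which avoids writing a dedicated body class for simple loops:

#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

int main()
{
    std::vector<double> scales(32);
    // Assumed lambda overload of cv::parallel_for_ (present in recent OpenCV
    // versions); each worker fills its own sub-range of per-level scale factors.
    cv::parallel_for_(cv::Range(0, (int)scales.size()), [&](const cv::Range &range)
    {
        for (int i = range.start; i < range.end; i++)
            scales[i] = std::pow(2.0, -i / 10.0);
    });
    return 0;
}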