Commit 98d7d992 authored by Daniil Osokin, committed by Andrey Kamaev

Add threaded version of equalizeHist

parent 0bbba847
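For context, a minimal usage sketch of the function this commit parallelizes is below; the input file name is hypothetical. With this change, the histogram accumulation and LUT application inside cv::equalizeHist can run through parallel_for when OpenCV is built with TBB and the image has at least 640*480 pixels (see isWorthParallel in the diff).

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    // Hypothetical input file; equalizeHist expects a single-channel 8-bit image.
    cv::Mat src = cv::imread("input.png", 0);
    if (src.empty())
        return 1;

    cv::Mat dst;
    cv::equalizeHist(src, dst);   // histogram and LUT passes may now run via parallel_for with TBB

    cv::imwrite("equalized.png", dst);
    return 0;
}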
@@ -2404,90 +2404,108 @@ cvCalcProbDensity( const CvHistogram* hist, const CvHistogram* hist_mask,
     }
 }
 
-CV_IMPL void cvEqualizeHist( const CvArr* srcarr, CvArr* dstarr )
-{
-    cv::equalizeHist(cv::cvarrToMat(srcarr), cv::cvarrToMat(dstarr));
-}
-
-void cv::equalizeHist( InputArray _src, OutputArray _dst )
-{
-    Mat src = _src.getMat();
-    CV_Assert( src.type() == CV_8UC1 );
-
-    _dst.create( src.size(), src.type() );
-    Mat dst = _dst.getMat();
-
-    if(src.empty())
-        return;
-
-    const int hist_sz = (1 << (8*sizeof(uchar)));
-    int hist[hist_sz] = {0,};
-
-    const size_t sstep = src.step;
-    const size_t dstep = dst.step;
-
-    int width = src.cols;
-    int height = src.rows;
-
-    if (src.isContinuous())
-    {
-        width *= height;
-        height = 1;
-    }
-
-    for (const uchar* ptr = src.ptr<uchar>(); height--; ptr += sstep)
-    {
-        int x = 0;
-        for (; x <= width - 4; x += 4)
-        {
-            int t0 = ptr[x], t1 = ptr[x+1];
-            hist[t0]++; hist[t1]++;
-            t0 = ptr[x+2]; t1 = ptr[x+3];
-            hist[t0]++; hist[t1]++;
-        }
-        for (; x < width; ++x, ++ptr)
-            hist[ptr[x]]++;
-    }
-
-    int i = 0;
-    while (!hist[i]) ++i;
-
-    int total = (int)src.total();
-    if (hist[i] == total)
-    {
-        dst.setTo(i);
-        return;
-    }
-
-    float scale = (hist_sz - 1.f)/(total - hist[i]);
-    int sum = 0;
-    int lut[hist_sz];
-
-    for (lut[i++] = 0; i < hist_sz; ++i)
-    {
-        sum += hist[i];
-        lut[i] = saturate_cast<uchar>(sum * scale);
-    }
-
-    int cols = src.cols;
-    int rows = src.rows;
-
-    if (src.isContinuous() && dst.isContinuous())
-    {
-        cols *= rows;
-        rows = 1;
-    }
-
-    const uchar* sptr = src.ptr<uchar>();
-    uchar* dptr = dst.ptr<uchar>();
-
-    for (; rows--; sptr += sstep, dptr += dstep)
-    {
-        int x = 0;
-        for (; x <= cols - 4; x += 4)
-        {
-            int v0 = sptr[x];
-            int v1 = sptr[x+1];
+class EqualizeHistCalcHist_Invoker
+{
+public:
+    enum {HIST_SZ = 256};
+
+#ifdef HAVE_TBB
+    typedef tbb::mutex* MutextPtr;
+#else
+    typedef void* MutextPtr;
+#endif
+
+    EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, MutextPtr histogramLock)
+        : src_(src), globalHistogram_(histogram), histogramLock_(histogramLock)
+    { }
+
+    void operator()( const cv::BlockedRange& rowRange ) const
+    {
+        int localHistogram[HIST_SZ] = {0, };
+
+        const size_t sstep = src_.step;
+
+        int width = src_.cols;
+        int height = rowRange.end() - rowRange.begin();
+
+        if (src_.isContinuous())
+        {
+            width *= height;
+            height = 1;
+        }
+
+        for (const uchar* ptr = src_.ptr<uchar>(rowRange.begin()); height--; ptr += sstep)
+        {
+            int x = 0;
+            for (; x <= width - 4; x += 4)
+            {
+                int t0 = ptr[x], t1 = ptr[x+1];
+                localHistogram[t0]++; localHistogram[t1]++;
+                t0 = ptr[x+2]; t1 = ptr[x+3];
+                localHistogram[t0]++; localHistogram[t1]++;
+            }
+            for (; x < width; ++x, ++ptr)
+                localHistogram[ptr[x]]++;
+        }
+
+#ifdef HAVE_TBB
+        tbb::mutex::scoped_lock lock(*histogramLock_);
+#endif
+
+        for( int i = 0; i < HIST_SZ; i++ )
+            globalHistogram_[i] += localHistogram[i];
+    }
+
+    static bool isWorthParallel( const cv::Mat& src )
+    {
+#ifdef HAVE_TBB
+        return ( src.total() >= 640*480 );
+#else
+        (void)src;
+        return false;
+#endif
+    }
+
+private:
+    EqualizeHistCalcHist_Invoker& operator=(const EqualizeHistCalcHist_Invoker&);
+
+    cv::Mat& src_;
+    int* globalHistogram_;
+    MutextPtr histogramLock_;
+};
+
+class EqualizeHistLut_Invoker
+{
+public:
+    EqualizeHistLut_Invoker( cv::Mat& src, cv::Mat& dst, int* lut )
+        : src_(src),
+          dst_(dst),
+          lut_(lut)
+    { }
+
+    void operator()( const cv::BlockedRange& rowRange ) const
+    {
+        const size_t sstep = src_.step;
+        const size_t dstep = dst_.step;
+
+        int width = src_.cols;
+        int height = rowRange.end() - rowRange.begin();
+        int* lut = lut_;
+
+        if (src_.isContinuous() && dst_.isContinuous())
+        {
+            width *= height;
+            height = 1;
+        }
+
+        const uchar* sptr = src_.ptr<uchar>(rowRange.begin());
+        uchar* dptr = dst_.ptr<uchar>(rowRange.begin());
+
+        for (; height--; sptr += sstep, dptr += dstep)
+        {
+            int x = 0;
+            for (; x <= width - 4; x += 4)
+            {
+                int v0 = sptr[x];
+                int v1 = sptr[x+1];
@@ -2504,9 +2522,88 @@ void cv::equalizeHist( InputArray _src, OutputArray _dst )
             dptr[x+3] = (uchar)x1;
         }
-        for (; x < cols; ++x)
-            dptr[x] = (uchar)lut[sptr[x]];
-    }
-}
+            for (; x < width; ++x)
+                dptr[x] = (uchar)lut[sptr[x]];
+        }
+    }
+
+    static bool isWorthParallel( const cv::Mat& src )
+    {
+#ifdef HAVE_TBB
+        return ( src.total() >= 640*480 );
+#else
+        (void)src;
+        return false;
+#endif
+    }
+
+private:
+    EqualizeHistLut_Invoker& operator=(const EqualizeHistLut_Invoker&);
+
+    cv::Mat& src_;
+    cv::Mat& dst_;
+    int* lut_;
+};
+
+CV_IMPL void cvEqualizeHist( const CvArr* srcarr, CvArr* dstarr )
+{
+    cv::equalizeHist(cv::cvarrToMat(srcarr), cv::cvarrToMat(dstarr));
+}
+
+void cv::equalizeHist( InputArray _src, OutputArray _dst )
+{
+    Mat src = _src.getMat();
+    CV_Assert( src.type() == CV_8UC1 );
+
+    _dst.create( src.size(), src.type() );
+    Mat dst = _dst.getMat();
+
+    if(src.empty())
+        return;
+
+#ifdef HAVE_TBB
+    tbb::mutex histogramLockInstance;
+    EqualizeHistCalcHist_Invoker::MutextPtr histogramLock = &histogramLockInstance;
+#else
+    EqualizeHistCalcHist_Invoker::MutextPtr histogramLock = 0;
+#endif
+
+    const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ;
+    int hist[hist_sz] = {0,};
+    int lut[hist_sz];
+
+    EqualizeHistCalcHist_Invoker calcBody(src, hist, histogramLock);
+    EqualizeHistLut_Invoker      lutBody(src, dst, lut);
+    cv::BlockedRange heightRange(0, src.rows);
+
+    if(EqualizeHistCalcHist_Invoker::isWorthParallel(src))
+        parallel_for(heightRange, calcBody);
+    else
+        calcBody(heightRange);
+
+    int i = 0;
+    while (!hist[i]) ++i;
+
+    int total = (int)src.total();
+    if (hist[i] == total)
+    {
+        dst.setTo(i);
+        return;
+    }
+
+    float scale = (hist_sz - 1.f)/(total - hist[i]);
+    int sum = 0;
+
+    for (lut[i++] = 0; i < hist_sz; ++i)
+    {
+        sum += hist[i];
+        lut[i] = saturate_cast<uchar>(sum * scale);
+    }
+
+    if(EqualizeHistLut_Invoker::isWorthParallel(src))
+        parallel_for(heightRange, lutBody);
+    else
+        lutBody(heightRange);
+}
 
 /* Implementation of RTTI and Generic Functions for CvHistogram */
...