Commit f41f633d authored by Vadim Pisarevsky

fixed various warnings from the “doc” and other builders

parent a1784b73
......@@ -426,7 +426,7 @@ public:
int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5);
i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); set(i, a11); return *this;
}
template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,
typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,
typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12>
......@@ -439,7 +439,7 @@ public:
i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);
set(i, a12); return *this;
}
template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,
typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,
typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12,
......@@ -453,7 +453,7 @@ public:
i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);
i = set(i, a12); set(i, a13); return *this;
}
template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,
typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,
typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12,
......@@ -467,7 +467,7 @@ public:
i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11);
i = set(i, a12); i = set(i, a13); set(i, a14); return *this;
}
template<typename _Tp0, typename _Tp1, typename _Tp2, typename _Tp3,
typename _Tp4, typename _Tp5, typename _Tp6, typename _Tp7,
typename _Tp8, typename _Tp9, typename _Tp10, typename _Tp11, typename _Tp12,
......
......@@ -1075,7 +1075,7 @@ CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
Size ksize, Point anchor = Point(-1,-1),
bool normalize = true,
int borderType = BORDER_DEFAULT );
CV_EXPORTS_W void sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor = Point(-1, -1),
bool normalize = true,
......
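The sqrBoxFilter declared above accumulates squared pixel values over the kernel window, the companion to boxFilter's plain sums; combined, they give a one-pass local-variance estimate. A minimal usage sketch, not part of the commit (the CV_32F output depth and 5x5 window are illustrative assumptions):

#include <opencv2/imgproc.hpp>

// Local variance via var = E[x^2] - (E[x])^2: boxFilter supplies the local
// mean, sqrBoxFilter the local mean of squares (normalize=true by default).
cv::Mat localVariance(const cv::Mat& src)
{
    cv::Mat mean, sqmean;
    cv::boxFilter(src, mean, CV_32F, cv::Size(5, 5));
    cv::sqrBoxFilter(src, sqmean, CV_32F, cv::Size(5, 5));
    return sqmean - mean.mul(mean);
}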
......@@ -910,13 +910,13 @@ template<typename T, typename ST> struct SqrRowSum : public BaseRowFilter
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
width = (width - 1)*cn;
for( k = 0; k < cn; k++, S++, D++ )
{
......@@ -941,10 +941,10 @@ static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
if( anchor < 0 )
anchor = ksize/2;
if( sdepth == CV_8U && ddepth == CV_32S )
return makePtr<SqrRowSum<uchar, int> >(ksize, anchor);
if( sdepth == CV_8U && ddepth == CV_64F )
......@@ -957,11 +957,11 @@ static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize
return makePtr<SqrRowSum<float, double> >(ksize, anchor);
if( sdepth == CV_64F && ddepth == CV_64F )
return makePtr<SqrRowSum<double, double> >(ksize, anchor);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, sumType));
return Ptr<BaseRowFilter>();
}
......@@ -984,19 +984,19 @@ void cv::sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
if( src.cols == 1 )
ksize.width = 1;
}
int sumType = CV_64F;
if( sdepth == CV_8U )
sumType = CV_32S;
sumType = CV_MAKETYPE( sumType, cn );
int srcType = CV_MAKETYPE(sdepth, cn);
int dstType = CV_MAKETYPE(ddepth, cn);
Ptr<BaseRowFilter> rowFilter = getSqrRowSumFilter(srcType, sumType, ksize.width, anchor.x );
Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
dstType, ksize.height, anchor.y,
normalize ? 1./(ksize.width*ksize.height) : 1);
Ptr<FilterEngine> f = makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
srcType, dstType, sumType, borderType );
f->apply( src, dst );
......
......@@ -86,11 +86,13 @@ FeatureEvaluator::setImage
------------------------------
Assigns an image to the feature evaluator.
.. ocv:function:: bool FeatureEvaluator::setImage(const Mat& img, Size origWinSize)
.. ocv:function:: bool FeatureEvaluator::setImage(InputArray img, Size origWinSize, Size sumSize)
:param img: Matrix of the type ``CV_8UC1`` containing an image where the features are computed.
:param origWinSize: Size of training images.
:param sumSize: The requested size of the integral images (if the actual integral image is smaller, it occupies the top-left corner of an image of the requested size). Because the features are represented as offsets from the image origin, using the same sumSize for all scales avoids constantly readjusting the features to different scales.
The method assigns to the feature evaluator the image on which the features will be computed.
......
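For context on the documentation above, here is a usage sketch of the three-argument setImage overload. It assumes the public FeatureEvaluator::create factory and its HAAR type constant; the scale step, window size, and helper name are illustrative, not taken from the commit:

#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>

// Scan an 8-bit grayscale image over a pyramid of scales, reusing one
// sumSize for every scale so the precomputed feature offsets stay valid.
void scanScales(const cv::Mat& gray)
{
    cv::Ptr<cv::FeatureEvaluator> eval =
        cv::FeatureEvaluator::create(cv::FeatureEvaluator::HAAR);

    cv::Size origWinSize(24, 24);                    // training window size (assumed)
    cv::Size sumSize(gray.cols + 1, gray.rows + 1);  // large enough for every scale

    cv::Mat scaled;
    for( double factor = 1.0; ; factor *= 1.25 )
    {
        cv::Size sz(cvRound(gray.cols/factor), cvRound(gray.rows/factor));
        if( sz.width < origWinSize.width || sz.height < origWinSize.height )
            break;
        cv::resize(gray, scaled, sz);
        if( !eval->setImage(scaled, origWinSize, sumSize) )
            break;
        // ... slide the detection window over 'scaled' and evaluate features ...
    }
}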
......@@ -49,14 +49,14 @@ public:
protected:
enum { SUM_ALIGN = 64 };
bool detectSingleScale( InputArray image, Size processingRectSize,
int yStep, double factor, std::vector<Rect>& candidates,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
Size sumSize0, bool outputRejectLevels = false );
bool ocl_detectSingleScale( InputArray image, Size processingRectSize,
int yStep, double factor, Size sumSize0 );
void detectMultiScaleNoGrouping( InputArray image, std::vector<Rect>& candidates,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
......@@ -109,19 +109,19 @@ protected:
int ntrees;
float threshold;
};
struct Stump
{
Stump() {};
Stump(int _featureIdx, float _threshold, float _left, float _right)
: featureIdx(_featureIdx), threshold(_threshold), left(_left), right(_right) {}
int featureIdx;
float threshold;
float left;
float right;
};
Data();
bool read(const FileNode &node);
......@@ -151,7 +151,7 @@ protected:
UMat ufacepos, ustages, ustumps, usubsets;
ocl::Kernel cascadeKernel;
bool tryOpenCL;
Mutex mtx;
};
......@@ -240,7 +240,7 @@ protected:
#define CALC_SUM_OFS_(p0, p1, p2, p3, ptr) \
((ptr)[p0] - (ptr)[p1] - (ptr)[p2] + (ptr)[p3])
#define CALC_SUM_OFS(rect, ptr) CALC_SUM_OFS_((rect)[0], (rect)[1], (rect)[2], (rect)[3], ptr)
//---------------------------------------------- HaarEvaluator ---------------------------------------
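The CALC_SUM_OFS_ macro above encodes the usual four-corner integral-image identity: the sum of the source over a rectangle equals top-left minus top-right minus bottom-left plus bottom-right of the integral image. A standalone restatement with plain cv::Mat indexing (function and variable names are illustrative only):

#include <opencv2/imgproc.hpp>

// Sum of src8u over rectangle r, assuming r lies inside the image.
// The final expression has exactly the shape of CALC_SUM_OFS_(p0,p1,p2,p3,ptr).
int rectSum(const cv::Mat& src8u, const cv::Rect& r)
{
    cv::Mat sum;                                    // (rows+1) x (cols+1), CV_32S
    cv::integral(src8u, sum, CV_32S);
    const int* ptr = sum.ptr<int>();
    int step = (int)(sum.step/sizeof(int));
    int p0 = r.y*step + r.x;                        // top-left
    int p1 = r.y*step + r.x + r.width;              // top-right
    int p2 = (r.y + r.height)*step + r.x;           // bottom-left
    int p3 = (r.y + r.height)*step + r.x + r.width; // bottom-right
    return ptr[p0] - ptr[p1] - ptr[p2] + ptr[p3];
}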
......@@ -343,7 +343,7 @@ inline void HaarEvaluator::OptFeature :: setOffsets( const Feature& _f, int step
weight[0] = _f.rect[0].weight;
weight[1] = _f.rect[1].weight;
weight[2] = _f.rect[2].weight;
Rect r2 = weight[2] > 0 ? _f.rect[2].r : Rect(0,0,0,0);
if (_f.tilted)
{
......@@ -615,7 +615,7 @@ inline int predictOrderedStump( CascadeClassifierImpl& cascade,
int nstages = (int)cascade.data.stages.size();
double tmp = 0;
for( int stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
const CascadeClassifierImpl::Data::Stage& stage = cascadeStages[stageIdx];
......@@ -677,7 +677,7 @@ inline int predictCategoricalStump( CascadeClassifierImpl& cascade,
sum = (double)tmp;
return -si;
}
cascadeStumps += ntrees;
cascadeSubsets += ntrees*subsetSize;
}
......
......@@ -121,16 +121,15 @@ static bool convert(const String& oldcascade, const String& newcascade)
FileNode sznode = oldroot[ICV_HAAR_SIZE_NAME];
if( sznode.empty() )
return false;
int maxdepth = 0;
Size cascadesize;
cascadesize.width = (int)sznode[0];
cascadesize.height = (int)sznode[1];
std::vector<HaarFeature> features;
size_t i, j, k, n;
int i, j, k, n;
FileNode stages_seq = oldroot[ICV_HAAR_STAGES_NAME];
size_t nstages = stages_seq.size();
int nstages = (int)stages_seq.size();
std::vector<HaarStageClassifier> stages(nstages);
for( i = 0; i < nstages; i++ )
......@@ -139,14 +138,14 @@ static bool convert(const String& oldcascade, const String& newcascade)
HaarStageClassifier& stage = stages[i];
stage.threshold = (double)stagenode[ICV_HAAR_STAGE_THRESHOLD_NAME];
FileNode weaks_seq = stagenode[ICV_HAAR_TREES_NAME];
size_t nweaks = weaks_seq.size();
int nweaks = (int)weaks_seq.size();
stage.weaks.resize(nweaks);
for( j = 0; j < nweaks; j++ )
{
HaarClassifier& weak = stage.weaks[j];
FileNode weaknode = weaks_seq[j];
size_t nnodes = weaknode.size();
int nnodes = (int)weaknode.size();
for( n = 0; n < nnodes; n++ )
{
......@@ -157,7 +156,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
node.f = (int)features.size();
f.tilted = (int)fnode[ICV_HAAR_TILTED_NAME] != 0;
FileNode rects_seq = fnode[ICV_HAAR_RECTS_NAME];
size_t nrects = rects_seq.size();
int nrects = (int)rects_seq.size();
for( k = 0; k < nrects; k++ )
{
......@@ -199,9 +198,9 @@ static bool convert(const String& oldcascade, const String& newcascade)
if( !newfs.isOpened() )
return false;
size_t maxWeakCount = 0, nfeatures = features.size();
int maxWeakCount = 0, nfeatures = (int)features.size();
for( i = 0; i < nstages; i++ )
maxWeakCount = std::max(maxWeakCount, stages[i].weaks.size());
maxWeakCount = std::max(maxWeakCount, (int)stages[i].weaks.size());
newfs << "cascade" << "{:opencv-cascade-classifier"
<< "stageType" << "BOOST"
......@@ -219,7 +218,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
for( i = 0; i < nstages; i++ )
{
size_t nweaks = stages[i].weaks.size();
int nweaks = (int)stages[i].weaks.size();
newfs << "{" << "maxWeakCount" << (int)nweaks
<< "stageThreshold" << stages[i].threshold
<< "weakClassifiers" << "[";
......@@ -227,7 +226,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
{
const HaarClassifier& c = stages[i].weaks[j];
newfs << "{" << "internalNodes" << "[";
size_t nnodes = c.nodes.size(), nleaves = c.leaves.size();
int nnodes = (int)c.nodes.size(), nleaves = (int)c.leaves.size();
for( k = 0; k < nnodes; k++ )
newfs << c.nodes[k].left << c.nodes[k].right
<< c.nodes[k].f << c.nodes[k].threshold;
......@@ -246,7 +245,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
{
const HaarFeature& f = features[i];
newfs << "{" << "rects" << "[";
for( j = 0; j < (size_t)HaarFeature::RECT_NUM; j++ )
for( j = 0; j < HaarFeature::RECT_NUM; j++ )
{
if( j >= 2 && fabs(f.rect[j].weight) < FLT_EPSILON )
break;
......
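The index-type changes in convert() above all follow one pattern: size_t loop counters and size variables are replaced by int, with explicit casts of the container sizes, which removes the implicit size-conversion and signed/unsigned-comparison warnings the commit message refers to. A minimal standalone illustration (names are not from the commit):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> leaves(4, 0.5f);

    // Cast the size once; from here on all loop comparisons deal in plain int,
    // so no sign/size-conversion warnings are emitted.
    int nleaves = (int)leaves.size();
    for( int k = 0; k < nleaves; k++ )
        std::printf("leaf %d = %.2f\n", k, leaves[k]);
    return 0;
}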
......@@ -44,7 +44,7 @@ __kernel void runHaarClassifierStump(
int iy = get_global_id(1)*xyscale;
sumstep /= sizeof(int);
sqsumstep /= sizeof(int);
if( ix < imgsize.x && iy < imgsize.y )
{
int ntrees;
......@@ -52,7 +52,7 @@ __kernel void runHaarClassifierStump(
float s = 0.f;
__global const Stump* stump = stumps;
__global const OptFeature* f;
__global const int* psum = sum + mad24(iy, sumstep, ix);
__global const int* pnsum = psum + mad24(normrect.y, sumstep, normrect.x);
int normarea = normrect.z * normrect.w;
......@@ -64,7 +64,7 @@ __kernel void runHaarClassifierStump(
float4 weight, vsval;
int4 ofs, ofs0, ofs1, ofs2;
nf = nf > 0 ? nf : 1.f;
for( stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
ntrees = stages[stageIdx].ntrees;
......@@ -73,7 +73,7 @@ __kernel void runHaarClassifierStump(
{
f = optfeatures + stump->featureIdx;
weight = f->weight;
ofs = f->ofs[0];
sval = (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.x;
ofs = f->ofs[1];
......@@ -83,14 +83,14 @@ __kernel void runHaarClassifierStump(
ofs = f->ofs[2];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.z;
}
s += (sval < stump->threshold*nf) ? stump->left : stump->right;
}
if( s < stages[stageIdx].threshold )
break;
}
if( stageIdx == nstages )
{
int nfaces = atomic_inc(facepos);
......@@ -128,7 +128,7 @@ __kernel void runLBPClassifierStump(
int iy = get_global_id(1)*xyscale;
sumstep /= sizeof(int);
sqsumstep /= sizeof(int);
if( ix < imgsize.x && iy < imgsize.y )
{
int ntrees;
......@@ -137,7 +137,7 @@ __kernel void runLBPClassifierStump(
__global const Stump* stump = stumps;
__global const int* bitset = bitsets;
__global const OptFeature* f;
__global const int* psum = sum + mad24(iy, sumstep, ix);
__global const int* pnsum = psum + mad24(normrect.y, sumstep, normrect.x);
int normarea = normrect.z * normrect.w;
......@@ -149,7 +149,7 @@ __kernel void runLBPClassifierStump(
float4 weight;
int4 ofs;
nf = nf > 0 ? nf : 1.f;
for( stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
ntrees = stages[stageIdx].ntrees;
......@@ -157,17 +157,17 @@ __kernel void runLBPClassifierStump(
for( i = 0; i < ntrees; i++, stump++, bitset += bitsetSize )
{
f = optfeatures + stump->featureIdx;
weight = f->weight;
// compute the LBP feature value into val
s += (bitset[val >> 5] & (1 << (val & 31))) ? stump->left : stump->right;
}
if( s < stages[stageIdx].threshold )
break;
}
if( stageIdx == nstages )
{
int nfaces = atomic_inc(facepos);
......@@ -183,4 +183,3 @@ __kernel void runLBPClassifierStump(
}
}
#endif
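Both kernels above, like the host-side predictOrderedStump, implement the same soft-cascade control flow: accumulate stump responses within a stage and reject the window as soon as the stage sum drops below the stage threshold. A compact host-side sketch of that flow with simplified types; the comparison shown is the Haar form (sval < threshold*nf), while the LBP kernel substitutes a bitset lookup for the stump response, and all names here are illustrative:

#include <vector>

struct Stump { int featureIdx; float threshold, left, right; };
struct Stage { int first, ntrees; float threshold; };

// Returns a positive value if the window passes every stage, otherwise the
// negated index of the rejecting stage (mirroring the kernels' early exit).
template<typename FeatureEval>
int predictWindow(const std::vector<Stage>& stages,
                  const std::vector<Stump>& stumps,
                  FeatureEval evalFeature, float nf)
{
    int nstages = (int)stages.size();
    for( int stageIdx = 0; stageIdx < nstages; stageIdx++ )
    {
        const Stage& stage = stages[stageIdx];
        float s = 0.f;
        for( int i = 0; i < stage.ntrees; i++ )
        {
            const Stump& stump = stumps[stage.first + i];
            float sval = evalFeature(stump.featureIdx);   // Haar feature value
            s += (sval < stump.threshold*nf) ? stump.left : stump.right;
        }
        if( s < stage.threshold )
            return -stageIdx;                             // rejected early
    }
    return 1;                                             // candidate detection
}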