Commit eeb786fc authored by Vadim Pisarevsky

fixed compile warnings from MSVC; fixed warnings in Python bindings; added some debugging code

parent 9cb79b9a
@@ -205,7 +205,7 @@ public:
 class CV_EXPORTS_W NormalBayesClassifier : public StatModel
 {
 public:
-    class CV_EXPORTS_W_MAP Params
+    class CV_EXPORTS_W Params
     {
     public:
         Params();
@@ -231,8 +231,8 @@ public:
     public:
         Params(int defaultK=10, bool isclassifier=true);
-        int defaultK;
-        bool isclassifier;
+        CV_PROP_RW int defaultK;
+        CV_PROP_RW bool isclassifier;
     };
     virtual void setParams(const Params& p) = 0;
     virtual Params getParams() const = 0;
@@ -328,9 +328,9 @@ public:
         explicit Params(int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
                         const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
                                                                   EM::DEFAULT_MAX_ITERS, 1e-6));
-        int nclusters;
-        int covMatType;
-        TermCriteria termCrit;
+        CV_PROP_RW int nclusters;
+        CV_PROP_RW int covMatType;
+        CV_PROP_RW TermCriteria termCrit;
     };
     virtual void setParams(const Params& p) = 0;
...
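Note on the Params hunks above: in OpenCV's wrapper generator, CV_EXPORTS_W marks a class for wrapping and CV_PROP_RW marks a field as a read/write property in the generated Python bindings, while CV_EXPORTS_W_MAP instead maps a class to a dict-like object, which is presumably what produced the binding warnings here. A minimal illustrative sketch of the convention, using a hypothetical MyParams class that is not part of this commit:

    // Hypothetical class showing the wrapper-macro convention.
    class CV_EXPORTS_W MyParams
    {
    public:
        CV_PROP_RW int    maxIters;   // exposed as a read/write property
        CV_PROP_RW double epsilon;    // in the generated Python bindings
    };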
@@ -123,7 +123,7 @@ public:
     void clear()
     {
         min_val = max_val = min_val1 = max_val1 = 0.;
-        rng = RNG(-1);
+        rng = RNG((uint64)-1);
         weights.clear();
         trained = false;
     }
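The RNG(-1) changes here and in the rtrees.cpp, svm.cpp and tree.cpp hunks below all address the same MSVC complaint: cv::RNG's constructor takes a uint64 state, so passing a literal -1 draws a signed/unsigned conversion warning (likely C4245). A hedged sketch:

    #include <opencv2/core.hpp>
    using namespace cv;

    RNG a(-1);            // implicit int -> uint64 conversion: MSVC warns
    RNG b((uint64)-1);    // explicit cast yields the same all-ones seed
                          // (0xFFFFFFFFFFFFFFFF) without the warning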
@@ -300,7 +300,7 @@ public:
     {
         int maxIdx[] = {0, 0};
         minMaxIdx(outputs, 0, 0, 0, maxIdx);
-        return maxIdx[0] + maxIdx[1];
+        return (float)(maxIdx[0] + maxIdx[1]);
     }
     return 0.f;
@@ -702,9 +702,8 @@ public:
             train_backprop( inputs, outputs, sw, termcrit ) :
             train_rprop( inputs, outputs, sw, termcrit );
-        trained = true;
-        return trained;
+        trained = iter > 0;
+        return iter;
     }
     int train_backprop( const Mat& inputs, const Mat& outputs, const Mat& _sw, TermCriteria termCrit )
...
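The ANN_MLP change above is behavioral, not just a warning fix: train() used to set trained = true unconditionally, and now reports success only if the optimizer completed at least one iteration. A sketch of the new control flow; useBackprop stands in for the condition on the truncated line above it, and the surrounding declarations are assumed:

    // useBackprop, inputs, outputs, sw, termcrit: from the surrounding code
    int iter = useBackprop ?
               train_backprop( inputs, outputs, sw, termcrit ) :
               train_rprop( inputs, outputs, sw, termcrit );
    trained = iter > 0;   // previously: trained = true; unconditionally
    return iter;          // a non-zero iteration count converts to true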
@@ -220,12 +220,13 @@ public:
     void updateWeightsAndTrim( int treeidx, vector<int>& sidx )
     {
+        putchar('<');
         int i, n = (int)w->sidx.size();
         int nvars = (int)varIdx.size();
         double sumw = 0., C = 1.;
-        cv::AutoBuffer<double> buf(n*3 + nvars);
+        cv::AutoBuffer<double> buf(n + nvars);
         double* result = buf;
-        float* sbuf = (float*)(result + n*3);
+        float* sbuf = (float*)(result + n);
         Mat sample(1, nvars, CV_32F, sbuf);
         int predictFlags = bparams.boostType == Boost::DISCRETE ? (PREDICT_MAX_VOTE | RAW_OUTPUT) : PREDICT_SUM;
         predictFlags |= COMPRESSED_INPUT;
@@ -373,6 +374,7 @@ public:
             if( w->sample_weights[si] >= threshold )
                 sidx.push_back(si);
         }
+        putchar('>'); fflush(stdout);
     }
     float predictTrees( const Range& range, const Mat& sample, int flags0 ) const
...
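The putchar('<') / putchar('>') pair is part of the "some debugging code" from the commit message: it brackets each updateWeightsAndTrim call on stdout, so a hang inside boosting shows up between the two markers. The fflush matters because stdout may be fully buffered; without it the marker can sit in the buffer while the process spins. A self-contained sketch of the idiom, with a hypothetical longRunningStep:

    #include <cstdio>

    void longRunningStep()      // stand-in for the real work
    {
        /* ... expensive computation ... */
    }

    int main()
    {
        putchar('<');           // entering the slow section
        longRunningStep();
        putchar('>');           // leaving it; flush so the marker is
        fflush(stdout);         // visible even if the program hangs later
        return 0;
    }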
@@ -310,7 +310,7 @@ public:
         varType.create(1, nvars, CV_8U);
         varType = Scalar::all(VAR_ORDERED);
         if( noutputvars == 1 )
-            varType.at<uchar>(ninputvars) = responses.type() < CV_32F ? VAR_CATEGORICAL : VAR_ORDERED;
+            varType.at<uchar>(ninputvars) = (uchar)(responses.type() < CV_32F ? VAR_CATEGORICAL : VAR_ORDERED);
     }
     if( noutputvars > 1 )
@@ -558,7 +558,7 @@ public:
             if( tp == VAR_MISSED )
                 haveMissed = true;
             rowvals.push_back(val);
-            rowtypes.push_back(tp);
+            rowtypes.push_back((uchar)tp);
             token = strtok(NULL, delimiters);
             if (!token)
                 break;
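Both (uchar) casts above silence MSVC's "possible loss of data" warning (C4244) when an int expression is stored into a CV_8U element; the values involved (VAR_ORDERED, VAR_CATEGORICAL, the per-token type tp) are known to fit in a byte, so the casts only make the narrowing explicit. For instance:

    #include <opencv2/core.hpp>

    int tp = 1;                 // small type flag, always fits in a byte
    uchar a = tp;               // MSVC C4244: int -> uchar, possible loss
    uchar b = (uchar)tp;        // explicit narrowing, warning-free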
@@ -880,7 +880,7 @@ public:
             if( s )
             {
                 j = s[i];
-                CV_DbgAssert( 0 <= j && j < nsamples );
+                CV_Assert( 0 <= j && j < nsamples );
             }
             values[i] = src[j*sstep];
             if( values[i] == MISSED_VAL )
@@ -955,7 +955,7 @@ public:
             if( vptr )
             {
                 j = vptr[i];
-                CV_DbgAssert( 0 <= j && j < nvars );
+                CV_Assert( 0 <= j && j < nvars );
             }
             buf[i] = src[j*vstep];
         }
...
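Switching CV_DbgAssert to CV_Assert in the two hunks above also fits the debugging theme: CV_DbgAssert compiles to nothing in release builds, while CV_Assert checks its condition in every build and throws cv::Exception on failure, so these index checks now fire in release runs too. A sketch, with a hypothetical checkIndex helper:

    #include <opencv2/core.hpp>

    void checkIndex( int j, int nsamples )
    {
        // CV_DbgAssert(expr) is checked only in debug builds;
        // CV_Assert(expr) is checked in every build and throws
        // cv::Exception when the condition fails.
        CV_Assert( 0 <= j && j < nsamples );
    }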
@@ -108,7 +108,7 @@ float StatModel::calcError( const Ptr<TrainData>& data, bool testerr, OutputArra
     if( _resp.needed() )
         resp.copyTo(_resp);
-    return err / n * (isclassifier ? 100 : 1);
+    return (float)(err / n * (isclassifier ? 100 : 1));
 }
 void StatModel::save(const String& filename) const
...
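calcError accumulates err as a double, so returning err / n * ... from a function declared to return float drew MSVC C4244 ("conversion from double to float, possible loss of data"); the explicit (float) cast, like the one in the ANN_MLP hunk earlier, documents the intended narrowing. The inv_scale fix in the next hunk is the same warning from the other direction: 1./k is a double expression, while 1.f/k stays in single precision. Minimal illustration:

    int k = 3;
    float bad  = 1./k;      // double expression: MSVC C4244 on the init
    float good = 1.f/k;     // single-precision throughout, no warning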
@@ -173,7 +173,7 @@ public:
     }
     float result = 0.f;
-    float inv_scale = 1./k;
+    float inv_scale = 1.f/k;
     for( testidx = 0; testidx < testcount; testidx++ )
     {
...
@@ -111,7 +111,7 @@ namespace ml
         termCrit.type |= TermCriteria::EPS;
         termCrit.epsilon = epsilon;
     }
-    int iters = (double)fn["iterations"];
+    int iters = (int)fn["iterations"];
     if( iters > 0 )
     {
         termCrit.type |= TermCriteria::COUNT;
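cv::FileNode converts to several numeric types; the old line read the node as a double and then implicitly narrowed it to int, which is what MSVC warned about. Casting straight to int expresses the intent. A sketch, assuming a hypothetical readIters helper around a FileNode with an "iterations" entry:

    #include <opencv2/core.hpp>

    int readIters( const cv::FileNode& fn )       // hypothetical helper
    {
        int itersBad  = (double)fn["iterations"]; // double -> int: C4244
        int itersGood = (int)fn["iterations"];    // direct integer read
        (void)itersBad;
        return itersGood;
    }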
@@ -134,7 +134,7 @@ namespace ml
     }
     int class_idx;
-    int Tn;
+    double Tn;
     double value;
     int parent;
@@ -164,7 +164,7 @@ namespace ml
     }
     int varIdx;
-    int inversed;
+    bool inversed;
     float quality;
     int next;
     float c;
@@ -179,7 +179,7 @@ namespace ml
     vector<WNode> wnodes;
     vector<WSplit> wsplits;
     vector<int> wsubsets;
-    vector<int> cv_Tn;
+    vector<double> cv_Tn;
     vector<double> cv_node_risk;
     vector<double> cv_node_error;
     vector<int> cv_labels;
...
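The field type changes above match how the tree code uses these members: Tn and cv_Tn are widened to double, presumably because they are computed from double-precision quantities during cost-complexity pruning, and inversed becomes a plain yes/no flag, which also explains the split.inversed = false assignments in the tree.cpp hunks below. The bool change sidesteps MSVC's C4800 warning pattern:

    int  flag = 1;
    bool b1 = flag;           // MSVC C4800: 'int' forced to bool
    bool b2 = (flag != 0);    // explicit comparison, no warning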
@@ -90,7 +90,7 @@ public:
     {
         DTreesImpl::clear();
         oobError = 0.;
-        rng = RNG(-1);
+        rng = RNG((uint64)-1);
     }
     const vector<int>& getActiveVars()
@@ -177,7 +177,6 @@ public:
     for( treeidx = 0; treeidx < ntrees; treeidx++ )
     {
-        putchar('.'); fflush(stdout);
         for( i = 0; i < n; i++ )
             oobmask[i] = (uchar)1;
...
@@ -1587,7 +1587,7 @@ public:
                       bool balanced )
     {
         int svmType = params.svmType;
-        RNG rng(-1);
+        RNG rng((uint64)-1);
         if( svmType == ONE_CLASS )
             // current implementation of "auto" svm does not support the 1-class case.
...
@@ -730,7 +730,7 @@ DTreesImpl::WSplit DTreesImpl::findSplitOrdClass( int vi, const vector<int>& _si
     {
         split.varIdx = vi;
         split.c = (values[sorted_idx[best_i]] + values[sorted_idx[best_i+1]])*0.5f;
-        split.inversed = 0;
+        split.inversed = false;
         split.quality = (float)best_val;
     }
     return split;
@@ -744,12 +744,12 @@ void DTreesImpl::clusterCategories( const double* vectors, int n, int m, double*
     cv::AutoBuffer<double> buf(n + k);
     double *v_weights = buf, *c_weights = buf + n;
     bool modified = true;
-    RNG r(-1);
+    RNG r((uint64)-1);
     // assign labels randomly
     for( i = 0; i < n; i++ )
     {
-        int sum = 0;
+        double sum = 0;
        const double* v = vectors + i*m;
        labels[i] = i < k ? i : r.uniform(0, k);
@@ -1063,7 +1063,7 @@ DTreesImpl::WSplit DTreesImpl::findSplitOrdReg( int vi, const vector<int>& _sidx
     {
         split.varIdx = vi;
         split.c = (values[sorted_idx[best_i]] + values[sorted_idx[best_i+1]])*0.5f;
-        split.inversed = 0;
+        split.inversed = false;
         split.quality = (float)best_val;
     }
     return split;
...
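The int sum to double sum change in clusterCategories is a genuine fix as well as a warning fix: sum accumulates entries of the double vectors array, so an int accumulator would truncate every addend besides drawing C4244. A minimal illustration, with a hypothetical accumulate3:

    double v[] = { 0.3, 0.4, 0.5 };

    double accumulate3()
    {
        double sum = 0;                // with 'int sum', each addend would
        for( int j = 0; j < 3; j++ )   // be truncated toward zero and MSVC
            sum += v[j];               // would warn (C4244)
        return sum;                    // correct total: 1.2
    }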