Commit 74c87a26 authored by Marina Noskova

Delete function areClassesEmpty().

parent 068677ad
@@ -1507,7 +1507,7 @@ public:
 SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach,
 as presented in @cite bottou2010large.
-The classifier has 5 parameters. These are
+The classifier has following parameters:
 - model type,
 - margin type,
 - margin regularization (\f$\lambda\f$),
@@ -1567,11 +1567,8 @@ To use SVMSGD algorithm do as follows:
 // Create empty object
 cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();
-// Set parameters
-svmsgd->setOptimalParameters();
 // Train the Stochastic Gradient Descent SVM
-SvmSgd->train(trainData);
+svmsgd->train(trainData);
 // Predict labels for the new samples
 svmsgd->predict(samples, responses);
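
Note: after this commit the documented workflow really is this short. A minimal self-contained sketch of the new usage (the toy samples, shapes, and labels below are my own illustrative assumptions, not part of the commit):

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>

using namespace cv;
using namespace cv::ml;

int main()
{
    // Toy two-class training set: one sample per row, float responses.
    Mat samples = (Mat_<float>(4, 2) <<  1.f,  1.f,
                                         2.f,  2.f,
                                        -1.f, -1.f,
                                        -2.f, -2.f);
    Mat responses = (Mat_<float>(4, 1) << 1.f, 1.f, -1.f, -1.f);
    Ptr<TrainData> trainData = TrainData::create(samples, ROW_SAMPLE, responses);

    // No explicit svmsgd->setOptimalParameters() call is needed any more:
    // the SVMSGDImpl constructor now applies the optimal defaults itself.
    Ptr<SVMSGD> svmsgd = SVMSGD::create();
    svmsgd->train(trainData);

    Mat predicted;
    svmsgd->predict(samples, predicted); // one response per input row
    return 0;
}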
...
@@ -99,8 +99,6 @@ public:
 private:
     void updateWeights(InputArray sample, bool isPositive, float stepSize, Mat &weights);
-    std::pair<bool,bool> areClassesEmpty(Mat responses);
     void writeParams( FileStorage &fs ) const;
     void readParams( const FileNode &fn );
@@ -138,26 +136,6 @@ Ptr<SVMSGD> SVMSGD::create()
     return makePtr<SVMSGDImpl>();
 }
-std::pair<bool,bool> SVMSGDImpl::areClassesEmpty(Mat responses)
-{
-    CV_Assert(responses.cols == 1 || responses.rows == 1);
-    std::pair<bool,bool> emptyInClasses(true, true);
-    int limitIndex = responses.rows;
-    for(int index = 0; index < limitIndex; index++)
-    {
-        if (isPositive(responses.at<float>(index)))
-            emptyInClasses.first = false;
-        else
-            emptyInClasses.second = false;
-        if (!emptyInClasses.first && ! emptyInClasses.second)
-            break;
-    }
-    return emptyInClasses;
-}
 void SVMSGDImpl::normalizeSamples(Mat &samples, Mat &average, float &multiplier)
 {
     int featuresCount = samples.cols;
@@ -248,16 +226,20 @@ bool SVMSGDImpl::train(const Ptr<TrainData>& data, int)
     int featureCount = trainSamples.cols;
     Mat trainResponses = data->getTrainResponses(); // (trainSamplesCount x 1) matrix
-    std::pair<bool,bool> areEmpty = areClassesEmpty(trainResponses);
-    if ( areEmpty.first && areEmpty.second )
+    CV_Assert(trainResponses.rows == trainSamples.rows);
+    if (trainResponses.empty())
     {
         return false;
     }
-    if ( areEmpty.first || areEmpty.second )
+    int positiveCount = countNonZero(trainResponses >= 0);
+    int negativeCount = countNonZero(trainResponses < 0);
+    if ( positiveCount <= 0 || negativeCount <= 0 )
     {
         weights_ = Mat::zeros(1, featureCount, CV_32F);
-        shift_ = areEmpty.first ? -1.f : 1.f;
+        shift_ = (positiveCount > 0) ? 1.f : -1.f;
         return true;
     }
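
The countNonZero() calls above replace the deleted helper. As a standalone sketch of why this works (the response values are hypothetical): comparing a Mat against a scalar yields an 8-bit mask that is 255 where the comparison holds, so counting its nonzero entries counts the samples in each class.

#include <opencv2/core.hpp>

int main()
{
    // Hypothetical (trainSamplesCount x 1) response column, as in train().
    cv::Mat responses = (cv::Mat_<float>(4, 1) << 1.f, -1.f, 1.f, 1.f);

    // responses >= 0 yields a CV_8U mask; countNonZero counts the 255s.
    int positiveCount = cv::countNonZero(responses >= 0); // 3
    int negativeCount = cv::countNonZero(responses < 0);  // 1

    // A single empty class is the degenerate case the deleted
    // areClassesEmpty() used to detect via its pair of flags.
    bool oneClassMissing = (positiveCount <= 0 || negativeCount <= 0);
    return oneClassMissing ? 1 : 0;
}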
@@ -340,7 +322,7 @@ float SVMSGDImpl::predict( InputArray _samples, OutputArray _results, int ) const
     int nSamples = samples.rows;
     cv::Mat results;
-    CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32F );
+    CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32FC1);
     if( _results.needed() )
     {
@@ -498,17 +480,7 @@ void SVMSGDImpl::clear()
 SVMSGDImpl::SVMSGDImpl()
 {
     clear();
-    params.svmsgdType = -1;
-    params.marginType = -1;
-    // Parameters for learning
-    params.marginRegularization = 0; // regularization
-    params.initialStepSize = 0; // learning rate (ideally should be large at beginning and decay each iteration)
-    params.stepDecreasingPower = 0;
-    TermCriteria _termCrit(TermCriteria::COUNT + TermCriteria::EPS, 0, 0);
-    params.termCrit = _termCrit;
+    setOptimalParameters();
 }
 void SVMSGDImpl::setOptimalParameters(int svmsgdType, int marginType)
...
@@ -182,8 +182,6 @@ void CV_SVMSGDTrainTest::run( int /*start_from*/ )
 {
     cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();
-    svmsgd->setOptimalParameters();
     svmsgd->train(data);
     Mat responses;
...
@@ -46,7 +46,6 @@ void addPointRetrainAndRedraw(Data &data, int x, int y, int response);
 bool doTrain( const Mat samples, const Mat responses, Mat &weights, float &shift)
 {
     cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();
-    svmsgd->setOptimalParameters();
     cv::Ptr<TrainData> trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses);
     svmsgd->train( trainData );