Commit 3e26086f authored by Maksim Shabunin's avatar Maksim Shabunin

Reworked ML logistic regression implementation, initial version

parent 71770eb7
......@@ -571,56 +571,43 @@ public:
/****************************************************************************************\
* Logistic Regression *
\****************************************************************************************/
// Legacy parameter bundle for the pre-rework LogisticRegression API
// (superseded by the nested LogisticRegression::Params class).
struct CV_EXPORTS LogisticRegressionParams
{
double alpha; // learning rate for gradient descent
int num_iters; // number of gradient-descent iterations
int norm; // regularization norm selector (REG_L1 / REG_L2)
int regularized; // non-zero enables regularization
int train_method; // BATCH or MINI_BATCH gradient descent
int mini_batch_size; // batch size used when train_method == MINI_BATCH
cv::TermCriteria term_crit; // termination criteria (iteration count + epsilon)
LogisticRegressionParams();
LogisticRegressionParams(double learning_rate, int iters, int train_method, int normlization, int reg, int mini_batch_size);
};
class CV_EXPORTS LogisticRegression
class CV_EXPORTS LogisticRegression : public StatModel
{
public:
LogisticRegression( const LogisticRegressionParams& params = LogisticRegressionParams());
LogisticRegression(cv::InputArray data_ip, cv::InputArray labels_ip, const LogisticRegressionParams& params);
virtual ~LogisticRegression();
class CV_EXPORTS Params
{
public:
Params(double learning_rate = 0.001,
int iters = 1000,
int method = LogisticRegression::BATCH,
int normlization = LogisticRegression::REG_L2,
int reg = 1,
int batch_size = 1);
double alpha;
int num_iters;
int norm;
int regularized;
int train_method;
int mini_batch_size;
cv::TermCriteria term_crit;
};
enum { REG_L1 = 0, REG_L2 = 1};
enum { BATCH = 0, MINI_BATCH = 1};
virtual bool train(cv::InputArray data_ip, cv::InputArray label_ip);
virtual void predict( cv::InputArray data, cv::OutputArray predicted_labels ) const;
virtual void write(FileStorage& fs) const;
virtual void read(const FileNode& fn);
// Algorithm interface
virtual void write( FileStorage &fs ) const = 0;
virtual void read( const FileNode &fn ) = 0;
const cv::Mat get_learnt_thetas() const;
virtual void clear();
protected:
LogisticRegressionParams params;
cv::Mat learnt_thetas;
std::string default_model_name;
std::map<int, int> forward_mapper;
std::map<int, int> reverse_mapper;
// StatModel interface
virtual bool train( const Ptr<TrainData>& trainData, int flags=0 ) = 0;
virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
virtual void clear() = 0;
cv::Mat labels_o;
cv::Mat labels_n;
virtual Mat get_learnt_thetas() const = 0;
static cv::Mat calc_sigmoid(const cv::Mat& data);
virtual double compute_cost(const cv::Mat& data, const cv::Mat& labels, const cv::Mat& init_theta);
virtual cv::Mat compute_batch_gradient(const cv::Mat& data, const cv::Mat& labels, const cv::Mat& init_theta);
virtual cv::Mat compute_mini_batch_gradient(const cv::Mat& data, const cv::Mat& labels, const cv::Mat& init_theta);
virtual bool set_label_map(const cv::Mat& labels);
static cv::Mat remap_labels(const cv::Mat& labels, const std::map<int, int>& lmap);
static Ptr<LogisticRegression> create( const Params& params = Params() );
};
/****************************************************************************************\
......
......@@ -55,55 +55,72 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::ml;
using namespace std;
// Default configuration: batch gradient descent with L2 regularization,
// learning rate 0.001 and at most 1000 iterations.
LogisticRegressionParams::LogisticRegressionParams()
{
    alpha = 0.001;
    num_iters = 1000;
    norm = LogisticRegression::REG_L2;
    regularized = 1;
    train_method = LogisticRegression::BATCH;
    mini_batch_size = 1;
    // Termination mirrors the iteration count and learning rate above.
    term_crit = cv::TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
LogisticRegressionParams::LogisticRegressionParams( double learning_rate, int iters, int train_algo = LogisticRegression::BATCH, int normlization = LogisticRegression::REG_L2, int reg = 1, int mb_size = 5)
namespace cv {
namespace ml {
// Initializes the training parameters. term_crit is derived from
// num_iters/alpha so the solver stops after the requested iteration
// count or when the step falls below the learning rate.
// Fix: drops diff-residue assignments that referenced the old parameter
// names `train_algo`/`mb_size`, which do not exist in this signature.
LogisticRegression::Params::Params(double learning_rate,
                                   int iters,
                                   int method,
                                   int normlization,
                                   int reg,
                                   int batch_size)
{
    alpha = learning_rate;
    num_iters = iters;
    norm = normlization;
    regularized = reg;
    train_method = method;
    mini_batch_size = batch_size;
    term_crit = cv::TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
LogisticRegression::LogisticRegression(const LogisticRegressionParams& pms)
class LogisticRegressionImpl : public LogisticRegression
{
default_model_name = "my_lr";
this->params = pms;
}
LogisticRegression::LogisticRegression(cv::InputArray data, cv::InputArray labels, const LogisticRegressionParams& pms)
{
default_model_name = "my_lr";
this->params = pms;
train(data, labels);
}
LogisticRegression::~LogisticRegression()
public:
LogisticRegressionImpl(const Params& pms)
: params(pms)
{
}
virtual ~LogisticRegressionImpl() {}
virtual bool train( const Ptr<TrainData>& trainData, int=0 );
virtual float predict(InputArray samples, OutputArray results, int) const;
virtual void clear();
virtual void write(FileStorage& fs) const;
virtual void read(const FileNode& fn);
virtual cv::Mat get_learnt_thetas() const;
virtual int getVarCount() const { return learnt_thetas.cols; }
virtual bool isTrained() const { return !learnt_thetas.empty(); }
virtual bool isClassifier() const { return true; }
virtual String getDefaultModelName() const { return "opencv_ml_lr"; }
protected:
cv::Mat calc_sigmoid(const cv::Mat& data) const;
double compute_cost(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta);
cv::Mat compute_batch_gradient(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta);
cv::Mat compute_mini_batch_gradient(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta);
bool set_label_map(const cv::Mat& _labels_i);
cv::Mat remap_labels(const cv::Mat& _labels_i, const map<int, int>& lmap) const;
protected:
Params params;
cv::Mat learnt_thetas;
map<int, int> forward_mapper;
map<int, int> reverse_mapper;
cv::Mat labels_o;
cv::Mat labels_n;
};
// Factory: builds a concrete implementation with the given parameters.
// Fix: removes the stray `clear();` call — residue of the removed
// destructor body — which named no function in scope here.
Ptr<LogisticRegression> LogisticRegression::create(const Params& params)
{
    return makePtr<LogisticRegressionImpl>(params);
}
bool LogisticRegression::train(cv::InputArray data_ip, cv::InputArray labels_ip)
bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
{
clear();
cv::Mat _data_i = data_ip.getMat();
cv::Mat _labels_i = labels_ip.getMat();
cv::Mat _data_i = trainData->getSamples();
cv::Mat _labels_i = trainData->getResponses();
CV_Assert( !_labels_i.empty() && !_data_i.empty());
......@@ -194,13 +211,12 @@ bool LogisticRegression::train(cv::InputArray data_ip, cv::InputArray labels_ip)
return ok;
}
void LogisticRegression::predict( cv::InputArray _ip_data, cv::OutputArray _output_predicted_labels ) const
float LogisticRegressionImpl::predict(InputArray samples, OutputArray results, int) const
{
/* returns a class of the predicted class
class names can be 1,2,3,4, .... etc */
cv::Mat thetas, data, pred_labs;
data = _ip_data.getMat();
data = samples.getMat();
// check if learnt_mats array is populated
if(this->learnt_thetas.total()<=0)
......@@ -266,19 +282,20 @@ void LogisticRegression::predict( cv::InputArray _ip_data, cv::OutputArray _outp
pred_labs = remap_labels(labels_c, this->reverse_mapper);
// convert pred_labs to integer type
pred_labs.convertTo(pred_labs, CV_32S);
pred_labs.copyTo(_output_predicted_labels);
pred_labs.copyTo(results);
// TODO: determine
return 0;
}
// Element-wise logistic sigmoid: 1 / (1 + exp(-x)).
// Fix: removes the stale pre-rework signature line the diff left
// immediately above this definition.
cv::Mat LogisticRegressionImpl::calc_sigmoid(const cv::Mat& data) const
{
    cv::Mat dest;
    cv::exp(-data, dest);
    return 1.0/(1.0+dest);
}
double LogisticRegression::compute_cost(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta)
double LogisticRegressionImpl::compute_cost(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta)
{
int llambda = 0;
int m;
int n;
......@@ -328,7 +345,7 @@ double LogisticRegression::compute_cost(const cv::Mat& _data, const cv::Mat& _la
return cost;
}
cv::Mat LogisticRegression::compute_batch_gradient(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta)
cv::Mat LogisticRegressionImpl::compute_batch_gradient(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta)
{
// implements batch gradient descent
if(this->params.alpha<=0)
......@@ -397,7 +414,7 @@ cv::Mat LogisticRegression::compute_batch_gradient(const cv::Mat& _data, const c
return theta_p;
}
cv::Mat LogisticRegression::compute_mini_batch_gradient(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta)
cv::Mat LogisticRegressionImpl::compute_mini_batch_gradient(const cv::Mat& _data, const cv::Mat& _labels, const cv::Mat& _init_theta)
{
// implements batch gradient descent
int lambda_l = 0;
......@@ -488,7 +505,7 @@ cv::Mat LogisticRegression::compute_mini_batch_gradient(const cv::Mat& _data, co
return theta_p;
}
bool LogisticRegression::set_label_map(const cv::Mat& _labels_i)
bool LogisticRegressionImpl::set_label_map(const cv::Mat &_labels_i)
{
// this function creates two maps to map user defined labels to program friendly labels two ways.
int ii = 0;
......@@ -522,7 +539,7 @@ bool LogisticRegression::set_label_map(const cv::Mat& _labels_i)
return ok;
}
cv::Mat LogisticRegression::remap_labels(const Mat& _labels_i, const std::map<int, int>& lmap)
cv::Mat LogisticRegressionImpl::remap_labels(const cv::Mat& _labels_i, const map<int, int>& lmap) const
{
cv::Mat labels;
_labels_i.convertTo(labels, CV_32S);
......@@ -538,14 +555,14 @@ cv::Mat LogisticRegression::remap_labels(const Mat& _labels_i, const std::map<in
return new_labels;
}
void LogisticRegression::clear()
void LogisticRegressionImpl::clear()
{
this->learnt_thetas.release();
this->labels_o.release();
this->labels_n.release();
}
void LogisticRegression::write(FileStorage& fs) const
void LogisticRegressionImpl::write(FileStorage& fs) const
{
// check if open
if(fs.isOpened() == 0)
......@@ -568,7 +585,7 @@ void LogisticRegression::write(FileStorage& fs) const
fs<<"o_labels"<<this->labels_o;
}
void LogisticRegression::read(const FileNode& fn )
void LogisticRegressionImpl::read(const FileNode& fn)
{
// check if empty
if(fn.empty())
......@@ -598,8 +615,12 @@ void LogisticRegression::read(const FileNode& fn )
}
}
// Accessor for the learnt coefficient matrix (empty until trained).
// Fix: removes the stale pre-rework signature line the diff left
// immediately above this definition.
cv::Mat LogisticRegressionImpl::get_learnt_thetas() const
{
    return this->learnt_thetas;
}
}
}
/* End of file. */
This diff is collapsed.
///////////////////////////////////////////////////////////////////////////////////////
/*//////////////////////////////////////////////////////////////////////////////////////
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
// By downloading, copying, installing or using the software you agree to this license.
......@@ -11,7 +11,8 @@
// Rahul Kavi rahulkavi[at]live[at]com
//
// contains a subset of data from the popular Iris Dataset (taken from "http://archive.ics.uci.edu/ml/datasets/Iris")
// contains a subset of data from the popular Iris Dataset (taken from
// "http://archive.ics.uci.edu/ml/datasets/Iris")
// # You are free to use, change, or redistribute the code in any way you wish for
// # non-commercial purposes, but please maintain the name of the original author.
......@@ -24,7 +25,6 @@
// # Logistic Regression ALGORITHM
// License Agreement
// For Open Source Computer Vision Library
......@@ -54,7 +54,7 @@
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
// the use of this software, even if advised of the possibility of such damage.*/
#include <iostream>
......@@ -62,42 +62,45 @@
#include <opencv2/ml/ml.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
using namespace cv::ml;
int main()
{
Mat data_temp, labels_temp;
const String filename = "data01.xml";
cout << "**********************************************************************" << endl;
cout << filename
<< " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;
cout << "Each of the collected images are of size 28 x 28 re-arranged to 1 x 784 matrix"
<< endl;
cout << "**********************************************************************" << endl;
Mat data, labels;
{
cout << "loading the dataset" << endl;
FileStorage f;
if(f.open(filename, FileStorage::READ))
{
f["datamat"] >> data;
f["labelsmat"] >> labels;
f.release();
}
else
{
cerr << "File can not be opened: " << filename << endl;
return 1;
}
data.convertTo(data, CV_32F);
labels.convertTo(labels, CV_32F);
cout << "read " << data.rows << " rows of data" << endl;
}
Mat data_train, data_test;
Mat labels_train, labels_test;
Mat responses, result;
FileStorage fs1, fs2;
FileStorage f;
cout<<"*****************************************************************************************"<<endl;
cout<<"\"data01.xml\" contains digits 0 and 1 of 20 samples each, collected on an Android device"<<endl;
cout<<"Each of the collected images are of size 28 x 28 re-arranged to 1 x 784 matrix"<<endl;
cout<<"*****************************************************************************************\n\n"<<endl;
cout<<"loading the dataset\n"<<endl;
f.open("data01.xml", FileStorage::READ);
f["datamat"] >> data_temp;
f["labelsmat"] >> labels_temp;
data_temp.convertTo(data, CV_32F);
labels_temp.convertTo(labels, CV_32F);
for(int i =0;i<data.rows;i++)
for(int i = 0; i < data.rows; i++)
{
if(i%2 ==0)
if(i % 2 == 0)
{
data_train.push_back(data.row(i));
labels_train.push_back(labels.row(i));
......@@ -108,66 +111,66 @@ int main()
labels_test.push_back(labels.row(i));
}
}
cout<<"training samples per class: "<<data_train.rows/2<<endl;
cout<<"testing samples per class: "<<data_test.rows/2<<endl;
cout << "training/testing samples count: " << data_train.rows << "/" << data_test.rows << endl;
// display sample image
Mat img_disp1 = data_train.row(2).reshape(0,28).t();
Mat img_disp2 = data_train.row(18).reshape(0,28).t();
imshow("digit 0", img_disp1);
imshow("digit 1", img_disp2);
cout<<"initializing Logisitc Regression Parameters\n"<<endl;
// LogisticRegressionParams params1 = LogisticRegressionParams(0.001, 10, LogisticRegression::BATCH, LogisticRegression::REG_L2, 1, 1);
// params1 (above) with batch gradient performs better than mini batch gradient below with same parameters
LogisticRegressionParams params1 = LogisticRegressionParams(0.001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
// Mat bigImage;
// for(int i = 0; i < data_train.rows; ++i)
// {
// bigImage.push_back(data_train.row(i).reshape(0, 28));
// }
// imshow("digits", bigImage.t());
// however mini batch gradient descent parameters with slower learning rate(below) can be used to get higher accuracy than with parameters mentioned above
// LogisticRegressionParams params1 = LogisticRegressionParams(0.000001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
cout<<"training Logisitc Regression classifier\n"<<endl;
Mat responses, result;
LogisticRegression lr1(data_train, labels_train, params1);
lr1.predict(data_test, responses);
// LogisticRegression::Params params = LogisticRegression::Params(
// 0.001, 10, LogisticRegression::BATCH, LogisticRegression::REG_L2, 1, 1);
// params1 (above) with batch gradient performs better than mini batch
// gradient below with same parameters
LogisticRegression::Params params = LogisticRegression::Params(
0.001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
// however mini batch gradient descent parameters with slower learning
// rate(below) can be used to get higher accuracy than with parameters
// mentioned above
// LogisticRegression::Params params = LogisticRegression::Params(
// 0.000001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
cout << "training...";
Ptr<StatModel> lr1 = LogisticRegression::create(params);
lr1->train(data_train, ROW_SAMPLE, labels_train);
cout << "done!" << endl;
cout << "predicting...";
lr1->predict(data_test, responses);
cout << "done!" << endl;
// show prediction report
cout << "original vs predicted:" << endl;
labels_test.convertTo(labels_test, CV_32S);
cout<<"Original Label :: Predicted Label"<<endl;
result = (labels_test == responses)/255;
for(int i=0;i<labels_test.rows;i++)
{
cout<<labels_test.at<int>(i,0)<<" :: "<< responses.at<int>(i,0)<<endl;
}
// calculate accuracy
cout<<"accuracy: "<<((double)cv::sum(result)[0]/result.rows)*100<<"%\n";
cout<<"saving the classifier"<<endl;
cout << labels_test.t() << endl;
cout << responses.t() << endl;
result = (labels_test == responses) / 255;
cout << "accuracy: " << ((double)cv::sum(result)[0] / result.rows) * 100 << "%\n";
// save the classfier
fs1.open("NewLR_Trained.xml",FileStorage::WRITE);
lr1.write(fs1);
fs1.release();
cout << "saving the classifier" << endl;
const String saveFilename = "NewLR_Trained.xml";
lr1->save(saveFilename);
// load the classifier onto new object
LogisticRegressionParams params2 = LogisticRegressionParams();
LogisticRegression lr2(params2);
cout<<"loading a new classifier"<<endl;
fs2.open("NewLR_Trained.xml",FileStorage::READ);
FileNode fn2 = fs2.root();
lr2.read(fn2);
fs2.release();
Mat responses2;
cout << "loading a new classifier" << endl;
Ptr<LogisticRegression> lr2 = StatModel::load<LogisticRegression>(saveFilename);
// predict using loaded classifier
cout<<"predicting the dataset using the loaded classfier\n"<<endl;
lr2.predict(data_test, responses2);
cout << "predicting the dataset using the loaded classfier" << endl;
Mat responses2;
lr2->predict(data_test, responses2);
// calculate accuracy
cout<<"accuracy using loaded classifier: "<<100 * (float)cv::countNonZero(labels_test == responses2)/responses2.rows<<"%"<<endl;
waitKey(0);
cout << "accuracy using loaded classifier: "
<< 100 * (float)cv::countNonZero(labels_test == responses2) / responses2.rows << "%"
<< endl;
waitKey(0);
return 0;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment