Commit 0ea009f6 authored by Olexa Bilaniuk

Merge remote-tracking branch 'refs/remotes/upstream/master'

parents f148f233 7b270f4c
@@ -122,7 +122,6 @@ CV_INLINE CvParamLattice cvDefaultParamLattice( void )
#define CV_TYPE_NAME_ML_SVM      "opencv-ml-svm"
#define CV_TYPE_NAME_ML_KNN      "opencv-ml-knn"
#define CV_TYPE_NAME_ML_NBAYES   "opencv-ml-bayesian"
#define CV_TYPE_NAME_ML_EM       "opencv-ml-em"
#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree"
#define CV_TYPE_NAME_ML_TREE     "opencv-ml-tree"
#define CV_TYPE_NAME_ML_ANN_MLP  "opencv-ml-ann-mlp"
@@ -562,100 +561,6 @@ private:
    CvSVM& operator = (const CvSVM&);
};
/****************************************************************************************\
* Expectation - Maximization *
\****************************************************************************************/
namespace cv
{
class EM : public Algorithm
{
public:
// Type of covariation matrices
enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
// Default parameters
enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
// The initial step
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
virtual ~EM();
CV_WRAP virtual void clear();
CV_WRAP virtual bool train(InputArray samples,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP virtual bool trainE(InputArray samples,
InputArray means0,
InputArray covs0=noArray(),
InputArray weights0=noArray(),
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP virtual bool trainM(InputArray samples,
InputArray probs0,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP Vec2d predict(InputArray sample,
OutputArray probs=noArray()) const;
CV_WRAP bool isTrained() const;
AlgorithmInfo* info() const;
virtual void read(const FileNode& fn);
protected:
virtual void setTrainData(int startStep, const Mat& samples,
const Mat* probs0,
const Mat* means0,
const std::vector<Mat>* covs0,
const Mat* weights0);
bool doTrain(int startStep,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs);
virtual void eStep();
virtual void mStep();
void clusterTrainSamples();
void decomposeCovs();
void computeLogWeightDivDet();
Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
// all inner matrices have type CV_64FC1
CV_PROP_RW int nclusters;
CV_PROP_RW int covMatType;
CV_PROP_RW int maxIters;
CV_PROP_RW double epsilon;
Mat trainSamples;
Mat trainProbs;
Mat trainLogLikelihoods;
Mat trainLabels;
CV_PROP Mat weights;
CV_PROP Mat means;
CV_PROP std::vector<Mat> covs;
std::vector<Mat> covsEigenValues;
std::vector<Mat> covsRotateMats;
std::vector<Mat> invCovsEigenValues;
Mat logWeightDivDet;
};
} // namespace cv
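(The legacy cv::EM interface removed above was driven roughly as follows. This is a hedged sketch reconstructed only from the declaration shown, not from the removed implementation; the `samples` matrix is a placeholder.)

@code{.cpp}
// Hedged sketch based only on the declaration above.
// 'samples' holds one observation per row, single-channel floating point.
cv::EM em(3, cv::EM::COV_MAT_DIAGONAL);           // 3 mixture components, diagonal covariances
cv::Mat logLikelihoods, labels, probs;
em.train(samples, logLikelihoods, labels, probs); // unsupervised E/M fit, bounded by termCrit

// predict() returns {log-likelihood, index of the most probable component} for one sample
cv::Vec2d res = em.predict(samples.row(0), probs);
int bestComponent = cvRound(res[1]);
@endcode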
/****************************************************************************************\
*                                        Decision Tree                                  *
\****************************************************************************************/
@@ -2155,8 +2060,6 @@ typedef CvGBTreesParams GradientBoostingTreeParams;
typedef CvGBTrees GradientBoostingTrees;
template<> void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const;
bool initModule_ml(void);
}
#endif // __cplusplus
...
@@ -100,7 +100,7 @@ RECURSIVE = YES
EXCLUDE                =
EXCLUDE_SYMLINKS       = NO
EXCLUDE_PATTERNS       = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
EXCLUDE_SYMBOLS        = cv::DataType<*> int void
EXAMPLE_PATH           = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS       = *
EXAMPLE_RECURSIVE      = YES
@@ -243,7 +243,11 @@ PREDEFINED = __cplusplus=1 \
    CV_NORETURN= \
    CV_DEFAULT(x)=" = x" \
    CV_NEON=1 \
    FLANN_DEPRECATED= \
    "CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
    "CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
    "CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
    "CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
EXPAND_AS_DEFINED      =
SKIP_FUNCTION_MACROS   = YES
TAGFILES               =
...
@@ -31,3 +31,7 @@ div.contents {
span.arrow {
    height: 13px;
}
div.image img {
    max-width: 900px;
}
Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
=======================================

@todo update this tutorial

Goal
----
@@ -31,13 +29,11 @@ understand that this is done only because our intuition is better built from exa
to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
whose dimension is higher than two.

In the above picture you can see that there exist multiple lines that offer a solution to the
problem. Is any of them better than the others? We can intuitively define a criterion to estimate
the worth of the lines: <em> A line is bad if it passes too close to the points because it will be
noise sensitive and it will not generalize correctly. </em> Therefore, our goal should be to find
the line passing as far as possible from all points.

Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest
minimum distance to the training examples. Twice, this distance receives the important name of
@@ -57,7 +53,7 @@ where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bi
@sa A more in depth description of this and hyperplanes can be found in section 4.5 (*Separating
Hyperplanes*) of the book *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
Friedman (@cite HTF01).

The optimal hyperplane can be represented in an infinite number of different ways by
scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible
@@ -107,17 +103,14 @@ Explanation
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes; one of the classes consists of one point and the other of three points.

@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1

The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
defined above:

@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2
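For readers without the sample file at hand, a hedged sketch of roughly what the two snippets above contain (assuming the OpenCV 3 `cv::ml` API, where classification labels are passed as a CV_32S matrix while the samples stay float):

@code{.cpp}
// Sketch only; the shipped sample may differ in details.
int labels[4] = {1, -1, -1, -1};
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };

Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels);
@endcode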
-# **Set up SVM's parameters**

@@ -126,42 +119,35 @@ Explanation
    used in a wide variety of problems (e.g. problems with non-linearly separable data, an SVM using
    a kernel function to raise the dimensionality of the examples, etc). As a consequence of this,
    we have to define some parameters before training the SVM. These parameters are stored in an
    object of the class @ref cv::ml::SVM.

    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init

    Here:
    - *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
      n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
      with imperfect separation of classes (i.e. when the training data is non-linearly separable).
      This feature is not important here since the data is linearly separable and we chose this SVM
      type only for being the most commonly used.
    - *Type of SVM kernel*. We have not talked about kernel functions since they are not
      interesting for the training data we are dealing with. Nevertheless, let's explain briefly now
      the main idea behind a kernel function. It is a mapping done to the training data to improve
      its resemblance to a linearly separable set of data. This mapping consists of increasing the
      dimensionality of the data and is done efficiently using a kernel function. We choose here the
      type @ref cv::ml::SVM::LINEAR "LINEAR", which means that no mapping is done. This parameter is
      defined using cv::ml::SVM::setKernel.
    - *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a
      constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
      maximum number of iterations and a tolerance error so we allow the algorithm to finish in
      fewer steps even if the optimal hyperplane has not been computed yet. This parameter is
      defined in a structure @ref cv::TermCriteria .
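    A hedged sketch of the snippet referenced above, using the setter-based interface described in
    the three bullets (assumes `using namespace cv::ml;`):

    @code{.cpp}
    // Sketch only: configure the SVM with the parameters discussed above.
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::LINEAR);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
    @endcode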
-# **Train the SVM**

    We call the method @ref cv::ml::SVM::train to build the SVM model.

    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
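    Continuing the sketch above (hedged, not the verbatim sample), training amounts to a single call
    on the sample matrix and the integer label matrix prepared earlier:

    @code{.cpp}
    svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);   // ROW_SAMPLE: one training point per row
    @endcode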
-# **Regions classified by the SVM**

@@ -170,22 +156,8 @@ Explanation
    by the SVM. In other words, an image is traversed interpreting its pixels as points of the
    Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
    green if it is the class with label 1 and in blue if it is the class with label -1.

    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
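    A hedged sketch of what the referenced snippet does: walk the image and color each pixel by the
    predicted class (coordinate order and colors are illustrative):

    @code{.cpp}
    Vec3b green(0, 255, 0), blue(255, 0, 0);
    for (int i = 0; i < image.rows; ++i)
        for (int j = 0; j < image.cols; ++j)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << j, i);   // treat (x, y) = (j, i) as a 2D sample
            float response = svm->predict(sampleMat);
            image.at<Vec3b>(i, j) = (response == 1) ? green : blue;
        }
    @endcode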
-# **Support vectors**

@@ -193,15 +165,8 @@ Explanation
    The method @ref cv::ml::SVM::getSupportVectors obtains all of the support
    vectors. We have used this method here to find the training examples that are
    support vectors and highlight them.

    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
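    A hedged sketch of the referenced snippet: in the new API the support vectors come back as a
    single matrix, one vector per row (circle thickness and line type are illustrative):

    @code{.cpp}
    Mat sv = svm->getSupportVectors();
    for (int i = 0; i < sv.rows; ++i)
    {
        const float* v = sv.ptr<float>(i);
        circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), 2, 8);
    }
    @endcode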
Results
-------
...
@@ -200,8 +200,6 @@ public:
    void setCallback(const Ptr<LMSolver::Callback>& _cb) { cb = _cb; }
    AlgorithmInfo* info() const;
    Ptr<LMSolver::Callback> cb;
    double epsx;
@@ -211,15 +209,8 @@ public:
};
CV_INIT_ALGORITHM(LMSolverImpl, "LMSolver",
                  obj.info()->addParam(obj, "epsx", obj.epsx);
                  obj.info()->addParam(obj, "epsf", obj.epsf);
                  obj.info()->addParam(obj, "maxIters", obj.maxIters);
                  obj.info()->addParam(obj, "printInterval", obj.printInterval))
Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
{
    CV_Assert( !LMSolverImpl_info_auto.name().empty() );
    return makePtr<LMSolverImpl>(cb, maxIters);
}
...
@@ -256,8 +256,6 @@ public:
    void setCallback(const Ptr<PointSetRegistrator::Callback>& _cb) { cb = _cb; }
    AlgorithmInfo* info() const;
    Ptr<PointSetRegistrator::Callback> cb;
    int modelPoints;
    bool checkPartialSubsets;
@@ -378,25 +376,12 @@ public:
        return result;
    }
    AlgorithmInfo* info() const;
};
CV_INIT_ALGORITHM(RANSACPointSetRegistrator, "PointSetRegistrator.RANSAC",
                  obj.info()->addParam(obj, "threshold", obj.threshold);
                  obj.info()->addParam(obj, "confidence", obj.confidence);
                  obj.info()->addParam(obj, "maxIters", obj.maxIters))
CV_INIT_ALGORITHM(LMeDSPointSetRegistrator, "PointSetRegistrator.LMeDS",
                  obj.info()->addParam(obj, "confidence", obj.confidence);
                  obj.info()->addParam(obj, "maxIters", obj.maxIters))
Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
                                                         int _modelPoints, double _threshold,
                                                         double _confidence, int _maxIters)
{
    CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
    return Ptr<PointSetRegistrator>(
        new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters));
}
@@ -405,7 +390,6 @@ Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegis
Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
                                                        int _modelPoints, double _confidence, int _maxIters)
{
    CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
    return Ptr<PointSetRegistrator>(
        new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters));
}
...
@@ -1010,8 +1010,6 @@ public:
        disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
    }
    AlgorithmInfo* info() const { return 0; }
    int getMinDisparity() const { return params.minDisparity; }
    void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
...
@@ -865,8 +865,6 @@ public:
            StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
    }
    AlgorithmInfo* info() const { return 0; }
    int getMinDisparity() const { return params.minDisparity; }
    void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
...
@@ -412,84 +412,6 @@ int print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)
    return print(Formatter::get()->format(cv::Mat(matx)), stream);
}
////////////////////////////////////////// Algorithm //////////////////////////////////////////
template<typename _Tp> inline
Ptr<_Tp> Algorithm::create(const String& name)
{
return _create(name).dynamicCast<_Tp>();
}
template<typename _Tp> inline
void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template dynamicCast<cv::Algorithm>();
if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
}
template<typename _Tp> inline
void Algorithm::set(const String& _name, const Ptr<_Tp>& value)
{
this->set<_Tp>(_name.c_str(), value);
}
template<typename _Tp> inline
void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
}
template<typename _Tp> inline
void Algorithm::setAlgorithm(const String& _name, const Ptr<_Tp>& value)
{
this->set<_Tp>(_name.c_str(), value);
}
template<typename _Tp> inline
typename ParamType<_Tp>::member_type Algorithm::get(const String& _name) const
{
typename ParamType<_Tp>::member_type value;
info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp> inline
typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
{
typename ParamType<_Tp>::member_type value;
info()->get(this, _name, ParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp, typename _Base> inline
void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
const String& help)
{
//TODO: static assert: _Tp inherits from _Base
addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
template<typename _Tp> inline
void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
const String& help)
{
//TODO: static assert: _Tp inherits from Algorithm
addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
//! @endcond

/****************************************************************************************\
...
@@ -129,40 +129,6 @@ namespace cv
CV_EXPORTS const char* currentParallelFramework();
} //namespace cv
#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
static inline ::cv::Algorithm* create##classname##_hidden() \
{ \
return new classname; \
} \
\
static inline ::cv::Ptr< ::cv::Algorithm> create##classname##_ptr_hidden() \
{ \
return ::cv::makePtr<classname>(); \
} \
\
static inline ::cv::AlgorithmInfo& classname##_info() \
{ \
static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
return classname##_info_var; \
} \
\
static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \
\
::cv::AlgorithmInfo* classname::info() const \
{ \
static volatile bool initialized = false; \
\
if( !initialized ) \
{ \
initialized = true; \
classname obj; \
memberinit; \
} \
return &classname##_info(); \
}
/****************************************************************************************\
*                                   Common declarations                                  *
\****************************************************************************************/
...
@@ -140,8 +140,6 @@ namespace
    public:
        CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
        cv::AlgorithmInfo* info() const;
        void apply(cv::InputArray src, cv::OutputArray dst);
        void apply(InputArray src, OutputArray dst, Stream& stream);
@@ -167,11 +165,6 @@ namespace
    {
    }
    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_CUDA",
                      obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
                      obj.info()->addParam(obj, "tilesX", obj.tilesX_);
                      obj.info()->addParam(obj, "tilesY", obj.tilesY_))
    void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
    {
        apply(_src, _dst, Stream::Null());
...
@@ -310,10 +310,10 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
{
    cv::Mat flow;
    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
    alg->setMedianFiltering(1);
    alg->setInnerIterations(1);
    alg->setOuterIterations(300);
    TEST_CYCLE() alg->calc(frame0, frame1, flow);
    CPU_SANITY_CHECK(flow);
...
@@ -369,11 +369,11 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy)
    cv::cuda::GpuMat d_flow;
    d_alg->calc(loadMat(frame0), loadMat(frame1), d_flow);
    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
    alg->setMedianFiltering(1);
    alg->setInnerIterations(1);
    alg->setOuterIterations(d_alg->getNumIterations());
    alg->setGamma(gamma);
    cv::Mat flow;
    alg->calc(frame0, frame1, flow);
...
@@ -320,8 +320,6 @@ namespace
    public:
        CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
        cv::AlgorithmInfo* info() const;
        void apply(cv::InputArray src, cv::OutputArray dst);
        void setClipLimit(double clipLimit);
@@ -351,11 +349,6 @@ namespace
    {
    }
    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE",
                      obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
                      obj.info()->addParam(obj, "tilesX", obj.tilesX_);
                      obj.info()->addParam(obj, "tilesY", obj.tilesY_))
    void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
    {
        CV_Assert( _src.type() == CV_8UC1 || _src.type() == CV_16UC1 );
...
@@ -449,40 +449,33 @@ classes 0 and 1, one can determine that the given data instance belongs to class
\geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ .

In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
training error and ensuring high training accuracy:

- The learning rate can be set with the @ref cv::ml::LogisticRegression::setLearningRate
  "setLearningRate" method. It determines how fast we approach the solution. It is a positive
  real number.
- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
  in LogisticRegression. It is important that we mention the number of iterations these optimization
  algorithms have to run. The number of iterations can be set with
  @ref cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of
  as the number of steps taken, while the learning rate specifies whether each step is a long or a
  short one. Together, these two parameters define how fast we arrive at a possible solution.
- In order to compensate for overfitting, regularization is performed, which can be enabled with
  @ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what kind
  of regularization has to be performed by passing one of the @ref
  cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.
- The implementation provides a choice of two training methods, Batch Gradient Descent and
  Mini-Batch Gradient Descent. To specify this, call
  @ref cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either
  @ref cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or
  @ref cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If the training
  method is set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini
  batch has to be a positive integer set with
  @ref cv::ml::LogisticRegression::setMiniBatchSize "setMiniBatchSize".

A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:

@snippet samples/cpp/logistic_regression.cpp init
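A hedged sketch of the snippet referenced above, using the setters listed in the bullets (the concrete values are illustrative; assumes `using namespace cv::ml;`):

@code{.cpp}
Ptr<LogisticRegression> lr = LogisticRegression::create();
lr->setLearningRate(0.001);
lr->setIterations(10);
lr->setRegularization(LogisticRegression::REG_L2);
lr->setTrainMethod(LogisticRegression::MINI_BATCH);
lr->setMiniBatchSize(1);
@endcode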
@sa cv::ml::LogisticRegression
@@ -42,84 +42,57 @@
namespace cv { namespace ml {

struct AnnParams
{
    AnnParams()
    {
        termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
        trainMethod = ANN_MLP::RPROP;
        bpDWScale = bpMomentScale = 0.1;
        rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
        rpDWMin = FLT_EPSILON; rpDWMax = 50.;
    }

    TermCriteria termCrit;
    int trainMethod;

    double bpDWScale;
    double bpMomentScale;

    double rpDW0;
    double rpDWPlus;
    double rpDWMinus;
    double rpDWMin;
    double rpDWMax;
};

template <typename T>
inline T inBounds(T val, T min_val, T max_val)
{
    return std::min(std::max(val, min_val), max_val);
}

class ANN_MLPImpl : public ANN_MLP
{
public:
    ANN_MLPImpl()
    {
        clear();
        setActivationFunction( SIGMOID_SYM, 0, 0 );
        setLayerSizes(Mat());
        setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
    }

    virtual ~ANN_MLPImpl() {}

    CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.termCrit)
    CV_IMPL_PROPERTY(double, BackpropWeightScale, params.bpDWScale)
    CV_IMPL_PROPERTY(double, BackpropMomentumScale, params.bpMomentScale)
    CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0)
    CV_IMPL_PROPERTY(double, RpropDWPlus, params.rpDWPlus)
    CV_IMPL_PROPERTY(double, RpropDWMinus, params.rpDWMinus)
    CV_IMPL_PROPERTY(double, RpropDWMin, params.rpDWMin)
    CV_IMPL_PROPERTY(double, RpropDWMax, params.rpDWMax)

    void clear()
    {
@@ -132,7 +105,35 @@ public:
    int layer_count() const { return (int)layer_sizes.size(); }

    void setTrainMethod(int method, double param1, double param2)
    {
        if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP)
            method = ANN_MLP::RPROP;
        params.trainMethod = method;
        if(method == ANN_MLP::RPROP )
        {
            if( param1 < FLT_EPSILON )
                param1 = 1.;
            params.rpDW0 = param1;
            params.rpDWMin = std::max( param2, 0. );
        }
        else if(method == ANN_MLP::BACKPROP )
        {
            if( param1 <= 0 )
                param1 = 0.1;
            params.bpDWScale = inBounds<double>(param1, 1e-3, 1.);
            if( param2 < 0 )
                param2 = 0.1;
            params.bpMomentScale = std::min( param2, 1. );
        }
    }

    int getTrainMethod() const
    {
        return params.trainMethod;
    }

    void setActivationFunction(int _activ_func, double _f_param1, double _f_param2 )
    {
        if( _activ_func < 0 || _activ_func > GAUSSIAN )
            CV_Error( CV_StsOutOfRange, "Unknown activation function" );
@@ -201,7 +202,12 @@ public:
        }
    }

    Mat getLayerSizes() const
    {
        return Mat_<int>(layer_sizes, true);
    }

    void setLayerSizes( InputArray _layer_sizes )
    {
        clear();
@@ -700,7 +706,7 @@ public:
        termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1);
        termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON);

        int iter = params.trainMethod == ANN_MLP::BACKPROP ?
                   train_backprop( inputs, outputs, sw, termcrit ) :
                   train_rprop( inputs, outputs, sw, termcrit );
@@ -1113,13 +1119,13 @@ public:
        fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1;

        fs << "training_params" << "{";
        if( params.trainMethod == ANN_MLP::BACKPROP )
        {
            fs << "train_method" << "BACKPROP";
            fs << "dw_scale" << params.bpDWScale;
            fs << "moment_scale" << params.bpMomentScale;
        }
        else if( params.trainMethod == ANN_MLP::RPROP )
        {
            fs << "train_method" << "RPROP";
            fs << "dw0" << params.rpDW0;
@@ -1186,7 +1192,7 @@ public:
        f_param1 = (double)fn["f_param1"];
        f_param2 = (double)fn["f_param2"];

        setActivationFunction( activ_func, f_param1, f_param2 );

        min_val = (double)fn["min_val"];
        max_val = (double)fn["max_val"];
@@ -1194,7 +1200,7 @@ public:
        max_val1 = (double)fn["max_val1"];

        FileNode tpn = fn["training_params"];
        params = AnnParams();
        if( !tpn.empty() )
        {
@@ -1202,13 +1208,13 @@ public:
            if( tmethod_name == "BACKPROP" )
            {
                params.trainMethod = ANN_MLP::BACKPROP;
                params.bpDWScale = (double)tpn["dw_scale"];
                params.bpMomentScale = (double)tpn["moment_scale"];
            }
            else if( tmethod_name == "RPROP" )
            {
                params.trainMethod = ANN_MLP::RPROP;
                params.rpDW0 = (double)tpn["dw0"];
                params.rpDWPlus = (double)tpn["dw_plus"];
                params.rpDWMinus = (double)tpn["dw_minus"];
@@ -1244,7 +1250,7 @@ public:
        vector<int> _layer_sizes;
        readVectorOrMat(fn["layer_sizes"], _layer_sizes);
        setLayerSizes( _layer_sizes );

        int i, l_count = layer_count();
        read_params(fn);
@@ -1267,11 +1273,6 @@ public:
        trained = true;
    }

    Mat getWeights(int layerIdx) const
    {
        CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() );
@@ -1304,17 +1305,16 @@ public:
    double min_val, max_val, min_val1, max_val1;
    int activ_func;
    int max_lsize, max_buf_sz;
    AnnParams params;
    RNG rng;
    Mutex mtx;

    bool trained;
};

Ptr<ANN_MLP> ANN_MLP::create()
{
    return makePtr<ANN_MLPImpl>();
}

}}
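To make the refactoring above concrete, a hedged sketch of how the new setter-based ANN_MLP interface is driven (the toy data and layer shapes are purely illustrative; assumes `using namespace cv;`):

@code{.cpp}
// Sketch only, not part of the patch.
Mat samples   = (Mat_<float>(4, 2) << 0,0,  0,1,  1,0,  1,1);
Mat responses = (Mat_<float>(4, 1) << 0, 1, 1, 0);

Ptr<ml::ANN_MLP> mlp = ml::ANN_MLP::create();
mlp->setLayerSizes((Mat_<int>(1, 3) << 2, 4, 1));        // 2 inputs, 4 hidden units, 1 output
mlp->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM, 0, 0);
mlp->setTrainMethod(ml::ANN_MLP::BACKPROP, 0.1, 0.1);    // replaces the removed ANN_MLP::Params
mlp->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01));
mlp->train(samples, ml::ROW_SAMPLE, responses);

Mat out;
mlp->predict((Mat_<float>(1, 2) << 1, 0), out);
@endcode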
...
@@ -54,47 +54,32 @@ log_ratio( double val )
}

BoostTreeParams::BoostTreeParams()
{
    boostType = Boost::REAL;
    weakCount = 100;
    weightTrimRate = 0.95;
}

BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count,
                                  double _weightTrimRate)
{
    boostType = _boostType;
    weakCount = _weak_count;
    weightTrimRate = _weightTrimRate;
}

class DTreesImplForBoost : public DTreesImpl
{
public:
    DTreesImplForBoost()
    {
        params.setCVFolds(0);
        params.setMaxDepth(1);
    }
    virtual ~DTreesImplForBoost() {}

    bool isClassifier() const { return true; }

    void clear()
    {
@@ -199,10 +184,6 @@ public:
    bool train( const Ptr<TrainData>& trainData, int flags )
    {
        startTraining(trainData, flags);
        int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
        vector<int> sidx = w->sidx;
@@ -426,12 +407,6 @@ public:
    void readParams( const FileNode& fn )
    {
        DTreesImpl::readParams(fn);

        FileNode tparams_node = fn["training_params"];
        // check for old layout
@@ -465,7 +440,7 @@ public:
        }
    }

    BoostTreeParams bparams;
    vector<double> sumResult;
};
@@ -476,6 +451,20 @@ public:
    BoostImpl() {}
    virtual ~BoostImpl() {}

    CV_IMPL_PROPERTY(int, BoostType, impl.bparams.boostType)
    CV_IMPL_PROPERTY(int, WeakCount, impl.bparams.weakCount)
    CV_IMPL_PROPERTY(double, WeightTrimRate, impl.bparams.weightTrimRate)

    CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
    CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
    CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
    CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
    CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
    CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
    CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
    CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
    CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)

    String getDefaultModelName() const { return "opencv_ml_boost"; }

    bool train( const Ptr<TrainData>& trainData, int flags )
@@ -498,9 +487,6 @@ public:
        impl.read(fn);
    }

    int getVarCount() const { return impl.getVarCount(); }
    bool isTrained() const { return impl.isTrained(); }
@@ -515,11 +501,9 @@ public:
};

Ptr<Boost> Boost::create()
{
    return makePtr<BoostImpl>();
}

}}
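For reference, a hedged sketch of driving the refactored Boost classifier through the new property setters (the `samples`/`responses` Mats are placeholders):

@code{.cpp}
// Sketch only, not part of the patch.
Ptr<ml::Boost> boost = ml::Boost::create();   // no Params struct anymore
boost->setBoostType(ml::Boost::REAL);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(1);                        // the old Params default, now a DTrees property
boost->train(ml::TrainData::create(samples, ml::ROW_SAMPLE, responses));
@endcode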
...
@@ -50,46 +50,33 @@
namespace cv {
namespace ml {

const String NAME_BRUTE_FORCE = "opencv_ml_knn";
const String NAME_KDTREE = "opencv_ml_knn_kd";

class Impl
{
public:
    Impl()
    {
        defaultK = 10;
        isclassifier = true;
        Emax = INT_MAX;
    }

    virtual ~Impl() {}
    virtual String getModelName() const = 0;
    virtual int getType() const = 0;
    virtual float findNearest( InputArray _samples, int k,
                               OutputArray _results,
                               OutputArray _neighborResponses,
                               OutputArray _dists ) const = 0;

    bool train( const Ptr<TrainData>& data, int flags )
    {
        Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
        Mat new_responses;
        data->getTrainResponses().convertTo(new_responses, CV_32F);
        bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty();

        CV_Assert( new_samples.type() == CV_32F );
@@ -106,9 +93,53 @@ public:
        samples.push_back(new_samples);
        responses.push_back(new_responses);

        doTrain(samples);
        return true;
    }

    virtual void doTrain(InputArray points) { (void)points; }

    void clear()
    {
        samples.release();
        responses.release();
    }

    void read( const FileNode& fn )
    {
        clear();
        isclassifier = (int)fn["is_classifier"] != 0;
        defaultK = (int)fn["default_k"];
        fn["samples"] >> samples;
        fn["responses"] >> responses;
    }

    void write( FileStorage& fs ) const
    {
        fs << "is_classifier" << (int)isclassifier;
        fs << "default_k" << defaultK;
        fs << "samples" << samples;
        fs << "responses" << responses;
    }

public:
    int defaultK;
    bool isclassifier;
    int Emax;

    Mat samples;
    Mat responses;
};

class BruteForceImpl : public Impl
{
public:
    String getModelName() const { return NAME_BRUTE_FORCE; }
    int getType() const { return ml::KNearest::BRUTE_FORCE; }

    void findNearestCore( const Mat& _samples, int k0, const Range& range,
                          Mat* results, Mat* neighbor_responses,
                          Mat* dists, float* presult ) const
@@ -199,7 +230,7 @@ public:
        if( results || testidx+range.start == 0 )
        {
            if( !isclassifier || k == 1 )
            {
                float s = 0.f;
                for( j = 0; j < k; j++ )
@@ -251,7 +282,7 @@ public:
    struct findKNearestInvoker : public ParallelLoopBody
    {
        findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples,
                            Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
        {
            p = _p;
@@ -273,7 +304,7 @@ public:
            }
        }

        const BruteForceImpl* p;
        int k;
        const Mat* _samples;
        Mat* _results;
@@ -324,88 +355,18 @@ public:
        //invoker(Range(0, testcount));
        return result;
    }
};

class KDTreeImpl : public Impl
{
public:
    String getModelName() const { return NAME_KDTREE; }
    int getType() const { return ml::KNearest::KDTREE; }

    void doTrain(InputArray points)
    {
        tr.build(points);
    }

    float findNearest( InputArray _samples, int k,
@@ -460,51 +421,97 @@ public:
            {
                _d = d.row(i);
            }
            tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray());
        }
        return result; // currently always 0
    }

    KDTree tr;
};

//================================================================

class KNearestImpl : public KNearest
{
    CV_IMPL_PROPERTY(int, DefaultK, impl->defaultK)
    CV_IMPL_PROPERTY(bool, IsClassifier, impl->isclassifier)
    CV_IMPL_PROPERTY(int, Emax, impl->Emax)

public:
    int getAlgorithmType() const
    {
        return impl->getType();
    }
    void setAlgorithmType(int val)
    {
        if (val != BRUTE_FORCE && val != KDTREE)
            val = BRUTE_FORCE;
        initImpl(val);
    }

public:
    KNearestImpl()
    {
        initImpl(BRUTE_FORCE);
    }
    ~KNearestImpl()
    {
    }

    bool isClassifier() const { return impl->isclassifier; }
    bool isTrained() const { return !impl->samples.empty(); }

    int getVarCount() const { return impl->samples.cols; }

    void write( FileStorage& fs ) const
    {
        impl->write(fs);
    }

    void read( const FileNode& fn )
    {
        int algorithmType = BRUTE_FORCE;
        if (fn.name() == NAME_KDTREE)
            algorithmType = KDTREE;
        initImpl(algorithmType);
        impl->read(fn);
    }

    float findNearest( InputArray samples, int k,
                       OutputArray results,
                       OutputArray neighborResponses=noArray(),
                       OutputArray dist=noArray() ) const
    {
        return impl->findNearest(samples, k, results, neighborResponses, dist);
    }

    float predict(InputArray inputs, OutputArray outputs, int) const
    {
        return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() );
    }

    bool train( const Ptr<TrainData>& data, int flags )
    {
        return impl->train(data, flags);
    }

    String getDefaultModelName() const { return impl->getModelName(); }

protected:
    void initImpl(int algorithmType)
    {
        if (algorithmType != KDTREE)
            impl = makePtr<BruteForceImpl>();
        else
            impl = makePtr<KDTreeImpl>();
    }
    Ptr<Impl> impl;
};

Ptr<KNearest> KNearest::create()
{
    return makePtr<KNearestImpl>();
}

}
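A hedged usage sketch of the refactored KNearest front end (placeholder Mats; the backend is selected with setAlgorithmType instead of the removed Params struct):

@code{.cpp}
// Sketch only, not part of the patch.
Ptr<ml::KNearest> knn = ml::KNearest::create();
knn->setDefaultK(3);
knn->setIsClassifier(true);
knn->setAlgorithmType(ml::KNearest::BRUTE_FORCE);   // or ml::KNearest::KDTREE
knn->train(ml::TrainData::create(samples, ml::ROW_SAMPLE, responses));

Mat results;
knn->findNearest(testSamples, 3, results);          // one predicted response per test row
@endcode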
...
@@ -60,31 +60,41 @@ using namespace std;
 namespace cv {
 namespace ml {
 
-LogisticRegression::Params::Params(double learning_rate,
-                                   int iters,
-                                   int method,
-                                   int normlization,
-                                   int reg,
-                                   int batch_size)
+class LrParams
 {
-    alpha = learning_rate;
-    num_iters = iters;
-    norm = normlization;
-    regularized = reg;
-    train_method = method;
-    mini_batch_size = batch_size;
-    term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
-}
+public:
+    LrParams()
+    {
+        alpha = 0.001;
+        num_iters = 1000;
+        norm = LogisticRegression::REG_L2;
+        train_method = LogisticRegression::BATCH;
+        mini_batch_size = 1;
+        term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
+    }
+
+    double alpha; //!< learning rate.
+    int num_iters; //!< number of iterations.
+    int norm;
+    int train_method;
+    int mini_batch_size;
+    TermCriteria term_crit;
+};
 
 class LogisticRegressionImpl : public LogisticRegression
 {
 public:
-    LogisticRegressionImpl(const Params& pms)
-        : params(pms)
-    {
-    }
+    LogisticRegressionImpl() { }
     virtual ~LogisticRegressionImpl() {}
+
+    CV_IMPL_PROPERTY(double, LearningRate, params.alpha)
+    CV_IMPL_PROPERTY(int, Iterations, params.num_iters)
+    CV_IMPL_PROPERTY(int, Regularization, params.norm)
+    CV_IMPL_PROPERTY(int, TrainMethod, params.train_method)
+    CV_IMPL_PROPERTY(int, MiniBatchSize, params.mini_batch_size)
+    CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.term_crit)
 
     virtual bool train( const Ptr<TrainData>& trainData, int=0 );
     virtual float predict(InputArray samples, OutputArray results, int) const;
     virtual void clear();
@@ -103,7 +113,7 @@ protected:
     bool set_label_map(const Mat& _labels_i);
     Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
 protected:
-    Params params;
+    LrParams params;
     Mat learnt_thetas;
     map<int, int> forward_mapper;
     map<int, int> reverse_mapper;
@@ -111,9 +121,9 @@ protected:
     Mat labels_n;
 };
 
-Ptr<LogisticRegression> LogisticRegression::create(const Params& params)
+Ptr<LogisticRegression> LogisticRegression::create()
 {
-    return makePtr<LogisticRegressionImpl>(params);
+    return makePtr<LogisticRegressionImpl>();
 }
 
 bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
@@ -312,7 +322,7 @@ double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels
     theta_b = _init_theta(Range(1, n), Range::all());
     multiply(theta_b, theta_b, theta_c, 1);
 
-    if(this->params.regularized > 0)
+    if(params.norm != REG_NONE)
     {
         llambda = 1;
     }
@@ -367,7 +377,7 @@ Mat LogisticRegressionImpl::compute_batch_gradient(const Mat& _data, const Mat&
     m = _data.rows;
     n = _data.cols;
 
-    if(this->params.regularized > 0)
+    if(params.norm != REG_NONE)
     {
         llambda = 1;
     }
@@ -439,7 +449,7 @@ Mat LogisticRegressionImpl::compute_mini_batch_gradient(const Mat& _data, const
     Mat data_d;
     Mat labels_l;
 
-    if(this->params.regularized > 0)
+    if(params.norm != REG_NONE)
     {
         lambda_l = 1;
     }
@@ -570,7 +580,6 @@ void LogisticRegressionImpl::write(FileStorage& fs) const
     fs<<"alpha"<<this->params.alpha;
     fs<<"iterations"<<this->params.num_iters;
     fs<<"norm"<<this->params.norm;
-    fs<<"regularized"<<this->params.regularized;
     fs<<"train_method"<<this->params.train_method;
     if(this->params.train_method == LogisticRegression::MINI_BATCH)
     {
@@ -592,7 +601,6 @@ void LogisticRegressionImpl::read(const FileNode& fn)
     this->params.alpha = (double)fn["alpha"];
     this->params.num_iters = (int)fn["iterations"];
     this->params.norm = (int)fn["norm"];
-    this->params.regularized = (int)fn["regularized"];
     this->params.train_method = (int)fn["train_method"];
     if(this->params.train_method == LogisticRegression::MINI_BATCH)
...
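LogisticRegression follows the same pattern: the public Params struct is replaced by the internal LrParams, exposed through CV_IMPL_PROPERTY setters, and the separate regularized flag is folded into the norm value (REG_NONE now disables the penalty). A minimal configuration sketch mirroring the updated test further down, with tdata assumed to be an already prepared Ptr<TrainData>:

    Ptr<LogisticRegression> lr = LogisticRegression::create();    // no Params argument any more
    lr->setLearningRate(0.001);                         // former Params::alpha
    lr->setIterations(1000);                            // former Params::num_iters
    lr->setRegularization(LogisticRegression::REG_L2);  // REG_NONE switches regularization off
    lr->setTrainMethod(LogisticRegression::BATCH);      // or LogisticRegression::MINI_BATCH
    lr->setMiniBatchSize(1);                            // only used with MINI_BATCH

    lr->train(tdata);
    Mat responses;
    lr->predict(tdata->getSamples(), responses);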
@@ -43,7 +43,6 @@
 namespace cv {
 namespace ml {
 
-NormalBayesClassifier::Params::Params() {}
 
 class NormalBayesClassifierImpl : public NormalBayesClassifier
 {
@@ -53,9 +52,6 @@ public:
         nallvars = 0;
     }
 
-    void setParams(const Params&) {}
-    Params getParams() const { return Params(); }
-
     bool train( const Ptr<TrainData>& trainData, int flags )
     {
         const float min_variation = FLT_EPSILON;
@@ -455,7 +451,7 @@ public:
 };
 
-Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const Params&)
+Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
 {
     Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
     return p;
...
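NormalBayesClassifier had an empty Params struct, so here the change is purely cosmetic: creation needs no configuration object at all. A minimal sketch, again with tdata assumed to be a prepared Ptr<TrainData>:

    Ptr<NormalBayesClassifier> nb = NormalBayesClassifier::create(); // was create(const Params&)
    nb->train(tdata);
    Mat out;
    nb->predict(tdata->getSamples(), out);  // StatModel::predict, unchanged by this patch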
@@ -120,6 +120,91 @@ namespace ml
         return termCrit;
     }
 
+    struct TreeParams
+    {
+        TreeParams();
+        TreeParams( int maxDepth, int minSampleCount,
+                    double regressionAccuracy, bool useSurrogates,
+                    int maxCategories, int CVFolds,
+                    bool use1SERule, bool truncatePrunedTree,
+                    const Mat& priors );
+
+        inline void setMaxCategories(int val)
+        {
+            if( val < 2 )
+                CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" );
+            maxCategories = std::min(val, 15 );
+        }
+        inline void setMaxDepth(int val)
+        {
+            if( val < 0 )
+                CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" );
+            maxDepth = std::min( val, 25 );
+        }
+        inline void setMinSampleCount(int val)
+        {
+            minSampleCount = std::max(val, 1);
+        }
+        inline void setCVFolds(int val)
+        {
+            if( val < 0 )
+                CV_Error( CV_StsOutOfRange,
+                          "params.CVFolds should be =0 (the tree is not pruned) "
+                          "or n>0 (tree is pruned using n-fold cross-validation)" );
+            if( val == 1 )
+                val = 0;
+            CVFolds = val;
+        }
+        inline void setRegressionAccuracy(float val)
+        {
+            if( val < 0 )
+                CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
+            regressionAccuracy = val;
+        }
+
+        inline int getMaxCategories() const { return maxCategories; }
+        inline int getMaxDepth() const { return maxDepth; }
+        inline int getMinSampleCount() const { return minSampleCount; }
+        inline int getCVFolds() const { return CVFolds; }
+        inline float getRegressionAccuracy() const { return regressionAccuracy; }
+
+        CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates)
+        CV_IMPL_PROPERTY(bool, Use1SERule, use1SERule)
+        CV_IMPL_PROPERTY(bool, TruncatePrunedTree, truncatePrunedTree)
+        CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors)
+
+    public:
+        bool useSurrogates;
+        bool use1SERule;
+        bool truncatePrunedTree;
+        Mat priors;
+
+    protected:
+        int maxCategories;
+        int maxDepth;
+        int minSampleCount;
+        int CVFolds;
+        float regressionAccuracy;
+    };
+
+    struct RTreeParams
+    {
+        RTreeParams();
+        RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit );
+        bool calcVarImportance;
+        int nactiveVars;
+        TermCriteria termCrit;
+    };
+
+    struct BoostTreeParams
+    {
+        BoostTreeParams();
+        BoostTreeParams(int boostType, int weakCount, double weightTrimRate);
+        int boostType;
+        int weakCount;
+        double weightTrimRate;
+    };
+
     class DTreesImpl : public DTrees
     {
     public:
@@ -191,6 +276,16 @@ namespace ml
             int maxSubsetSize;
         };
 
+        CV_WRAP_SAME_PROPERTY(int, MaxCategories, params)
+        CV_WRAP_SAME_PROPERTY(int, MaxDepth, params)
+        CV_WRAP_SAME_PROPERTY(int, MinSampleCount, params)
+        CV_WRAP_SAME_PROPERTY(int, CVFolds, params)
+        CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, params)
+        CV_WRAP_SAME_PROPERTY(bool, Use1SERule, params)
+        CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, params)
+        CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, params)
+        CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, params)
+
         DTreesImpl();
         virtual ~DTreesImpl();
         virtual void clear();
@@ -202,8 +297,7 @@ namespace ml
         int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
        int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }
 
-        virtual void setDParams(const Params& _params);
-        virtual Params getDParams() const;
+        virtual void setDParams(const TreeParams& _params);
         virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
         virtual void endTraining();
         virtual void initCompVarIdx();
@@ -250,7 +344,7 @@ namespace ml
         virtual const std::vector<Split>& getSplits() const { return splits; }
         virtual const std::vector<int>& getSubsets() const { return subsets; }
 
-        Params params0, params;
+        TreeParams params;
 
         vector<int> varIdx;
         vector<int> compVarIdx;
...
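The former DTrees::Params/params0 pair is consolidated into a single internal TreeParams struct, and DTreesImpl re-exports each field through CV_WRAP_SAME_PROPERTY, so decision-tree settings are adjusted with get/set methods on the model itself. A sketch of configuring a tree through those generated accessors; DTrees::create() with no arguments is an assumption here, matching the factory pattern of the other models in this patch, and tdata is again a caller-supplied Ptr<TrainData>:

    Ptr<DTrees> dtree = DTrees::create();  // assumed no-argument factory, like the other models
    dtree->setMaxDepth(10);                // clamped to 25 by TreeParams::setMaxDepth
    dtree->setMinSampleCount(2);
    dtree->setCVFolds(0);                  // 0: no pruning by cross-validation
    dtree->setMaxCategories(15);           // must be >= 2, clamped to 15
    dtree->setUseSurrogates(false);
    dtree->setRegressionAccuracy(0.01f);
    dtree->setPriors(Mat());
    dtree->train(tdata);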
@@ -48,21 +48,16 @@ namespace ml {
 //////////////////////////////////////////////////////////////////////////////////////////
 //                                   Random trees                                        //
 //////////////////////////////////////////////////////////////////////////////////////////
-RTrees::Params::Params()
-    : DTrees::Params(5, 10, 0.f, false, 10, 0, false, false, Mat())
+RTreeParams::RTreeParams()
 {
     calcVarImportance = false;
     nactiveVars = 0;
     termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
 }
 
-RTrees::Params::Params( int _maxDepth, int _minSampleCount,
-                        double _regressionAccuracy, bool _useSurrogates,
-                        int _maxCategories, const Mat& _priors,
-                        bool _calcVarImportance, int _nactiveVars,
-                        TermCriteria _termCrit )
-    : DTrees::Params(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates,
-                     _maxCategories, 0, false, false, _priors)
+RTreeParams::RTreeParams(bool _calcVarImportance,
+                         int _nactiveVars,
+                         TermCriteria _termCrit )
 {
     calcVarImportance = _calcVarImportance;
     nactiveVars = _nactiveVars;
@@ -73,18 +68,19 @@ RTrees::Params::Params( int _maxDepth, int _minSampleCount,
 class DTreesImplForRTrees : public DTreesImpl
 {
 public:
-    DTreesImplForRTrees() {}
-    virtual ~DTreesImplForRTrees() {}
-
-    void setRParams(const RTrees::Params& p)
-    {
-        rparams = p;
-    }
-
-    RTrees::Params getRParams() const
-    {
-        return rparams;
-    }
+    DTreesImplForRTrees()
+    {
+        params.setMaxDepth(5);
+        params.setMinSampleCount(10);
+        params.setRegressionAccuracy(0.f);
+        params.useSurrogates = false;
+        params.setMaxCategories(10);
+        params.setCVFolds(0);
+        params.use1SERule = false;
+        params.truncatePrunedTree = false;
+        params.priors = Mat();
+    }
+    virtual ~DTreesImplForRTrees() {}
 
     void clear()
     {
@@ -129,10 +125,6 @@ public:
     bool train( const Ptr<TrainData>& trainData, int flags )
     {
-        Params dp(rparams.maxDepth, rparams.minSampleCount, rparams.regressionAccuracy,
-                  rparams.useSurrogates, rparams.maxCategories, rparams.CVFolds,
-                  rparams.use1SERule, rparams.truncatePrunedTree, rparams.priors);
-        setDParams(dp);
         startTraining(trainData, flags);
         int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
             rparams.termCrit.maxCount : 10000;
@@ -326,12 +318,6 @@ public:
     void readParams( const FileNode& fn )
     {
         DTreesImpl::readParams(fn);
-        rparams.maxDepth = params0.maxDepth;
-        rparams.minSampleCount = params0.minSampleCount;
-        rparams.regressionAccuracy = params0.regressionAccuracy;
-        rparams.useSurrogates = params0.useSurrogates;
-        rparams.maxCategories = params0.maxCategories;
-        rparams.priors = params0.priors;
 
         FileNode tparams_node = fn["training_params"];
         rparams.nactiveVars = (int)tparams_node["nactive_vars"];
@@ -361,7 +347,7 @@ public:
         }
     }
 
-    RTrees::Params rparams;
+    RTreeParams rparams;
     double oobError;
     vector<float> varImportance;
     vector<int> allVars, activeVars;
@@ -372,6 +358,20 @@ public:
 class RTreesImpl : public RTrees
 {
 public:
+    CV_IMPL_PROPERTY(bool, CalculateVarImportance, impl.rparams.calcVarImportance)
+    CV_IMPL_PROPERTY(int, ActiveVarCount, impl.rparams.nactiveVars)
+    CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, impl.rparams.termCrit)
+
+    CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
+    CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
+    CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)
+
     RTreesImpl() {}
     virtual ~RTreesImpl() {}
@@ -397,9 +397,6 @@ public:
         impl.read(fn);
     }
 
-    void setRParams(const Params& p) { impl.setRParams(p); }
-    Params getRParams() const { return impl.getRParams(); }
-
     Mat getVarImportance() const { return Mat_<float>(impl.varImportance, true); }
     int getVarCount() const { return impl.getVarCount(); }
@@ -415,11 +412,9 @@ public:
 };
 
-Ptr<RTrees> RTrees::create(const Params& params)
+Ptr<RTrees> RTrees::create()
 {
-    Ptr<RTreesImpl> p = makePtr<RTreesImpl>();
-    p->setRParams(params);
-    return p;
+    return makePtr<RTreesImpl>();
 }
 
 }}
...
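RTrees follows suit: RTrees::Params disappears, the forest-specific fields move into RTreeParams, and RTreesImpl forwards both its own properties (CalculateVarImportance, ActiveVarCount, TermCriteria) and the per-tree properties to the new accessors. A minimal sketch under the new interface, with tdata assumed as before:

    Ptr<RTrees> forest = RTrees::create();    // was create(const Params&)
    forest->setMaxDepth(5);                   // per-tree settings go through the DTrees properties
    forest->setMinSampleCount(10);
    forest->setCalculateVarImportance(true);  // former Params::calcVarImportance
    forest->setActiveVarCount(0);             // former Params::nactiveVars
    forest->setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1));
    forest->train(tdata);
    Mat importance = forest->getVarImportance();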
@@ -95,16 +95,13 @@ void CV_LRTest::run( int /*start_from*/ )
     string dataFileName = ts->get_data_path() + "iris.data";
     Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
 
-    LogisticRegression::Params params = LogisticRegression::Params();
-    params.alpha = 1.0;
-    params.num_iters = 10001;
-    params.norm = LogisticRegression::REG_L2;
-    params.regularized = 1;
-    params.train_method = LogisticRegression::BATCH;
-    params.mini_batch_size = 10;
-
     // run LR classifier train classifier
-    Ptr<LogisticRegression> p = LogisticRegression::create(params);
+    Ptr<LogisticRegression> p = LogisticRegression::create();
+    p->setLearningRate(1.0);
+    p->setIterations(10001);
+    p->setRegularization(LogisticRegression::REG_L2);
+    p->setTrainMethod(LogisticRegression::BATCH);
+    p->setMiniBatchSize(10);
     p->train(tdata);
 
     // predict using the same data
@@ -157,20 +154,17 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
     Mat responses1, responses2;
     Mat learnt_mat1, learnt_mat2;
 
-    LogisticRegression::Params params1 = LogisticRegression::Params();
-    params1.alpha = 1.0;
-    params1.num_iters = 10001;
-    params1.norm = LogisticRegression::REG_L2;
-    params1.regularized = 1;
-    params1.train_method = LogisticRegression::BATCH;
-    params1.mini_batch_size = 10;
-
     // train and save the classifier
     String filename = tempfile(".xml");
     try
     {
         // run LR classifier train classifier
-        Ptr<LogisticRegression> lr1 = LogisticRegression::create(params1);
+        Ptr<LogisticRegression> lr1 = LogisticRegression::create();
+        lr1->setLearningRate(1.0);
+        lr1->setIterations(10001);
+        lr1->setRegularization(LogisticRegression::REG_L2);
+        lr1->setTrainMethod(LogisticRegression::BATCH);
+        lr1->setMiniBatchSize(10);
         lr1->train(tdata);
         lr1->predict(tdata->getSamples(), responses1);
         learnt_mat1 = lr1->get_learnt_thetas();
...