/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_ML_HPP
#define OPENCV_ML_HPP

#ifdef __cplusplus
#  include "opencv2/core.hpp"
#endif

#ifdef __cplusplus

#include <float.h>
#include <map>
#include <iostream>

/**
  @defgroup ml Machine Learning

  The Machine Learning Library (MLL) is a set of classes and functions for statistical
  classification, regression, and clustering of data.

  Most of the classification and regression algorithms are implemented as C++ classes. As the
  algorithms have different sets of features (such as the ability to handle missing measurements or
  categorical input variables), there is little common ground between the classes. This common
  ground is defined by the class cv::ml::StatModel, from which all the other ML classes are derived.

  See the detailed overview here: @ref ml_intro.
 */

namespace cv
{

namespace ml
{

//! @addtogroup ml
//! @{

/** @brief Variable types */
enum VariableTypes
{
    VAR_NUMERICAL    =0, //!< same as VAR_ORDERED
    VAR_ORDERED      =0, //!< ordered variables
    VAR_CATEGORICAL  =1  //!< categorical variables
};

/** @brief %Error types */
enum ErrorTypes
{
    TEST_ERROR = 0,
    TRAIN_ERROR = 1
};

/** @brief Sample types */
enum SampleTypes
{
    ROW_SAMPLE = 0, //!< each training sample is a row of samples
    COL_SAMPLE = 1  //!< each training sample occupies a column of samples
};

/** @brief The structure represents the logarithmic grid range of statmodel parameters.

It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate
being computed by cross-validation.
 */
class CV_EXPORTS_W ParamGrid
{
public:
    /** @brief Default constructor */
    ParamGrid();
    /** @brief Constructor with parameters */
    ParamGrid(double _minVal, double _maxVal, double _logStep);

    CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
    CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.
    /** @brief Logarithmic step for iterating the statmodel parameter.

    The grid determines the following iteration sequence of the statmodel parameter values:
    \f[(minVal, minVal*logStep, minVal*{logStep}^2, \dots,  minVal*{logStep}^n),\f]
    where \f$n\f$ is the maximal index satisfying
    \f[\texttt{minVal} * \texttt{logStep} ^n <  \texttt{maxVal}\f]
    The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
    */
    CV_PROP_RW double logStep;
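    // A worked example (illustrative, not part of the API): with minVal = 0.1,
    // maxVal = 500 and logStep = 10 the grid iterates the values
    // 0.1, 1, 10, 100, since n = 3 is the largest n with 0.1 * 10^n < 500.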

    /** @brief Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method

    @param minVal minimum value of the parameter grid
    @param maxVal maximum value of the parameter grid
    @param logstep Logarithmic step for iterating the statmodel parameter
    */
    CV_WRAP static Ptr<ParamGrid> create(double minVal=0., double maxVal=0., double logstep=1.);
};

/** @brief Class encapsulating training data.

Please note that the class only specifies the interface of training data, not the implementation.
All the statistical model classes in the _ml_ module accept Ptr\<TrainData\> as a parameter. In
other words, you can create your own class derived from TrainData and pass a smart pointer to an
instance of this class into StatModel::train.

@sa @ref ml_intro_data
 */
class CV_EXPORTS_W TrainData
{
public:
    static inline float missingValue() { return FLT_MAX; }
    virtual ~TrainData();

    CV_WRAP virtual int getLayout() const = 0;
    CV_WRAP virtual int getNTrainSamples() const = 0;
    CV_WRAP virtual int getNTestSamples() const = 0;
    CV_WRAP virtual int getNSamples() const = 0;
    CV_WRAP virtual int getNVars() const = 0;
    CV_WRAP virtual int getNAllVars() const = 0;

    CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
    CV_WRAP virtual Mat getSamples() const = 0;
    CV_WRAP virtual Mat getMissing() const = 0;

    /** @brief Returns matrix of train samples

    @param layout The requested layout. If it's different from the initial one, the matrix is
        transposed. See ml::SampleTypes.
    @param compressSamples if true, the function returns only the training samples (specified by
        sampleIdx)
    @param compressVars if true, the function returns the shorter training samples, containing only
        the active variables.

    In the current implementation the function tries to avoid physical data copying and returns the
    matrix stored inside TrainData (unless transposition or compression is needed).
     */
    CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
                                bool compressSamples=true,
                                bool compressVars=true) const = 0;

    /** @brief Returns the vector of responses

    The function returns the ordered or the original categorical responses. Usually it's used in
    regression algorithms.
     */
    CV_WRAP virtual Mat getTrainResponses() const = 0;

    /** @brief Returns the vector of normalized categorical responses

    The function returns a vector of responses. Each response is an integer from `0` to `<number of
    classes>-1`. The actual label value can then be retrieved from the class label vector, see
    TrainData::getClassLabels.
     */
    CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;
    CV_WRAP virtual Mat getTestResponses() const = 0;
    CV_WRAP virtual Mat getTestNormCatResponses() const = 0;
    CV_WRAP virtual Mat getResponses() const = 0;
    CV_WRAP virtual Mat getNormCatResponses() const = 0;
    CV_WRAP virtual Mat getSampleWeights() const = 0;
    CV_WRAP virtual Mat getTrainSampleWeights() const = 0;
    CV_WRAP virtual Mat getTestSampleWeights() const = 0;
    CV_WRAP virtual Mat getVarIdx() const = 0;
    CV_WRAP virtual Mat getVarType() const = 0;
    CV_WRAP Mat getVarSymbolFlags() const;
    CV_WRAP virtual int getResponseType() const = 0;
    CV_WRAP virtual Mat getTrainSampleIdx() const = 0;
    CV_WRAP virtual Mat getTestSampleIdx() const = 0;
    CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
    virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;
    CV_WRAP virtual Mat getDefaultSubstValues() const = 0;

    CV_WRAP virtual int getCatCount(int vi) const = 0;

    /** @brief Returns the vector of class labels

    The function returns the vector of unique labels occurring in the responses.
     */
    CV_WRAP virtual Mat getClassLabels() const = 0;

    CV_WRAP virtual Mat getCatOfs() const = 0;
    CV_WRAP virtual Mat getCatMap() const = 0;

    /** @brief Splits the training data into the training and test parts
    @sa TrainData::setTrainTestSplitRatio
     */
    CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;

    /** @brief Splits the training data into the training and test parts

    The function selects a subset of specified relative size and then returns it as the training
    set. If the function is not called, all the data is used for training. Please note that for
    each of TrainData::getTrain\* there is a corresponding TrainData::getTest\*, so that the test
    subset can be retrieved and processed as well.
    @sa TrainData::setTrainTestSplit
     */
    CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
    CV_WRAP virtual void shuffleTrainTest() = 0;
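    // A minimal usage sketch (illustrative; "data" stands for any TrainData
    // instance prepared by the caller):
    //
    //   Ptr<TrainData> data = ...;  // e.g. from TrainData::create
    //   data->setTrainTestSplitRatio(0.8, /*shuffle=*/true);
    //   int nTrain = data->getNTrainSamples();  // ~80% of the samples
    //   int nTest  = data->getNTestSamples();   // the remaining ~20%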

    /** @brief Returns matrix of test samples */
    CV_WRAP Mat getTestSamples() const;

    /** @brief Returns vector of symbolic names captured in loadFromCSV() */
    CV_WRAP void getNames(std::vector<String>& names) const;

    CV_WRAP static Mat getSubVector(const Mat& vec, const Mat& idx);

    /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.

    @param filename The input file name
    @param headerLineCount The number of lines in the beginning to skip; besides the header, the
        function also skips empty lines and lines starting with `#`
    @param responseStartIdx Index of the first output variable. If -1, the function considers the
        last variable as the response
    @param responseEndIdx Index of the last output variable + 1. If -1, then there is a single
        response variable at responseStartIdx.
    @param varTypeSpec The optional text string that specifies the variables' types. It has the
        format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2`
        (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are
        considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]`
        should cover all the variables. If varTypeSpec is not specified, then the algorithm uses the
        following rules:
        - all input variables are considered ordered by default. If some column contains
          non-numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding
          variable is considered categorical.
        - if there are several output variables, they are all considered as ordered. An error is
          reported when non-numerical values are used.
        - if there is a single output variable, then if its values are non-numerical or are all
          integers, then it's considered categorical. Otherwise, it's considered ordered.
    @param delimiter The character used to separate values in each line.
    @param missch The character used to specify missing measurements. It should not be a digit.
        Although it's a non-numerical value, it surely does not affect the decision of whether the
        variable is ordered or categorical.
    @note If the dataset only contains input variables and no responses, use responseStartIdx = -2
        and responseEndIdx = 0. The output variables vector will just contain zeros.
     */
    static Ptr<TrainData> loadFromCSV(const String& filename,
                                      int headerLineCount,
                                      int responseStartIdx=-1,
                                      int responseEndIdx=-1,
                                      const String& varTypeSpec=String(),
                                      char delimiter=',',
                                      char missch='?');
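    // A minimal usage sketch (illustrative; "iris.csv" is a hypothetical file
    // with one header line and the class label in the last column):
    //
    //   Ptr<TrainData> data = TrainData::loadFromCSV("iris.csv",
    //                                                1,    // skip one header line
    //                                                -1,   // last column is the response
    //                                                -1);  // single response variable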

    /** @brief Creates training data from in-memory arrays.

    @param samples matrix of samples. It should have CV_32F type.
    @param layout see ml::SampleTypes.
    @param responses matrix of responses. If the responses are scalar, they should be stored as a
        single row or as a single column. The matrix should have type CV_32F or CV_32S (in the
        former case the responses are considered as ordered by default; in the latter case - as
        categorical)
    @param varIdx vector specifying which variables to use for training. It can be an integer vector
        (CV_32S) containing 0-based variable indices or a byte vector (CV_8U) containing a mask of
        active variables.
    @param sampleIdx vector specifying which samples to use for training. It can be an integer
        vector (CV_32S) containing 0-based sample indices or a byte vector (CV_8U) containing a mask
        of training samples.
    @param sampleWeights optional vector with weights for each sample. It should have CV_32F type.
    @param varType optional vector of type CV_8U and size `<number_of_variables_in_samples> +
        <number_of_variables_in_responses>`, containing types of each input and output variable. See
        ml::VariableTypes.
     */
    CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
                                 InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
                                 InputArray sampleWeights=noArray(), InputArray varType=noArray());
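    // A minimal usage sketch (illustrative; the matrices are hypothetical
    // caller-supplied data):
    //
    //   Mat samples(100, 4, CV_32F);    // 100 samples, 4 features each
    //   Mat responses(100, 1, CV_32S);  // one categorical label per sample
    //   Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, responses);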
};

/** @brief Base class for statistical models in OpenCV ML.
 */
class CV_EXPORTS_W StatModel : public Algorithm
{
public:
    /** Predict options */
    enum Flags {
        UPDATE_MODEL = 1,
        RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label
        COMPRESSED_INPUT=2,
        PREPROCESSED_INPUT=4
    };

    /** @brief Returns the number of variables in training samples */
    CV_WRAP virtual int getVarCount() const = 0;

    CV_WRAP virtual bool empty() const;

    /** @brief Returns true if the model is trained */
    CV_WRAP virtual bool isTrained() const = 0;
    /** @brief Returns true if the model is a classifier */
    CV_WRAP virtual bool isClassifier() const = 0;


    /** @brief Trains the statistical model

    @param trainData training data that can be loaded from a file using TrainData::loadFromCSV or
        created with TrainData::create.
    @param flags optional flags, depending on the model. Some of the models can be updated with the
        new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
     */
    CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );

    /** @brief Trains the statistical model

    @param samples training samples
    @param layout See ml::SampleTypes.
    @param responses vector of responses associated with the training samples.
    */
    CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );

    /** @brief Computes error on the training or test dataset

    @param data the training data
    @param test if true, the error is computed over the test subset of the data; otherwise it's
        computed over the training subset of the data. Please note that if you loaded a completely
        different dataset to evaluate an already trained classifier, you will probably want to skip
        setting the test subset with TrainData::setTrainTestSplitRatio and specify test=false, so
        that the error is computed for the whole new set.
    @param resp the optional output responses.

    The method uses StatModel::predict to compute the error. For regression models the error is
    computed as RMS, for classifiers - as a percent of misclassified samples (0%-100%).
     */
    CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;

    /** @brief Predicts response(s) for the provided sample(s)

    @param samples The input samples, floating-point matrix
    @param results The optional output matrix of results.
    @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.
     */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;

    /** @brief Create and train model with default parameters

    The class must implement a static `create()` method with no parameters or with all default parameter values
    */
    template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0)
    {
        Ptr<_Tp> model = _Tp::create();
        return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();
    }
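    // A minimal usage sketch (illustrative; "data" is a hypothetical
    // Ptr<TrainData> prepared by the caller):
    //
    //   Ptr<SVM> svm = StatModel::train<SVM>(data);
    //   if (!svm.empty()) { /* the model was created and trained in one call */ }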
};

/****************************************************************************************\
*                                 Normal Bayes Classifier                                *
\****************************************************************************************/

/** @brief Bayes classifier for normally distributed data.

@sa @ref ml_intro_bayes
 */
class CV_EXPORTS_W NormalBayesClassifier : public StatModel
{
public:
    /** @brief Predicts the response for sample(s).

    The method estimates the most probable classes for input vectors. Input vectors (one or more)
    are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one
    output vector outputs. The predicted class for a single input vector is returned by the method.
    The vector outputProbs contains the output probabilities corresponding to each element of
    result.
     */
    CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,
                               OutputArray outputProbs, int flags=0 ) const = 0;
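    // A minimal usage sketch (illustrative; "trainSamples", "labels" and
    // "querySamples" are hypothetical caller-supplied matrices):
    //
    //   Ptr<NormalBayesClassifier> nb = NormalBayesClassifier::create();
    //   nb->train(trainSamples, ROW_SAMPLE, labels);  // CV_32F / CV_32S
    //   Mat outputs, probs;
    //   nb->predictProb(querySamples, outputs, probs);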

    /** Creates empty model
    Use StatModel::train to train the model after creation. */
    CV_WRAP static Ptr<NormalBayesClassifier> create();

    /** @brief Loads and creates a serialized NormalBayesClassifier from a file
     *
     * Use NormalBayesClassifier::save to serialize and store a NormalBayesClassifier to disk.
     * Load the NormalBayesClassifier from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier
     *
     * @param filepath path to serialized NormalBayesClassifier
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<NormalBayesClassifier> load(const String& filepath , const String& nodeName = String());
};

/****************************************************************************************\
*                          K-Nearest Neighbour Classifier                                *
\****************************************************************************************/

/** @brief The class implements the K-Nearest Neighbors model

@sa @ref ml_intro_knn
 */
class CV_EXPORTS_W KNearest : public StatModel
{
public:

    /** Default number of neighbors to use in predict method. */
    /** @see setDefaultK */
    CV_WRAP virtual int getDefaultK() const = 0;
    /** @copybrief getDefaultK @see getDefaultK */
    CV_WRAP virtual void setDefaultK(int val) = 0;

    /** Whether classification or regression model should be trained. */
    /** @see setIsClassifier */
    CV_WRAP virtual bool getIsClassifier() const = 0;
    /** @copybrief getIsClassifier @see getIsClassifier */
    CV_WRAP virtual void setIsClassifier(bool val) = 0;

    /** Parameter for KDTree implementation. */
    /** @see setEmax */
    CV_WRAP virtual int getEmax() const = 0;
    /** @copybrief getEmax @see getEmax */
    CV_WRAP virtual void setEmax(int val) = 0;

    /** %Algorithm type, one of KNearest::Types. */
    /** @see setAlgorithmType */
    CV_WRAP virtual int getAlgorithmType() const = 0;
    /** @copybrief getAlgorithmType @see getAlgorithmType */
    CV_WRAP virtual void setAlgorithmType(int val) = 0;

    /** @brief Finds the neighbors and predicts responses for input vectors.

    @param samples Input samples stored by rows. It is a single-precision floating-point matrix of
        `<number_of_samples> * <number_of_features>` size.
    @param k Number of used nearest neighbors. Should be greater than 1.
    @param results Vector with results of prediction (regression or classification) for each input
        sample. It is a single-precision floating-point vector with `<number_of_samples>` elements.
    @param neighborResponses Optional output values for corresponding neighbors. It is a single-
        precision floating-point matrix of `<number_of_samples> * k` size.
    @param dist Optional output distances from the input vectors to the corresponding neighbors. It
        is a single-precision floating-point matrix of `<number_of_samples> * k` size.

    For each input vector (a row of the matrix samples), the method finds the k nearest neighbors.
    In case of regression, the predicted result is a mean value of the particular vector's neighbor
    responses. In case of classification, the class is determined by voting.

    For each input vector, the neighbors are sorted by their distances to the vector.

    In case of the C++ interface you can pass output pointers to empty matrices and the function
    will allocate memory itself.

    If only a single input vector is passed, all output matrices are optional and the predicted
    value is returned by the method.

    The function is parallelized with the TBB library.
     */
    CV_WRAP virtual float findNearest( InputArray samples, int k,
                               OutputArray results,
                               OutputArray neighborResponses=noArray(),
                               OutputArray dist=noArray() ) const = 0;
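    // A minimal usage sketch (illustrative; "trainSamples", "labels" and
    // "query" are hypothetical caller-supplied matrices):
    //
    //   Ptr<KNearest> knn = KNearest::create();
    //   knn->train(trainSamples, ROW_SAMPLE, labels);  // CV_32F samples
    //   Mat results, neighborResponses, dists;
    //   knn->findNearest(query, 3, results, neighborResponses, dists);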

    /** @brief Implementations of KNearest algorithm
       */
    enum Types
    {
        BRUTE_FORCE=1,
        KDTREE=2
    };

    /** @brief Creates the empty model

    The static method creates an empty %KNearest classifier, which should then be trained using the StatModel::train method.
     */
    CV_WRAP static Ptr<KNearest> create();
};

/****************************************************************************************\
*                                   Support Vector Machines                              *
\****************************************************************************************/

/** @brief Support Vector Machines.

@sa @ref ml_intro_svm
 */
class CV_EXPORTS_W SVM : public StatModel
{
public:

    class CV_EXPORTS Kernel : public Algorithm
    {
    public:
        virtual int getType() const = 0;
        virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0;
    };

    /** Type of a %SVM formulation.
    See SVM::Types. Default value is SVM::C_SVC. */
    /** @see setType */
    CV_WRAP virtual int getType() const = 0;
    /** @copybrief getType @see getType */
    CV_WRAP virtual void setType(int val) = 0;

    /** Parameter \f$\gamma\f$ of a kernel function.
    For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */
    /** @see setGamma */
    CV_WRAP virtual double getGamma() const = 0;
    /** @copybrief getGamma @see getGamma */
    CV_WRAP virtual void setGamma(double val) = 0;

    /** Parameter _coef0_ of a kernel function.
    For SVM::POLY or SVM::SIGMOID. Default value is 0.*/
    /** @see setCoef0 */
    CV_WRAP virtual double getCoef0() const = 0;
    /** @copybrief getCoef0 @see getCoef0 */
    CV_WRAP virtual void setCoef0(double val) = 0;

    /** Parameter _degree_ of a kernel function.
    For SVM::POLY. Default value is 0. */
    /** @see setDegree */
    CV_WRAP virtual double getDegree() const = 0;
    /** @copybrief getDegree @see getDegree */
    CV_WRAP virtual void setDegree(double val) = 0;

    /** Parameter _C_ of a %SVM optimization problem.
    For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */
    /** @see setC */
    CV_WRAP virtual double getC() const = 0;
    /** @copybrief getC @see getC */
    CV_WRAP virtual void setC(double val) = 0;

    /** Parameter \f$\nu\f$ of a %SVM optimization problem.
    For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */
    /** @see setNu */
    CV_WRAP virtual double getNu() const = 0;
    /** @copybrief getNu @see getNu */
    CV_WRAP virtual void setNu(double val) = 0;

    /** Parameter \f$\epsilon\f$ of a %SVM optimization problem.
    For SVM::EPS_SVR. Default value is 0. */
    /** @see setP */
    CV_WRAP virtual double getP() const = 0;
    /** @copybrief getP @see getP */
    CV_WRAP virtual void setP(double val) = 0;

    /** Optional weights in the SVM::C_SVC problem, assigned to particular classes.
    They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus
    these weights affect the misclassification penalty for different classes. The larger the weight,
    the larger the penalty on misclassification of data from the corresponding class. Default value
    is empty Mat. */
    /** @see setClassWeights */
    CV_WRAP virtual cv::Mat getClassWeights() const = 0;
    /** @copybrief getClassWeights @see getClassWeights */
    CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0;

    /** Termination criteria of the iterative %SVM training procedure which solves a partial
    case of constrained quadratic optimization problem.
    You can specify tolerance and/or the maximum number of iterations. Default value is
    `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */
    /** @see setTermCriteria */
    CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;

    /** Type of a %SVM kernel.
    See SVM::KernelTypes. Default value is SVM::RBF. */
    CV_WRAP virtual int getKernelType() const = 0;

    /** Initialize with one of predefined kernels.
    See SVM::KernelTypes. */
    CV_WRAP virtual void setKernel(int kernelType) = 0;

    /** Initialize with custom kernel.
    See SVM::Kernel class for implementation details */
    virtual void setCustomKernel(const Ptr<Kernel> &_kernel) = 0;

    //! %SVM type
    enum Types {
        /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows
        imperfect separation of classes with penalty multiplier C for outliers. */
        C_SVC=100,
        /** \f$\nu\f$-Support Vector Classification. n-class classification with possible
        imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother
        the decision boundary) is used instead of C. */
        NU_SVC=101,
        /** Distribution Estimation (One-class %SVM). All the training data are from
        the same class, %SVM builds a boundary that separates the class from the rest of the feature
        space. */
        ONE_CLASS=102,
        /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors
        from the training set and the fitting hyper-plane must be less than p. For outliers the
        penalty multiplier C is used. */
        EPS_SVR=103,
        /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p.
        See @cite LibSVM for details. */
        NU_SVR=104
    };

    /** @brief %SVM kernel type

    A comparison of different kernels on the following 2D test case with four classes. Four
    SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three
    different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score.
    Bright means max-score \> 0, dark means max-score \< 0.
    ![image](pics/SVM_Comparison.png)
    */
    enum KernelTypes {
        /** Returned by SVM::getKernelType in case when custom kernel has been set */
        CUSTOM=-1,
        /** Linear kernel. No mapping is done, linear discrimination (or regression) is
        done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */
        LINEAR=0,
        /** Polynomial kernel:
        \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */
        POLY=1,
        /** Radial basis function (RBF), a good choice in most cases.
        \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */
        RBF=2,
        /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */
        SIGMOID=3,
        /** Exponential Chi2 kernel, similar to the RBF kernel:
        \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */
        CHI2=4,
        /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */
        INTER=5
    };

    //! %SVM params type
    enum ParamTypes {
        C=0,
        GAMMA=1,
        P=2,
        NU=3,
        COEF=4,
        DEGREE=5
    };

    /** @brief Trains an %SVM with optimal parameters.

    @param data the training data that can be constructed using TrainData::create or
        TrainData::loadFromCSV.
    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One
        subset is used to test the model, the others form the train set. So, the %SVM algorithm is
        executed kFold times.
    @param Cgrid grid for C
    @param gammaGrid grid for gamma
    @param pGrid grid for p
    @param nuGrid grid for nu
    @param coeffGrid grid for coeff
    @param degreeGrid grid for degree
    @param balanced If true and the problem is 2-class classification then the method creates more
        balanced cross-validation subsets, that is, the proportions between classes in the subsets
        are close to the proportions in the whole train dataset.

    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
    nu, coef0, degree. Parameters are considered optimal when the cross-validation
    estimate of the test set error is minimal.

    If there is no need to optimize a parameter, the corresponding grid step should be set to any
    value less than or equal to 1. For example, to avoid optimization in gamma, set
    `gammaGrid.logStep = 0` and set `gammaGrid.minVal`, `gammaGrid.maxVal` to arbitrary numbers. In
    this case, the current value of gamma is used.

    And, finally, if the optimization in a parameter is required but the corresponding grid is
    unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for
    gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`.

    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
    the usual %SVM with parameters specified in params is executed.
     */
    virtual bool trainAuto( const Ptr<TrainData>& data, int kFold = 10,
                    ParamGrid Cgrid = getDefaultGrid(C),
                    ParamGrid gammaGrid  = getDefaultGrid(GAMMA),
                    ParamGrid pGrid      = getDefaultGrid(P),
                    ParamGrid nuGrid     = getDefaultGrid(NU),
                    ParamGrid coeffGrid  = getDefaultGrid(COEF),
                    ParamGrid degreeGrid = getDefaultGrid(DEGREE),
                    bool balanced=false) = 0;
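    // A minimal usage sketch (illustrative; "data" is a hypothetical
    // Ptr<TrainData> prepared by the caller):
    //
    //   Ptr<SVM> svm = SVM::create();
    //   svm->setType(SVM::C_SVC);
    //   svm->setKernel(SVM::RBF);
    //   svm->trainAuto(data, 10);  // 10-fold cross-validation over the default grids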

    /** @brief Trains an %SVM with optimal parameters

    @param samples training samples
    @param layout See ml::SampleTypes.
    @param responses vector of responses associated with the training samples.
    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One
        subset is used to test the model, the others form the train set. So, the %SVM algorithm is
        executed kFold times.
    @param Cgrid grid for C
    @param gammaGrid grid for gamma
    @param pGrid grid for p
    @param nuGrid grid for nu
    @param coeffGrid grid for coeff
    @param degreeGrid grid for degree
    @param balanced If true and the problem is 2-class classification then the method creates more
        balanced cross-validation subsets, that is, the proportions between classes in the subsets
        are close to the proportions in the whole train dataset.

    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
    nu, coef0, degree. Parameters are considered optimal when the cross-validation
    estimate of the test set error is minimal.

    This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only
    offers rudimentary parameter options.

    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
    the usual %SVM with parameters specified in params is executed.
    */
    CV_WRAP bool trainAuto(InputArray samples,
            int layout,
            InputArray responses,
            int kFold = 10,
            Ptr<ParamGrid> Cgrid = SVM::getDefaultGridPtr(SVM::C),
            Ptr<ParamGrid> gammaGrid  = SVM::getDefaultGridPtr(SVM::GAMMA),
            Ptr<ParamGrid> pGrid      = SVM::getDefaultGridPtr(SVM::P),
            Ptr<ParamGrid> nuGrid     = SVM::getDefaultGridPtr(SVM::NU),
            Ptr<ParamGrid> coeffGrid  = SVM::getDefaultGridPtr(SVM::COEF),
            Ptr<ParamGrid> degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE),
            bool balanced=false);

    /** @brief Retrieves all the support vectors

    The method returns all the support vectors as a floating-point matrix, where support vectors are
    stored as matrix rows.
     */
    CV_WRAP virtual Mat getSupportVectors() const = 0;

    /** @brief Retrieves all the uncompressed support vectors of a linear %SVM

    The method returns all the uncompressed support vectors of a linear %SVM that the compressed
    support vector, used for prediction, was derived from. They are returned in a floating-point
    matrix, where the support vectors are stored as matrix rows.
     */
    CV_WRAP Mat getUncompressedSupportVectors() const;

    /** @brief Retrieves the decision function

    @param i the index of the decision function. If the problem solved is regression, 1-class or
        2-class classification, then there will be just one decision function and the index should
        always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$
        decision functions.
    @param alpha the optional output vector for weights, corresponding to different support vectors.
        In the case of linear %SVM all the alpha's will be 1's.
    @param svidx the optional output vector of indices of support vectors within the matrix of
        support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear
        %SVM each decision function consists of a single "compressed" support vector.

    The method returns the rho parameter of the decision function, a scalar subtracted from the
    weighted sum of kernel responses.
     */
    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;

    /** @brief Generates a grid for %SVM parameters.

    @param param_id %SVM parameter ID that must be one of the SVM::ParamTypes. The grid is
    generated for the parameter with this ID.

    The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be
    passed to the function SVM::trainAuto.
     */
    static ParamGrid getDefaultGrid( int param_id );

    /** @brief Generates a grid for %SVM parameters.

    @param param_id %SVM parameter ID that must be one of the SVM::ParamTypes. The grid is
    generated for the parameter with this ID.

    The function generates a grid pointer for the specified parameter of the %SVM algorithm.
    The grid may be passed to the function SVM::trainAuto.
     */
    CV_WRAP static Ptr<ParamGrid> getDefaultGridPtr( int param_id );

    /** Creates empty model.
    Use StatModel::train to train the model. Since %SVM has several parameters, you may want to
    find the best parameters for your problem; this can be done with SVM::trainAuto. */
    CV_WRAP static Ptr<SVM> create();
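    // A minimal usage sketch (illustrative; the matrices are hypothetical
    // caller-supplied data):
    //
    //   Ptr<SVM> svm = SVM::create();
    //   svm->setType(SVM::C_SVC);
    //   svm->setKernel(SVM::LINEAR);
    //   svm->train(trainSamples, ROW_SAMPLE, labels);  // CV_32F / CV_32S
    //   float response = svm->predict(querySample);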

    /** @brief Loads and creates a serialized svm from a file
     *
     * Use SVM::save to serialize and store an SVM to disk.
     * Load the SVM from this file again, by calling this function with the path to the file.
     *
     * @param filepath path to serialized svm
     */
    CV_WRAP static Ptr<SVM> load(const String& filepath);
};

/****************************************************************************************\
*                              Expectation - Maximization                                *
\****************************************************************************************/

/** @brief The class implements the Expectation Maximization algorithm.

@sa @ref ml_intro_em
 */
class CV_EXPORTS_W EM : public StatModel
{
public:
    //! Type of covariation matrices
    enum Types {
        /** A scaled identity matrix \f$\mu_k * I\f$. There is only one
        parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases,
        when the constraint is relevant, or as a first step in the optimization (for example in case
        when the data is preprocessed with PCA). The results of such preliminary estimation may be
        passed again to the optimization procedure, this time with
        covMatType=EM::COV_MAT_DIAGONAL. */
        COV_MAT_SPHERICAL=0,
        /** A diagonal matrix with positive diagonal elements. The number of
        free parameters is d for each matrix. This is the most commonly used option yielding good
        estimation results. */
        COV_MAT_DIAGONAL=1,
        /** A symmetric positive-definite matrix. The number of free
        parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless
        there is pretty accurate initial estimation of the parameters and/or a huge number of
        training samples. */
        COV_MAT_GENERIC=2,
        COV_MAT_DEFAULT=COV_MAT_DIAGONAL
    };

    //! Default parameters
    enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};

    //! The initial step
    enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};

    /** The number of mixture components in the Gaussian mixture model.
    Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could
    determine the optimal number of mixtures within a specified value range, but that is not the
    case in ML yet. */
    /** @see setClustersNumber */
    CV_WRAP virtual int getClustersNumber() const = 0;
    /** @copybrief getClustersNumber @see getClustersNumber */
    CV_WRAP virtual void setClustersNumber(int val) = 0;

    /** Constraint on covariance matrices which defines type of matrices.
    See EM::Types. */
    /** @see setCovarianceMatrixType */
    CV_WRAP virtual int getCovarianceMatrixType() const = 0;
    /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */
    CV_WRAP virtual void setCovarianceMatrixType(int val) = 0;

    /** The termination criteria of the %EM algorithm.
    The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of
    M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default
    maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

    /** @brief Returns weights of the mixtures

    Returns vector with the number of elements equal to the number of mixtures.
     */
    CV_WRAP virtual Mat getWeights() const = 0;
    /** @brief Returns the cluster centers (means of the Gaussian mixture)

    Returns matrix with the number of rows equal to the number of mixtures and number of columns
    equal to the space dimensionality.
     */
    CV_WRAP virtual Mat getMeans() const = 0;
    /** @brief Returns covariation matrices

    Returns vector of covariation matrices. Number of matrices is the number of Gaussian mixtures,
    each matrix is a square floating-point matrix NxN, where N is the space dimensionality.
     */
    CV_WRAP virtual void getCovs(CV_OUT std::vector<Mat>& covs) const = 0;

    /** @brief Returns posterior probabilities for the provided samples

    @param samples The input samples, floating-point matrix
    @param results The optional output \f$ nSamples \times nClusters\f$ matrix of results. It contains
    posterior probabilities for each sample from the input
    @param flags This parameter will be ignored
     */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;

    /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component
    for the given sample.

    @param sample A sample for classification. It should be a one-channel matrix of
        \f$1 \times dims\f$ or \f$dims \times 1\f$ size.
    @param probs Optional output matrix that contains posterior probabilities of each component
        given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type.

    The method returns a two-element double vector. The zero element is a likelihood logarithm value
    for the sample. The first element is an index of the most probable mixture component for the
    given sample.
     */
    CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;
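    // A minimal usage sketch (illustrative; "em" is a trained EM model and
    // "sample" a hypothetical 1 x dims one-channel row):
    //
    //   Mat probs;
    //   Vec2d out = em->predict2(sample, probs);
    //   double logLikelihood = out[0];
    //   int bestComponent = cvRound(out[1]);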

    /** @brief Estimate the Gaussian mixture parameters from a samples set.

    This variation starts with Expectation step. Initial values of the model parameters will be
    estimated by the k-means algorithm.

    Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
    responses (class labels or function values) as input. Instead, it computes the *Maximum
    Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
    parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in
    covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each
    sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most
    probable mixture component for each sample).

    The trained model can be used further for prediction, just like any other classifier. The
    trained model is similar to the NormalBayesClassifier.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        it will be converted to the inner matrix of such type for the further computing.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
        CV_64FC1 type.
     */
    CV_WRAP virtual bool trainEM(InputArray samples,
                         OutputArray logLikelihoods=noArray(),
                         OutputArray labels=noArray(),
                         OutputArray probs=noArray()) = 0;
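    // A minimal usage sketch (illustrative; "points" is a hypothetical
    // one-sample-per-row matrix):
    //
    //   Ptr<EM> em = EM::create();
    //   em->setClustersNumber(3);
    //   Mat labels;
    //   em->trainEM(points, noArray(), labels, noArray());
    //   // "labels" now holds the most probable mixture component per sample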

    /** @brief Estimate the Gaussian mixture parameters from a samples set.

    This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of
    mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices
    \f$S_k\f$ of mixture components.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        it will be converted to the inner matrix of such type for the further computing.
    @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of
        \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be
        converted to the inner matrix of such type for the further computing.
    @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of
        covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices
        do not have CV_64F type they will be converted to the inner matrices of such type for the
        further computing.
    @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel
        floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
        CV_64FC1 type.
    */
    CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,
                        InputArray covs0=noArray(),
                        InputArray weights0=noArray(),
                        OutputArray logLikelihoods=noArray(),
                        OutputArray labels=noArray(),
                        OutputArray probs=noArray()) = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.

    This variation starts with Maximization step. You need to provide initial probabilities
    \f$p_{i,k}\f$ to use this option.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        it will be converted to the inner matrix of such type for the further computing.
    @param probs0 Initial probabilities \f$p_{i,k}\f$ of each sample belonging to each mixture
        component. It is a one-channel floating-point matrix of \f$nsamples \times nclusters\f$ size.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
        CV_64FC1 type.
    */
    CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,
                        OutputArray logLikelihoods=noArray(),
                        OutputArray labels=noArray(),
                        OutputArray probs=noArray()) = 0;

    /** Creates empty %EM model.
    The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
    can use one of the EM::train\* methods or load it from file using Algorithm::load\<EM\>(filename).
     */
    CV_WRAP static Ptr<EM> create();

    /** @brief Loads and creates a serialized EM from a file
     *
     * Use EM::save to serialize and store an EM to disk.
     * Load the EM from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier
     *
     * @param filepath path to serialized EM
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<EM> load(const String& filepath , const String& nodeName = String());
};

/****************************************************************************************\
*                                      Decision Tree                                     *
\****************************************************************************************/

/** @brief The class represents a single decision tree or a collection of decision trees.

The current public interface of the class allows the user to train only a single decision tree;
however, the class is capable of storing multiple decision trees and using them for prediction (by
summing responses or using voting schemes), and the classes derived from DTrees (such as RTrees and
Boost) use this capability to implement decision tree ensembles.

@sa @ref ml_intro_trees
*/
class CV_EXPORTS_W DTrees : public StatModel
{
public:
    /** Predict options */
    enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) };

    /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to
    find a suboptimal split.
    If a discrete variable, on which the training procedure tries to make a split, takes more than
    maxCategories values, the precise best subset estimation may take a very long time because the
    algorithm is exponential. Instead, many decision trees engines (including our implementation)
    try to find a sub-optimal split in this case by clustering all the samples into maxCategories
    clusters, that is, some categories are merged together. The clustering is applied only in n \>
    2-class classification problems for categorical variables with N \> maxCategories possible
    values. In case of regression and 2-class classification the optimal split can be found
    efficiently without employing clustering, thus the parameter is not used in these cases.
    Default value is 10.*/
    /** @see setMaxCategories */
    CV_WRAP virtual int getMaxCategories() const = 0;
    /** @copybrief getMaxCategories @see getMaxCategories */
    CV_WRAP virtual void setMaxCategories(int val) = 0;

    /** The maximum possible depth of the tree.
    That is, the training algorithm attempts to split a node while its depth is less than maxDepth.
    The root node has zero depth. The actual depth may be smaller if the other termination criteria
    are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the
    tree is pruned. Default value is INT_MAX.*/
    /** @see setMaxDepth */
    CV_WRAP virtual int getMaxDepth() const = 0;
    /** @copybrief getMaxDepth @see getMaxDepth */
    CV_WRAP virtual void setMaxDepth(int val) = 0;

    /** If the number of samples in a node is less than this parameter then the node will not be split.

    Default value is 10.*/
    /** @see setMinSampleCount */
    CV_WRAP virtual int getMinSampleCount() const = 0;
    /** @copybrief getMinSampleCount @see getMinSampleCount */
    CV_WRAP virtual void setMinSampleCount(int val) = 0;

    /** If CVFolds \> 1 then the algorithm prunes the built decision tree using K-fold
    cross-validation procedure where K is equal to CVFolds.
    Default value is 10.*/
    /** @see setCVFolds */
    CV_WRAP virtual int getCVFolds() const = 0;
    /** @copybrief getCVFolds @see getCVFolds */
    CV_WRAP virtual void setCVFolds(int val) = 0;
1081 1082 1083 1084 1085

    /** If true then surrogate splits will be built.
    These splits allow working with missing data and computing variable importance correctly.
    Default value is false.
    @note currently it's not implemented.*/
    /** @see setUseSurrogates */
    CV_WRAP virtual bool getUseSurrogates() const = 0;
    /** @copybrief getUseSurrogates @see getUseSurrogates */
    CV_WRAP virtual void setUseSurrogates(bool val) = 0;

    /** If true then the pruning will be harsher.
    This will make a tree more compact and more resistant to the training data noise but a bit less
    accurate. Default value is true.*/
    /** @see setUse1SERule */
    CV_WRAP virtual bool getUse1SERule() const = 0;
    /** @copybrief getUse1SERule @see getUse1SERule */
    CV_WRAP virtual void setUse1SERule(bool val) = 0;

    /** If true then pruned branches are physically removed from the tree.
    Otherwise they are retained and it is possible to get results from the original unpruned (or
    pruned less aggressively) tree. Default value is true.*/
    /** @see setTruncatePrunedTree */
    CV_WRAP virtual bool getTruncatePrunedTree() const = 0;
    /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */
    CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0;

    /** Termination criteria for regression trees.
    If all absolute differences between an estimated value in a node and the values of the train
    samples in this node are less than this parameter then the node will not be split further.
    Default value is 0.01f.*/
    /** @see setRegressionAccuracy */
    CV_WRAP virtual float getRegressionAccuracy() const = 0;
    /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */
    CV_WRAP virtual void setRegressionAccuracy(float val) = 0;

    /** @brief The array of a priori class probabilities, sorted by the class label value.

    The parameter can be used to tune the decision tree preferences toward a certain class. For
    example, if you want to detect some rare anomaly occurrence, the training base will likely
    contain many more normal cases than anomalies, so a very good classification performance
    will be achieved just by considering every case as normal. To avoid this, the priors can be
    specified, where the anomaly probability is artificially increased (up to 0.5 or even
    greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is
    adjusted properly.

    You can also think about this parameter as weights of prediction categories which determine
    relative weights that you give to misclassification. That is, if the weight of the first
    category is 1 and the weight of the second category is 10, then each mistake in predicting
    the second category is equivalent to making 10 mistakes in predicting the first category.
    Default value is empty Mat.*/
    /** @see setPriors */
    CV_WRAP virtual cv::Mat getPriors() const = 0;
    /** @copybrief getPriors @see getPriors */
    CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;

    /** @brief The class represents a decision tree node.
     */
    class CV_EXPORTS Node
    {
    public:
        Node();
        double value; //!< Value at the node: a class label in case of classification or estimated
                      //!< function value in case of regression.
        int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the
                      //!< node. It is used internally in classification trees and tree ensembles.
        int parent; //!< Index of the parent node
        int left; //!< Index of the left child node
        int right; //!< Index of the right child node
        int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the
                        //!< case of missing values.
        int split; //!< Index of the first split
    };

    /** @brief The class represents a split in a decision tree.
     */
    class CV_EXPORTS Split
    {
    public:
        Split();
        int varIdx; //!< Index of variable on which the split is created.
        bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right
                       //!< branches are exchanged in the rule expressions below).
        float quality; //!< The split quality, a positive number. It is used to choose the best split.
        int next; //!< Index of the next split in the list of splits for the node
        float c; /**< The threshold value in case of split on an ordered variable.
                      The rule is:
                      @code{.none}
                      if var_value < c
                        then next_node <- left
                        else next_node <- right
                      @endcode */
        int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable.
                            The rule is:
                            @code{.none}
                            if bitset[var_value] == 1
                                then next_node <- left
                                else next_node <- right
                            @endcode */
    };

    /** @brief Returns indices of root nodes
    */
    virtual const std::vector<int>& getRoots() const = 0;
    /** @brief Returns all the nodes

    All the node indices are indices in the returned vector.
     */
    virtual const std::vector<Node>& getNodes() const = 0;
    /** @brief Returns all the splits

    All the split indices are indices in the returned vector.
     */
    virtual const std::vector<Split>& getSplits() const = 0;
    /** @brief Returns all the bitsets for categorical splits

    Split::subsetOfs is an offset in the returned vector.
     */
    virtual const std::vector<int>& getSubsets() const = 0;

    /** @brief Creates the empty model

    The static method creates an empty decision tree with the specified parameters. It should then
    be trained using the train method (see StatModel::train). Alternatively, you can load the model
    from a file using Algorithm::load\<DTrees\>(filename).
     */
    CV_WRAP static Ptr<DTrees> create();

    /** @brief Loads and creates a serialized DTrees from a file
     *
     * Use DTrees::save to serialize and store a DTrees model to disk.
     * Load the DTrees from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier.
     *
     * @param filepath path to serialized DTrees
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<DTrees> load(const String& filepath, const String& nodeName = String());
};
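
/* A minimal usage sketch (illustrative, not part of the original documentation): training a
   single decision tree on a tiny, made-up dataset and classifying one sample. The data values
   and parameter settings below are assumptions chosen for demonstration only.
@code{.cpp}
using namespace cv;
using namespace cv::ml;

Mat samples = (Mat_<float>(4, 2) << 0.f, 0.f,  0.f, 1.f,  1.f, 0.f,  1.f, 1.f);
Mat labels  = (Mat_<int>(4, 1) << 0, 0, 1, 1);

Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(8);        // limit the tree depth
dtree->setMinSampleCount(1);  // tiny dataset, so allow very small leaves
dtree->setCVFolds(0);         // skip the built-in cross-validation pruning
// Optional: make mistakes on class 1 ten times as costly as on class 0.
// dtree->setPriors((Mat_<float>(1, 2) << 1.f, 10.f));
dtree->train(TrainData::create(samples, ROW_SAMPLE, labels));

float prediction = dtree->predict((Mat_<float>(1, 2) << 1.f, 0.f));
@endcode
*/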

/****************************************************************************************\
*                                   Random Trees Classifier                              *
\****************************************************************************************/

/** @brief The class implements the random forest predictor.

@sa @ref ml_intro_rtrees
 */
class CV_EXPORTS_W RTrees : public DTrees
{
public:

    /** If true then variable importance will be calculated and then it can be retrieved by
    RTrees::getVarImportance. Default value is false.*/
    /** @see setCalculateVarImportance */
    CV_WRAP virtual bool getCalculateVarImportance() const = 0;
    /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */
    CV_WRAP virtual void setCalculateVarImportance(bool val) = 0;

    /** The size of the randomly selected subset of features at each tree node that is used
    to find the best split(s).
    If you set it to 0 then the size will be set to the square root of the total number of
    features. Default value is 0.*/
    /** @see setActiveVarCount */
    CV_WRAP virtual int getActiveVarCount() const = 0;
    /** @copybrief getActiveVarCount @see getActiveVarCount */
    CV_WRAP virtual void setActiveVarCount(int val) = 0;

    /** The termination criteria that specify when the training algorithm stops.
    Either when the specified number of trees is trained and added to the ensemble or when
    sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have, the
    better the accuracy. However, the improvement in accuracy generally diminishes and plateaus
    past a certain number of trees. Also keep in mind that the number of trees increases the
    prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITER +
    TermCriteria::EPS, 50, 0.1).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

    /** Returns the variable importance array.
    The method returns the variable importance vector, computed at the training stage when
    CalculateVarImportance is set to true. If this flag was set to false, an empty matrix is
    returned.
     */
    CV_WRAP virtual Mat getVarImportance() const = 0;

    /** Returns the result of each individual tree in the forest.
    In case the model is a regression problem, the method will return each of the trees'
    results for each of the sample cases. If the model is a classifier, it will return
    a Mat with samples + 1 rows, where the first row gives the class labels and the
    following rows return the votes each class had for each sample.
        @param samples Array containing the samples for which votes will be calculated.
        @param results Array where the result of the calculation will be written.
        @param flags Flags for defining the type of RTrees.
    */
    CV_WRAP void getVotes(InputArray samples, OutputArray results, int flags) const;

    /** Creates the empty model.
    Use StatModel::train to train the model, or Algorithm::load\<RTrees\>(filename) to load the
    pre-trained model.
     */
    CV_WRAP static Ptr<RTrees> create();

    /** @brief Loads and creates a serialized RTrees from a file
     *
     * Use RTrees::save to serialize and store an RTrees model to disk.
     * Load the RTrees from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier.
     *
     * @param filepath path to serialized RTrees
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<RTrees> load(const String& filepath, const String& nodeName = String());
};
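
/* A usage sketch (illustrative, not part of the original documentation): training a random
   forest with variable importance enabled and inspecting per-tree votes. The matrices
   `samples` (CV_32F, one row per sample) and `labels` (CV_32S) are assumed to exist.
@code{.cpp}
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setCalculateVarImportance(true);
// Stop after 100 trees or when the OOB error changes by less than 0.01.
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 0.01));
rtrees->train(TrainData::create(samples, ROW_SAMPLE, labels));

Mat importance = rtrees->getVarImportance(); // one entry per input variable

Mat votes;
rtrees->getVotes(samples, votes, 0); // row 0: class labels; following rows: per-sample votes
@endcode
*/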

/****************************************************************************************\
*                                   Boosted tree classifier                              *
\****************************************************************************************/

/** @brief Boosted tree classifier derived from DTrees

@sa @ref ml_intro_boost
 */
class CV_EXPORTS_W Boost : public DTrees
{
public:
    /** Type of the boosting algorithm.
    See Boost::Types. Default value is Boost::REAL. */
    /** @see setBoostType */
    CV_WRAP virtual int getBoostType() const = 0;
    /** @copybrief getBoostType @see getBoostType */
    CV_WRAP virtual void setBoostType(int val) = 0;

    /** The number of weak classifiers.
    Default value is 100. */
    /** @see setWeakCount */
    CV_WRAP virtual int getWeakCount() const = 0;
    /** @copybrief getWeakCount @see getWeakCount */
    CV_WRAP virtual void setWeakCount(int val) = 0;

    /** A threshold between 0 and 1 used to save computational time.
    Samples with summary weight \f$\leq 1 - weight\_trim\_rate\f$ do not participate in the *next*
    iteration of training. Set this parameter to 0 to turn off this functionality. Default value
    is 0.95.*/
    /** @see setWeightTrimRate */
    CV_WRAP virtual double getWeightTrimRate() const = 0;
    /** @copybrief getWeightTrimRate @see getWeightTrimRate */
    CV_WRAP virtual void setWeightTrimRate(double val) = 0;

    /** Boosting type.
    Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
    enum Types {
        DISCRETE=0, //!< Discrete AdaBoost.
        REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
                //!< and works well with categorical data.
        LOGIT=2, //!< LogitBoost. It can produce good regression fits.
        GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that
                 //!< reason is often good with regression data.
    };

    /** Creates the empty model.
    Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model. */
    CV_WRAP static Ptr<Boost> create();

    /** @brief Loads and creates a serialized Boost from a file
     *
     * Use Boost::save to serialize and store a Boost model to disk.
     * Load the Boost from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier.
     *
     * @param filepath path to serialized Boost
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<Boost> load(const String& filepath, const String& nodeName = String());
};
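
/* A usage sketch (illustrative, not part of the original documentation): a Gentle AdaBoost
   ensemble of decision stumps for a two-class problem. `samples` (CV_32F), `labels` (CV_32S)
   and `querySample` (1 x nfeatures, CV_32F) are assumed to exist; the parameter values are
   demonstration choices, not recommendations.
@code{.cpp}
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::GENTLE); // Gentle AdaBoost, less sensitive to outliers
boost->setWeakCount(100);           // 100 weak classifiers
boost->setMaxDepth(1);              // decision stumps
boost->setWeightTrimRate(0.95);     // skip samples with negligible weight
boost->train(TrainData::create(samples, ROW_SAMPLE, labels));

float label = boost->predict(querySample);
@endcode
*/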

/****************************************************************************************\
*                                   Gradient Boosted Trees                               *
\****************************************************************************************/

/*class CV_EXPORTS_W GBTrees : public DTrees
{
public:
    struct CV_EXPORTS_W_MAP Params : public DTrees::Params
    {
        CV_PROP_RW int weakCount;
        CV_PROP_RW int lossFunctionType;
        CV_PROP_RW float subsamplePortion;
        CV_PROP_RW float shrinkage;

        Params();
        Params( int lossFunctionType, int weakCount, float shrinkage,
                float subsamplePortion, int maxDepth, bool useSurrogates );
    };

    enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};

    virtual void setK(int k) = 0;

    virtual float predictSerial( InputArray samples,
                                 OutputArray weakResponses, int flags) const = 0;

    static Ptr<GBTrees> create(const Params& p);
};*/

/****************************************************************************************\
*                              Artificial Neural Networks (ANN)                          *
\****************************************************************************************/

/////////////////////////////////// Multi-Layer Perceptrons //////////////////////////////

/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.

Unlike many other models in ML that are constructed and trained at once, in the MLP model these
steps are separated. First, a network with the specified topology is created using the non-default
constructor or the method ANN_MLP::create. All the weights are set to zero. Then, the network is
trained using a set of input and output vectors. The training procedure can be repeated more than
once, that is, the weights can be adjusted based on the new training data.

Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.

@sa @ref ml_intro_ann
 */
class CV_EXPORTS_W ANN_MLP : public StatModel
{
public:
    /** Available training methods */
    enum TrainingMethods {
        BACKPROP=0, //!< The back-propagation algorithm.
        RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details.
    };

    /** Sets training method and common parameters.
    @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
    @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP
    @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP.
    */
    CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;

    /** Returns current training method */
    CV_WRAP virtual int getTrainMethod() const = 0;

    /** Initialize the activation function for each neuron.
    Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
    @param type The type of activation function. See ANN_MLP::ActivationFunctions.
    @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0.
    @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0.
    */
    CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;

    /** Integer vector specifying the number of neurons in each layer including the input and output layers.
    The very first element specifies the number of elements in the input layer.
    The last element specifies the number of elements in the output layer. Default value is empty Mat.
    @sa getLayerSizes */
    CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0;

    /** Integer vector specifying the number of neurons in each layer including the input and output layers.
    The very first element specifies the number of elements in the input layer.
    The last element specifies the number of elements in the output layer.
    @sa setLayerSizes */
    CV_WRAP virtual cv::Mat getLayerSizes() const = 0;

    /** Termination criteria of the training algorithm.
    You can specify the maximum number of iterations (maxCount) and/or how much the error could
    change between the iterations to make the algorithm continue (epsilon). Default value is
    TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

    /** BPROP: Strength of the weight gradient term.
    The recommended value is about 0.1. Default value is 0.1.*/
    /** @see setBackpropWeightScale */
    CV_WRAP virtual double getBackpropWeightScale() const = 0;
    /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */
    CV_WRAP virtual void setBackpropWeightScale(double val) = 0;

    /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
    This parameter provides some inertia to smooth the random fluctuations of the weights. It can
    vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
    Default value is 0.1.*/
    /** @see setBackpropMomentumScale */
    CV_WRAP virtual double getBackpropMomentumScale() const = 0;
    /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */
    CV_WRAP virtual void setBackpropMomentumScale(double val) = 0;

    /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
    Default value is 0.1.*/
    /** @see setRpropDW0 */
    CV_WRAP virtual double getRpropDW0() const = 0;
    /** @copybrief getRpropDW0 @see getRpropDW0 */
    CV_WRAP virtual void setRpropDW0(double val) = 0;

    /** RPROP: Increase factor \f$\eta^+\f$.
    It must be \>1. Default value is 1.2.*/
    /** @see setRpropDWPlus */
    CV_WRAP virtual double getRpropDWPlus() const = 0;
    /** @copybrief getRpropDWPlus @see getRpropDWPlus */
    CV_WRAP virtual void setRpropDWPlus(double val) = 0;

    /** RPROP: Decrease factor \f$\eta^-\f$.
    It must be \<1. Default value is 0.5.*/
    /** @see setRpropDWMinus */
    CV_WRAP virtual double getRpropDWMinus() const = 0;
    /** @copybrief getRpropDWMinus @see getRpropDWMinus */
    CV_WRAP virtual void setRpropDWMinus(double val) = 0;

    /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
    It must be positive. Default value is FLT_EPSILON.*/
    /** @see setRpropDWMin */
    CV_WRAP virtual double getRpropDWMin() const = 0;
    /** @copybrief getRpropDWMin @see getRpropDWMin */
    CV_WRAP virtual void setRpropDWMin(double val) = 0;

    /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
    It must be \>1. Default value is 50.*/
    /** @see setRpropDWMax */
    CV_WRAP virtual double getRpropDWMax() const = 0;
    /** @copybrief getRpropDWMax @see getRpropDWMax */
    CV_WRAP virtual void setRpropDWMax(double val) = 0;

    /** possible activation functions */
    enum ActivationFunctions {
        /** Identity function: \f$f(x)=x\f$ */
        IDENTITY = 0,
        /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$
        @note
        If you are using the default sigmoid activation function with the default parameter values
        fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output
        will range from [-1.7159, 1.7159], instead of [0,1].*/
        SIGMOID_SYM = 1,
        /** Gaussian function: \f$f(x)=\beta e^{-\alpha x^2}\f$ */
        GAUSSIAN = 2
    };

    /** Train options */
    enum TrainFlags {
        /** Update the network weights, rather than compute them from scratch. In the latter case
        the weights are initialized using the Nguyen-Widrow algorithm. */
        UPDATE_WEIGHTS = 1,
        /** Do not normalize the input vectors. If this flag is not set, the training algorithm
        normalizes each input feature independently, shifting its mean value to 0 and making the
        standard deviation equal to 1. If the network is assumed to be updated frequently, the new
        training data could be much different from the original one. In this case, you should take
        care of proper normalization. */
        NO_INPUT_SCALE = 2,
        /** Do not normalize the output vectors. If the flag is not set, the training algorithm
        normalizes each output feature independently, by transforming it to a certain range
        depending on the used activation function. */
        NO_OUTPUT_SCALE = 4
    };

    CV_WRAP virtual Mat getWeights(int layerIdx) const = 0;

    /** @brief Creates empty model

    Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model.
    Note that the train method has optional flags: ANN_MLP::TrainFlags.
     */
    CV_WRAP static Ptr<ANN_MLP> create();

    /** @brief Loads and creates a serialized ANN_MLP from a file
     *
     * Use ANN_MLP::save to serialize and store an ANN_MLP to disk.
     * Load the ANN_MLP from this file again, by calling this function with the path to the file.
     *
     * @param filepath path to serialized ANN_MLP
     */
    CV_WRAP static Ptr<ANN_MLP> load(const String& filepath);

};
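
/* A usage sketch (illustrative, not part of the original documentation): building a small
   MLP with one hidden layer and training it with back-propagation. `samples`, `targets`,
   `newSamples` and `newTargets` are assumed to be CV_32F matrices with one row per sample;
   the layer sizes and parameter values are example choices.
@code{.cpp}
Ptr<ANN_MLP> mlp = ANN_MLP::create();

// Topology: 2 inputs -> 8 hidden neurons -> 1 output.
Mat layerSizes = (Mat_<int>(3, 1) << 2, 8, 1);
mlp->setLayerSizes(layerSizes);

// Default symmetric sigmoid (with param1=param2=0 this is 1.7159*tanh(2/3 * x)).
mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1); // weight scale, momentum scale
mlp->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01));

mlp->train(TrainData::create(samples, ROW_SAMPLE, targets));

// Later, refine the same network on new data instead of retraining from scratch:
mlp->train(TrainData::create(newSamples, ROW_SAMPLE, newTargets), ANN_MLP::UPDATE_WEIGHTS);
@endcode
*/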

/****************************************************************************************\
*                           Logistic Regression                                          *
\****************************************************************************************/

/** @brief Implements the Logistic Regression classifier.

@sa @ref ml_intro_lr
 */
class CV_EXPORTS_W LogisticRegression : public StatModel
{
public:

    /** Learning rate. */
    /** @see setLearningRate */
    CV_WRAP virtual double getLearningRate() const = 0;
    /** @copybrief getLearningRate @see getLearningRate */
    CV_WRAP virtual void setLearningRate(double val) = 0;

    /** Number of iterations. */
    /** @see setIterations */
    CV_WRAP virtual int getIterations() const = 0;
    /** @copybrief getIterations @see getIterations */
    CV_WRAP virtual void setIterations(int val) = 0;

    /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
    /** @see setRegularization */
    CV_WRAP virtual int getRegularization() const = 0;
    /** @copybrief getRegularization @see getRegularization */
    CV_WRAP virtual void setRegularization(int val) = 0;

    /** Kind of training method used. See LogisticRegression::Methods. */
    /** @see setTrainMethod */
    CV_WRAP virtual int getTrainMethod() const = 0;
    /** @copybrief getTrainMethod @see getTrainMethod */
    CV_WRAP virtual void setTrainMethod(int val) = 0;

    /** Specifies the number of training samples taken in each step of Mini-Batch Gradient
    Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It
    has to take values less than the total number of training samples. */
    /** @see setMiniBatchSize */
    CV_WRAP virtual int getMiniBatchSize() const = 0;
    /** @copybrief getMiniBatchSize @see getMiniBatchSize */
    CV_WRAP virtual void setMiniBatchSize(int val) = 0;

    /** Termination criteria of the algorithm. */
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

    //! Regularization kinds
    enum RegKinds {
        REG_DISABLE = -1, //!< Regularization disabled
        REG_L1 = 0, //!< %L1 norm
        REG_L2 = 1 //!< %L2 norm
    };

    //! Training methods
    enum Methods {
        BATCH = 0,
        MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method.
    };

    /** @brief Predicts responses for input samples and returns a float type.

    @param samples The input data for the prediction algorithm. Matrix [m x n], where each row
        contains variables (features) of one object being classified. Should have data type CV_32F.
    @param results Predicted labels as a column matrix of type CV_32S.
    @param flags Not used.
     */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;

    /** @brief This function returns the trained parameters arranged across rows.

    For a two-class classification problem, it returns a row matrix. It returns the learnt
    parameters of the Logistic Regression as a matrix of type CV_32F.
     */
    CV_WRAP virtual Mat get_learnt_thetas() const = 0;

    /** @brief Creates empty model.

    Creates a Logistic Regression model with default parameters; configure it with the setter
    methods above, then train it with StatModel::train.
     */
    CV_WRAP static Ptr<LogisticRegression> create();

    /** @brief Loads and creates a serialized LogisticRegression from a file
     *
     * Use LogisticRegression::save to serialize and store a LogisticRegression to disk.
     * Load the LogisticRegression from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier.
     *
     * @param filepath path to serialized LogisticRegression
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<LogisticRegression> load(const String& filepath, const String& nodeName = String());
};
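
/* A usage sketch (illustrative, not part of the original documentation): binary logistic
   regression with L2 regularization and mini-batch training. `samples` and `labels` are
   assumed to be CV_32F matrices with one row per sample; the parameter values are example
   choices, not recommendations.
@code{.cpp}
Ptr<LogisticRegression> lr = LogisticRegression::create();
lr->setLearningRate(0.001);
lr->setIterations(1000);
lr->setRegularization(LogisticRegression::REG_L2);
lr->setTrainMethod(LogisticRegression::MINI_BATCH);
lr->setMiniBatchSize(10); // must be smaller than the number of training samples

lr->train(TrainData::create(samples, ROW_SAMPLE, labels));

Mat predicted;
lr->predict(testSamples, predicted);  // predicted: column matrix of CV_32S labels
Mat thetas = lr->get_learnt_thetas(); // learnt parameters, CV_32F
@endcode
*/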


/****************************************************************************************\
*                        Stochastic Gradient Descent SVM Classifier                      *
\****************************************************************************************/

/*!
@brief Stochastic Gradient Descent SVM classifier

SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic
Gradient Descent approach, as presented in @cite bottou2010large.

The classifier has the following parameters:
- model type,
- margin type,
- margin regularization (\f$\lambda\f$),
- initial step size (\f$\gamma_0\f$),
- step decreasing power (\f$c\f$),
- and termination criteria.

The model type may have one of the following values: \ref SGD and \ref ASGD.

- \ref SGD is the classic version of the SVMSGD classifier: every next step is calculated by the formula
  \f[w_{t+1} = w_t - \gamma(t) \frac{dQ_i}{dw} |_{w = w_t}\f]
  where
  - \f$w_t\f$ is the weights vector for the decision function at step \f$t\f$,
  - \f$\gamma(t)\f$ is the step size of model parameters at the iteration \f$t\f$; it is decreased on each step by the formula
    \f$\gamma(t) = \gamma_0  (1 + \lambda  \gamma_0 t) ^ {-c}\f$
  - \f$Q_i\f$ is the target functional from the SVM task for the sample with number \f$i\f$; this sample is chosen stochastically on each step of the algorithm.

- \ref ASGD is the Average Stochastic Gradient Descent SVM classifier. The ASGD classifier averages the weights vector on each step of the algorithm by the formula
\f$\widehat{w}_{t+1} = \frac{t}{1+t}\widehat{w}_{t} + \frac{1}{1+t}w_{t+1}\f$

The recommended model type is ASGD (following @cite bottou2010large).

The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN.

- You should use \ref HARD_MARGIN type, if you have linearly separable sets.
- You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers.
- In the general case (if you know nothing about the linear separability of your sets), use SOFT_MARGIN.

The other parameters may be described as follows:
- The margin regularization parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers
  (the smaller the parameter, the smaller the probability that an outlier will be ignored).
  The recommended value for the SGD model is 0.0001, for the ASGD model it is 0.00001.

- The initial step size parameter is the initial value for the step size \f$\gamma(t)\f$.
  You will have to find the best initial step for your problem.

- The step decreasing power is the power parameter for \f$\gamma(t)\f$ decreasing by the formula mentioned above.
  The recommended value for the SGD model is 1, for the ASGD model it is 0.75.

- Termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS.
  You will have to find the best termination criteria for your problem.

Note that the parameters margin regularization, initial step size, and step decreasing power should be positive.

To use the SVMSGD algorithm, do as follows:

- first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via functions setSvmsgdType(),
  setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower().

- then the SVM model can be trained using the train features and the correspondent labels by the method train().

- after that, the label of a new feature vector can be predicted using the method predict().

@code
// Create empty object
cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();

// Train the Stochastic Gradient Descent SVM
svmsgd->train(trainData);

// Predict labels for the new samples
svmsgd->predict(samples, responses);
@endcode

*/

class CV_EXPORTS_W SVMSGD : public cv::ml::StatModel
{
public:

    /** SVMSGD type.
    ASGD is often the preferable choice. */
    enum SvmsgdType
    {
        SGD, //!< Stochastic Gradient Descent
        ASGD //!< Average Stochastic Gradient Descent
    };

    /** Margin type.*/
    enum MarginType
    {
        SOFT_MARGIN, //!< General case, suits to the case of non-linearly separable sets, allows outliers.
        HARD_MARGIN  //!< More accurate for the case of linearly separable sets.
    };

    /**
     * @return the weights of the trained model (decision function f(x) = weights * x + shift).
    */
    CV_WRAP virtual Mat getWeights() = 0;

    /**
     * @return the shift of the trained model (decision function f(x) = weights * x + shift).
    */
    CV_WRAP virtual float getShift() = 0;

    /** @brief Creates empty model.
     * Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to
     * find the best parameters for your problem or use setOptimalParameters() to set some default parameters.
    */
    CV_WRAP static Ptr<SVMSGD> create();

    /** @brief Loads and creates a serialized SVMSGD from a file
     *
     * Use SVMSGD::save to serialize and store an SVMSGD to disk.
     * Load the SVMSGD from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier.
     *
     * @param filepath path to serialized SVMSGD
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<SVMSGD> load(const String& filepath, const String& nodeName = String());

    /** @brief Sets the optimal parameter values for the chosen SVM SGD model.
     * @param svmsgdType is the type of SVMSGD classifier.
     * @param marginType is the type of margin constraint.
    */
    CV_WRAP virtual void setOptimalParameters(int svmsgdType = SVMSGD::ASGD, int marginType = SVMSGD::SOFT_MARGIN) = 0;

    /** @brief %Algorithm type, one of SVMSGD::SvmsgdType. */
    /** @see setSvmsgdType */
    CV_WRAP virtual int getSvmsgdType() const = 0;
    /** @copybrief getSvmsgdType @see getSvmsgdType */
    CV_WRAP virtual void setSvmsgdType(int svmsgdType) = 0;

    /** @brief %Margin type, one of SVMSGD::MarginType. */
    /** @see setMarginType */
    CV_WRAP virtual int getMarginType() const = 0;
    /** @copybrief getMarginType @see getMarginType */
    CV_WRAP virtual void setMarginType(int marginType) = 0;

    /** @brief Parameter marginRegularization of a %SVMSGD optimization problem. */
    /** @see setMarginRegularization */
    CV_WRAP virtual float getMarginRegularization() const = 0;
    /** @copybrief getMarginRegularization @see getMarginRegularization */
    CV_WRAP virtual void setMarginRegularization(float marginRegularization) = 0;

    /** @brief Parameter initialStepSize of a %SVMSGD optimization problem. */
    /** @see setInitialStepSize */
    CV_WRAP virtual float getInitialStepSize() const = 0;
    /** @copybrief getInitialStepSize @see getInitialStepSize */
    CV_WRAP virtual void setInitialStepSize(float InitialStepSize) = 0;

    /** @brief Parameter stepDecreasingPower of a %SVMSGD optimization problem. */
    /** @see setStepDecreasingPower */
    CV_WRAP virtual float getStepDecreasingPower() const = 0;
    /** @copybrief getStepDecreasingPower @see getStepDecreasingPower */
    CV_WRAP virtual void setStepDecreasingPower(float stepDecreasingPower) = 0;

    /** @brief Termination criteria of the training algorithm.
    You can specify the maximum number of iterations (maxCount) and/or how much the error could
    change between the iterations to make the algorithm continue (epsilon).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;
};
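
/* A configuration sketch (illustrative, not part of the original documentation): setting the
   SVMSGD parameters manually instead of calling setOptimalParameters(). The regularization and
   step decreasing power follow the recommendations above; the initial step size and termination
   criteria are assumed starting points that should be tuned per problem.
@code{.cpp}
Ptr<SVMSGD> svmsgd = SVMSGD::create();
svmsgd->setSvmsgdType(SVMSGD::ASGD);        // averaged SGD, the recommended model type
svmsgd->setMarginType(SVMSGD::SOFT_MARGIN); // safe default when separability is unknown
svmsgd->setMarginRegularization(0.00001f);  // recommended for the ASGD model
svmsgd->setStepDecreasingPower(0.75f);      // recommended for the ASGD model
svmsgd->setInitialStepSize(0.05f);          // assumed value; tune for your data
svmsgd->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001));
svmsgd->train(trainData);
@endcode
*/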


/****************************************************************************************\
*                           Auxiliary functions declarations                             *
\****************************************************************************************/

/** @brief Generates _samples_ from a multivariate normal distribution

@param mean an average row vector
@param cov symmetric covariance matrix
@param nsamples returned samples count
@param samples returned samples array
*/
CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples);
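
/* A usage sketch (illustrative, not part of the original documentation): drawing 100 samples
   from a 2-D Gaussian with unit covariance; the mean and covariance values are made up.
@code{.cpp}
Mat mean = (Mat_<float>(1, 2) << 0.f, 0.f);
Mat cov  = (Mat_<float>(2, 2) << 1.f, 0.f,
                                 0.f, 1.f);
Mat gaussSamples;
randMVNormal(mean, cov, 100, gaussSamples); // one generated sample per row
@endcode
*/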

/** @brief Creates a test set of points distributed on concentric spheres */
CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,
                                                OutputArray samples, OutputArray responses);
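
/* A usage sketch (illustrative, not part of the original documentation): generating a
   synthetic 3-class, 2-feature dataset for quick classifier experiments.
@code{.cpp}
Mat sphereSamples, sphereResponses;
createConcentricSpheresTestSet(300, 2, 3, sphereSamples, sphereResponses);
@endcode
*/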

//! @} ml

}
}

#endif // __cplusplus
#endif // OPENCV_ML_HPP

/* End of file. */