opencv / Commits / daa99514

Commit daa99514, authored Mar 23, 2015 by Vadim Pisarevsky
Merge pull request #3846 from mshabunin:fix-headers

Parents: d2da7dc3, 9f083103
Showing 8 changed files with 461 additions and 206 deletions:

    doc/Doxyfile.in                                                +1    -5
    modules/core/include/opencv2/core.hpp                          +0    -35
    modules/core/include/opencv2/core/private.hpp                  +21   -0
    modules/ml/CMakeLists.txt                                       +1    -1
    modules/ml/include/opencv2/ml.hpp                               +254  -119
    modules/superres/include/opencv2/superres.hpp                   +40   -10
    modules/superres/include/opencv2/superres/optical_flow.hpp      +96   -24
    modules/video/include/opencv2/video/tracking.hpp                +48   -12
doc/Doxyfile.in

@@ -243,11 +243,7 @@ PREDEFINED = __cplusplus=1 \
   CV_NORETURN= \
   CV_DEFAULT(x)=" = x" \
   CV_NEON=1 \
-  FLANN_DEPRECATED= \
-  "CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
-  "CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
-  "CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
-  "CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
+  FLANN_DEPRECATED=
 EXPAND_AS_DEFINED =
 SKIP_FUNCTION_MACROS = YES
 TAGFILES =
modules/core/include/opencv2/core.hpp

@@ -2821,41 +2821,6 @@ public:
     virtual void read(const FileNode& fn) { (void)fn; }
 };

-// define properties
-
-#define CV_PURE_PROPERTY(type, name) \
-    CV_WRAP virtual type get##name() const = 0; \
-    CV_WRAP virtual void set##name(type val) = 0;
-
-#define CV_PURE_PROPERTY_S(type, name) \
-    CV_WRAP virtual type get##name() const = 0; \
-    CV_WRAP virtual void set##name(const type & val) = 0;
-
-#define CV_PURE_PROPERTY_RO(type, name) \
-    CV_WRAP virtual type get##name() const = 0;
-
-// basic property implementation
-
-#define CV_IMPL_PROPERTY_RO(type, name, member) \
-    inline type get##name() const { return member; }
-
-#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
-    CV_IMPL_PROPERTY_RO(r_type, name, member) \
-    inline void set##name(w_type val) { member = val; }
-
-#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
-    r_type get##name() const { return internal_obj.get##internal_name(); } \
-    void set##name(w_type val) { internal_obj.set##internal_name(val); }
-
-#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
-#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
-
-#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
-#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
-
-#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
-#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
-
 struct Param {
     enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
            UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
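The macros removed above generated the public getter/setter pairs in place, which documentation tools and the header parsers could only see after macro expansion (hence the PREDEFINED entries dropped from the Doxyfile in this same commit). As a reference, here is a minimal self-contained sketch of what a use such as CV_PURE_PROPERTY(double, Gamma) expanded to, next to the explicit form the headers now spell out. The Widget class names are illustrative only, and CV_WRAP is omitted so the sketch compiles on its own.

    // What the removed convenience macro generated:
    #define CV_PURE_PROPERTY(type, name) \
        virtual type get##name() const = 0; \
        virtual void set##name(type val) = 0;

    struct WidgetWithMacro
    {
        CV_PURE_PROPERTY(double, Gamma)   // expands to getGamma()/setGamma(double)
        virtual ~WidgetWithMacro() {}
    };

    // The explicit form this commit writes into the public headers instead,
    // so each accessor carries its own documentation comment:
    struct WidgetExplicit
    {
        /** @see setGamma */
        virtual double getGamma() const = 0;
        /** @copybrief getGamma @see getGamma */
        virtual void setGamma(double val) = 0;
        virtual ~WidgetExplicit() {}
    };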
modules/core/include/opencv2/core/private.hpp

@@ -172,6 +172,27 @@ namespace cv
 CV_EXPORTS void scalarToRawData(const cv::Scalar& s, void* buf, int type, int unroll_to = 0);
 }

+// property implementation macros
+
+#define CV_IMPL_PROPERTY_RO(type, name, member) \
+    inline type get##name() const { return member; }
+
+#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
+    CV_IMPL_PROPERTY_RO(r_type, name, member) \
+    inline void set##name(w_type val) { member = val; }
+
+#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
+    r_type get##name() const { return internal_obj.get##internal_name(); } \
+    void set##name(w_type val) { internal_obj.set##internal_name(val); }
+
+#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
+#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
+
+#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
+#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
+
+#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
+#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
+
 /****************************************************************************************\
 *                     Structures and macros for integration with IPP                    *
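The implementation-side macros move into the private header, so they stay available to module internals. A minimal sketch of how an implementation class might use them; the SvmImpl and Gamma names are illustrative, and the macro bodies are repeated locally so the example compiles on its own.

    #define CV_IMPL_PROPERTY_RO(type, name, member) \
        inline type get##name() const { return member; }
    #define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
        CV_IMPL_PROPERTY_RO(r_type, name, member) \
        inline void set##name(w_type val) { member = val; }
    #define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)

    // Hypothetical implementation class: one macro line per property generates
    // the getter/setter pair backed by a plain member variable.
    class SvmImpl
    {
    public:
        CV_IMPL_PROPERTY(double, Gamma, gamma)   // getGamma()/setGamma(double) backed by 'gamma'
    private:
        double gamma = 1.0;
    };

    int main()
    {
        SvmImpl s;
        s.setGamma(0.5);
        return s.getGamma() == 0.5 ? 0 : 1;
    }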
modules/ml/CMakeLists.txt

 set(the_description "Machine Learning")
-ocv_define_module(ml opencv_core)
+ocv_define_module(ml opencv_core WRAP java python)
modules/ml/include/opencv2/ml.hpp

@@ -104,7 +104,7 @@ enum SampleTypes
 It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate
 being computed by cross-validation.
  */
-class CV_EXPORTS_W_MAP ParamGrid
+class CV_EXPORTS ParamGrid
 {
 public:
     /** @brief Default constructor */
@@ -112,8 +112,8 @@ public:
     /** @brief Constructor with parameters */
     ParamGrid(double _minVal, double _maxVal, double _logStep);

-    CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
+    double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
-    CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.
+    double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.
     /** @brief Logarithmic step for iterating the statmodel parameter.

     The grid determines the following iteration sequence of the statmodel parameter values:
@@ -122,7 +122,7 @@ public:
     \f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]
     The grid is logarithmic, so logStep must always be greater then 1. Default value is 1.
     */
-    CV_PROP_RW double logStep;
+    double logStep;
 };

 /** @brief Class encapsulating training data.
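The logStep documentation in the hunk above defines the sequence minVal, minVal*logStep, minVal*logStep^2, and so on, stopping before maxVal. A minimal sketch of enumerating such a grid; the printGrid helper is illustrative and not part of the diff.

    #include <opencv2/ml.hpp>
    #include <cstdio>

    // Print every value a ParamGrid describes, following the rule
    // minVal * logStep^n < maxVal from the documentation above.
    static void printGrid(const cv::ml::ParamGrid& g)
    {
        for (double v = g.minVal; v < g.maxVal; v *= g.logStep)
            std::printf("%g ", v);
        std::printf("\n");
    }

    int main()
    {
        cv::ml::ParamGrid Cgrid(0.1, 100.0, 10.0); // constructor shown in the hunk above
        printGrid(Cgrid);                          // prints: 0.1 1 10
        return 0;
    }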
@@ -134,22 +134,22 @@ of this class into StatModel::train.

 @sa @ref ml_intro_data
  */
-class CV_EXPORTS TrainData
+class CV_EXPORTS_W TrainData
 {
 public:
     static inline float missingValue() { return FLT_MAX; }
     virtual ~TrainData();

-    virtual int getLayout() const = 0;
+    CV_WRAP virtual int getLayout() const = 0;
-    virtual int getNTrainSamples() const = 0;
+    CV_WRAP virtual int getNTrainSamples() const = 0;
-    virtual int getNTestSamples() const = 0;
+    CV_WRAP virtual int getNTestSamples() const = 0;
-    virtual int getNSamples() const = 0;
+    CV_WRAP virtual int getNSamples() const = 0;
-    virtual int getNVars() const = 0;
+    CV_WRAP virtual int getNVars() const = 0;
-    virtual int getNAllVars() const = 0;
+    CV_WRAP virtual int getNAllVars() const = 0;

-    virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
+    CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
-    virtual Mat getSamples() const = 0;
+    CV_WRAP virtual Mat getSamples() const = 0;
-    virtual Mat getMissing() const = 0;
+    CV_WRAP virtual Mat getMissing() const = 0;

     /** @brief Returns matrix of train samples
@@ -163,7 +163,7 @@ public:
     In current implementation the function tries to avoid physical data copying and returns the
     matrix stored inside TrainData (unless the transposition or compression is needed).
      */
-    virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
+    CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
                                 bool compressSamples=true,
                                 bool compressVars=true) const = 0;
@@ -172,7 +172,7 @@ public:
     The function returns ordered or the original categorical responses. Usually it's used in
     regression algorithms.
      */
-    virtual Mat getTrainResponses() const = 0;
+    CV_WRAP virtual Mat getTrainResponses() const = 0;

     /** @brief Returns the vector of normalized categorical responses
@@ -180,38 +180,38 @@ public:
     classes>-1`. The actual label value can be retrieved then from the class label vector, see
     TrainData::getClassLabels.
      */
-    virtual Mat getTrainNormCatResponses() const = 0;
+    CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;
-    virtual Mat getTestResponses() const = 0;
+    CV_WRAP virtual Mat getTestResponses() const = 0;
-    virtual Mat getTestNormCatResponses() const = 0;
+    CV_WRAP virtual Mat getTestNormCatResponses() const = 0;
-    virtual Mat getResponses() const = 0;
+    CV_WRAP virtual Mat getResponses() const = 0;
-    virtual Mat getNormCatResponses() const = 0;
+    CV_WRAP virtual Mat getNormCatResponses() const = 0;
-    virtual Mat getSampleWeights() const = 0;
+    CV_WRAP virtual Mat getSampleWeights() const = 0;
-    virtual Mat getTrainSampleWeights() const = 0;
+    CV_WRAP virtual Mat getTrainSampleWeights() const = 0;
-    virtual Mat getTestSampleWeights() const = 0;
+    CV_WRAP virtual Mat getTestSampleWeights() const = 0;
-    virtual Mat getVarIdx() const = 0;
+    CV_WRAP virtual Mat getVarIdx() const = 0;
-    virtual Mat getVarType() const = 0;
+    CV_WRAP virtual Mat getVarType() const = 0;
-    virtual int getResponseType() const = 0;
+    CV_WRAP virtual int getResponseType() const = 0;
-    virtual Mat getTrainSampleIdx() const = 0;
+    CV_WRAP virtual Mat getTrainSampleIdx() const = 0;
-    virtual Mat getTestSampleIdx() const = 0;
+    CV_WRAP virtual Mat getTestSampleIdx() const = 0;
-    virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
+    CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
     virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;
-    virtual Mat getDefaultSubstValues() const = 0;
+    CV_WRAP virtual Mat getDefaultSubstValues() const = 0;

-    virtual int getCatCount(int vi) const = 0;
+    CV_WRAP virtual int getCatCount(int vi) const = 0;

     /** @brief Returns the vector of class labels
     The function returns vector of unique labels occurred in the responses.
      */
-    virtual Mat getClassLabels() const = 0;
+    CV_WRAP virtual Mat getClassLabels() const = 0;

-    virtual Mat getCatOfs() const = 0;
+    CV_WRAP virtual Mat getCatOfs() const = 0;
-    virtual Mat getCatMap() const = 0;
+    CV_WRAP virtual Mat getCatMap() const = 0;

     /** @brief Splits the training data into the training and test parts
     @sa TrainData::setTrainTestSplitRatio
      */
-    virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;
+    CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;

     /** @brief Splits the training data into the training and test parts
@@ -221,10 +221,10 @@ public:
     subset can be retrieved and processed as well.
     @sa TrainData::setTrainTestSplit
      */
-    virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
+    CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
-    virtual void shuffleTrainTest() = 0;
+    CV_WRAP virtual void shuffleTrainTest() = 0;

-    static Mat getSubVector(const Mat& vec, const Mat& idx);
+    CV_WRAP static Mat getSubVector(const Mat& vec, const Mat& idx);

     /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.
@@ -280,7 +280,7 @@ public:
     <number_of_variables_in_responses>`, containing types of each input and output variable. See
     ml::VariableTypes.
      */
-    static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
+    CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
                                  InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
                                  InputArray sampleWeights=noArray(), InputArray varType=noArray());
 };
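The CV_WRAP annotations above expose these TrainData methods to the generated bindings; from C++ the workflow is unchanged. A minimal sketch using only calls that appear in this hunk (the sample values are made up):

    #include <opencv2/core.hpp>
    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    int main()
    {
        // Four 2-D samples, one per row, with an integer class label each.
        Mat samples = (Mat_<float>(4, 2) << 0.f, 0.f,  0.f, 1.f,  1.f, 0.f,  1.f, 1.f);
        Mat responses = (Mat_<int>(4, 1) << 0, 1, 1, 0);

        // TrainData::create and setTrainTestSplitRatio are declared in the hunk above.
        Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, responses);
        data->setTrainTestSplitRatio(0.75, /*shuffle=*/true);

        CV_Assert(data->getNSamples() == 4);
        Mat trainIdx = data->getTrainSampleIdx(); // indices of the training subset
        return 0;
    }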
@@ -297,15 +297,15 @@ public:
         COMPRESSED_INPUT=2,
         PREPROCESSED_INPUT=4
     };

-    virtual void clear();
+    CV_WRAP virtual void clear();

     /** @brief Returns the number of variables in training samples */
-    virtual int getVarCount() const = 0;
+    CV_WRAP virtual int getVarCount() const = 0;

     /** @brief Returns true if the model is trained */
-    virtual bool isTrained() const = 0;
+    CV_WRAP virtual bool isTrained() const = 0;
     /** @brief Returns true if the model is classifier */
-    virtual bool isClassifier() const = 0;
+    CV_WRAP virtual bool isClassifier() const = 0;

     /** @brief Trains the statistical model
@@ -314,7 +314,7 @@ public:
     @param flags optional flags, depending on the model. Some of the models can be updated with the
     new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
      */
-    virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );
+    CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );

     /** @brief Trains the statistical model
@@ -322,7 +322,7 @@ public:
     @param layout See ml::SampleTypes.
     @param responses vector of responses associated with the training samples.
      */
-    virtual bool train( InputArray samples, int layout, InputArray responses );
+    CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );

     /** @brief Computes error on the training or test dataset
@@ -337,7 +337,7 @@ public:
     The method uses StatModel::predict to compute the error. For regression models the error is
     computed as RMS, for classifiers - as a percent of missclassified samples (0%-100%).
      */
-    virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;
+    CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;

     /** @brief Predicts response(s) for the provided sample(s)
@@ -345,7 +345,7 @@ public:
     @param results The optional output matrix of results.
     @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.
      */
-    virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
+    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;

     /** @brief Loads model from the file
@@ -393,11 +393,11 @@ public:
     /** Saves the model to a file.
     In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */
-    virtual void save(const String& filename) const;
+    CV_WRAP virtual void save(const String& filename) const;

     /** Returns model string identifier.
     This string is used as top level xml/yml node tag when model is saved to a file or string. */
-    virtual String getDefaultModelName() const = 0;
+    CV_WRAP virtual String getDefaultModelName() const = 0;
 };

 /****************************************************************************************\
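Together with the TrainData sketch above, the newly wrapped StatModel methods cover the usual train / evaluate / persist cycle. A minimal sketch; NormalBayesClassifier is used only because it is the next class in this header, and the file name is arbitrary.

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    // 'data' is a Ptr<TrainData> prepared as in the previous sketch.
    static float trainAndEvaluate(const Ptr<TrainData>& data)
    {
        Ptr<NormalBayesClassifier> model = NormalBayesClassifier::create();
        model->train(data);                        // train(const Ptr<TrainData>&, int flags=0)

        Mat resp;                                  // per-sample predictions filled by calcError
        float testError = model->calcError(data, /*test=*/true, resp);

        model->save("model.yml");                  // XML/YAML serialization via StatModel::save
        return testError;                          // RMS for regression, % misclassified for classifiers
    }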
@@ -419,12 +419,12 @@ public:
     The vector outputProbs contains the output probabilities corresponding to each element of
     result.
      */
-    virtual float predictProb( InputArray inputs, OutputArray outputs,
+    CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,
                                OutputArray outputProbs, int flags=0 ) const = 0;

     /** Creates empty model
     Use StatModel::train to train the model after creation. */
-    static Ptr<NormalBayesClassifier> create();
+    CV_WRAP static Ptr<NormalBayesClassifier> create();
 };

 /****************************************************************************************\
@@ -440,16 +440,28 @@ class CV_EXPORTS_W KNearest : public StatModel
 public:

     /** Default number of neighbors to use in predict method. */
-    CV_PURE_PROPERTY(int, DefaultK)
+    /** @see setDefaultK */
+    CV_WRAP virtual int getDefaultK() const = 0;
+    /** @copybrief getDefaultK @see getDefaultK */
+    CV_WRAP virtual void setDefaultK(int val) = 0;

     /** Whether classification or regression model should be trained. */
-    CV_PURE_PROPERTY(bool, IsClassifier)
+    /** @see setIsClassifier */
+    CV_WRAP virtual bool getIsClassifier() const = 0;
+    /** @copybrief getIsClassifier @see getIsClassifier */
+    CV_WRAP virtual void setIsClassifier(bool val) = 0;

     /** Parameter for KDTree implementation. */
-    CV_PURE_PROPERTY(int, Emax)
+    /** @see setEmax */
+    CV_WRAP virtual int getEmax() const = 0;
+    /** @copybrief getEmax @see getEmax */
+    CV_WRAP virtual void setEmax(int val) = 0;

     /** %Algorithm type, one of KNearest::Types. */
-    CV_PURE_PROPERTY(int, AlgorithmType)
+    /** @see setAlgorithmType */
+    CV_WRAP virtual int getAlgorithmType() const = 0;
+    /** @copybrief getAlgorithmType @see getAlgorithmType */
+    CV_WRAP virtual void setAlgorithmType(int val) = 0;

     /** @brief Finds the neighbors and predicts responses for input vectors.
@@ -477,7 +489,7 @@ public:
     The function is parallelized with the TBB library.
      */
-    virtual float findNearest( InputArray samples, int k,
+    CV_WRAP virtual float findNearest( InputArray samples, int k,
                                OutputArray results,
                                OutputArray neighborResponses=noArray(),
                                OutputArray dist=noArray() ) const = 0;
@@ -494,7 +506,7 @@ public:
     The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method.
      */
-    static Ptr<KNearest> create();
+    CV_WRAP static Ptr<KNearest> create();
 };

 /****************************************************************************************\
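A minimal sketch exercising the expanded KNearest setters and findNearest from the hunk above (k and the data are arbitrary; 'data' as in the earlier TrainData sketch):

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static void runKNearest(const Ptr<TrainData>& data, const Mat& querySamples)
    {
        Ptr<KNearest> knn = KNearest::create();
        knn->setDefaultK(3);          // expanded from CV_PURE_PROPERTY(int, DefaultK)
        knn->setIsClassifier(true);   // expanded from CV_PURE_PROPERTY(bool, IsClassifier)
        knn->train(data);

        Mat results, neighborResponses, dist;
        knn->findNearest(querySamples, 3, results, neighborResponses, dist);
    }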
@@ -518,52 +530,79 @@ public:
     /** Type of a %SVM formulation.
     See SVM::Types. Default value is SVM::C_SVC. */
-    CV_PURE_PROPERTY(int, Type)
+    /** @see setType */
+    CV_WRAP virtual int getType() const = 0;
+    /** @copybrief getType @see getType */
+    CV_WRAP virtual void setType(int val) = 0;

     /** Parameter \f$\gamma\f$ of a kernel function.
     For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */
-    CV_PURE_PROPERTY(double, Gamma)
+    /** @see setGamma */
+    CV_WRAP virtual double getGamma() const = 0;
+    /** @copybrief getGamma @see getGamma */
+    CV_WRAP virtual void setGamma(double val) = 0;

     /** Parameter _coef0_ of a kernel function.
     For SVM::POLY or SVM::SIGMOID. Default value is 0.*/
-    CV_PURE_PROPERTY(double, Coef0)
+    /** @see setCoef0 */
+    CV_WRAP virtual double getCoef0() const = 0;
+    /** @copybrief getCoef0 @see getCoef0 */
+    CV_WRAP virtual void setCoef0(double val) = 0;

     /** Parameter _degree_ of a kernel function.
     For SVM::POLY. Default value is 0. */
-    CV_PURE_PROPERTY(double, Degree)
+    /** @see setDegree */
+    CV_WRAP virtual double getDegree() const = 0;
+    /** @copybrief getDegree @see getDegree */
+    CV_WRAP virtual void setDegree(double val) = 0;

     /** Parameter _C_ of a %SVM optimization problem.
     For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */
-    CV_PURE_PROPERTY(double, C)
+    /** @see setC */
+    CV_WRAP virtual double getC() const = 0;
+    /** @copybrief getC @see getC */
+    CV_WRAP virtual void setC(double val) = 0;

     /** Parameter \f$\nu\f$ of a %SVM optimization problem.
     For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */
-    CV_PURE_PROPERTY(double, Nu)
+    /** @see setNu */
+    CV_WRAP virtual double getNu() const = 0;
+    /** @copybrief getNu @see getNu */
+    CV_WRAP virtual void setNu(double val) = 0;

     /** Parameter \f$\epsilon\f$ of a %SVM optimization problem.
     For SVM::EPS_SVR. Default value is 0. */
-    CV_PURE_PROPERTY(double, P)
+    /** @see setP */
+    CV_WRAP virtual double getP() const = 0;
+    /** @copybrief getP @see getP */
+    CV_WRAP virtual void setP(double val) = 0;

     /** Optional weights in the SVM::C_SVC problem, assigned to particular classes.
     They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus
     these weights affect the misclassification penalty for different classes. The larger weight,
     the larger penalty on misclassification of data from the corresponding class. Default value is
     empty Mat. */
-    CV_PURE_PROPERTY_S(cv::Mat, ClassWeights)
+    /** @see setClassWeights */
+    CV_WRAP virtual cv::Mat getClassWeights() const = 0;
+    /** @copybrief getClassWeights @see getClassWeights */
+    CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0;

     /** Termination criteria of the iterative %SVM training procedure which solves a partial
     case of constrained quadratic optimization problem.
     You can specify tolerance and/or the maximum number of iterations. Default value is
     `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */
-    CV_PURE_PROPERTY_S(cv::TermCriteria, TermCriteria)
+    /** @see setTermCriteria */
+    CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0;
+    /** @copybrief getTermCriteria @see getTermCriteria */
+    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;

     /** Type of a %SVM kernel.
     See SVM::KernelTypes. Default value is SVM::RBF. */
-    virtual int getKernelType() const = 0;
+    CV_WRAP virtual int getKernelType() const = 0;

     /** Initialize with one of predefined kernels.
     See SVM::KernelTypes. */
-    virtual void setKernel(int kernelType) = 0;
+    CV_WRAP virtual void setKernel(int kernelType) = 0;

     /** Initialize with custom kernel.
     See SVM::Kernel class for implementation details */
@@ -695,7 +734,7 @@ public:
     The method returns rho parameter of the decision function, a scalar subtracted from the weighted
     sum of kernel responses.
      */
-    virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;
+    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;

     /** @brief Generates a grid for %SVM parameters.
@@ -710,7 +749,7 @@ public:
     /** Creates empty model.
     Use StatModel::train to train the model. Since %SVM has several parameters, you may want to
     find the best parameters for your problem, it can be done with SVM::trainAuto. */
-    static Ptr<SVM> create();
+    CV_WRAP static Ptr<SVM> create();
 };

 /****************************************************************************************\
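With the properties spelled out, configuring an SVM reads as plain setter calls. A minimal sketch; parameter values are arbitrary, SVM::C_SVC and SVM::RBF are the defaults named in the documentation above, and 'data' is as in the TrainData sketch.

    #include <cfloat>
    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static Ptr<SVM> trainSvm(const Ptr<TrainData>& data)
    {
        Ptr<SVM> svm = SVM::create();
        svm->setType(SVM::C_SVC);        // was CV_PURE_PROPERTY(int, Type)
        svm->setKernel(SVM::RBF);        // setKernel is declared explicitly in this hunk
        svm->setC(1.0);
        svm->setGamma(0.5);
        svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS,
                                          1000, FLT_EPSILON));
        svm->train(data);
        return svm;
    }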
@@ -755,29 +794,38 @@ public:
     Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could
     determine the optimal number of mixtures within a specified value range, but that is not the
     case in ML yet. */
-    CV_PURE_PROPERTY(int, ClustersNumber)
+    /** @see setClustersNumber */
+    CV_WRAP virtual int getClustersNumber() const = 0;
+    /** @copybrief getClustersNumber @see getClustersNumber */
+    CV_WRAP virtual void setClustersNumber(int val) = 0;

     /** Constraint on covariance matrices which defines type of matrices.
     See EM::Types. */
-    CV_PURE_PROPERTY(int, CovarianceMatrixType)
+    /** @see setCovarianceMatrixType */
+    CV_WRAP virtual int getCovarianceMatrixType() const = 0;
+    /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */
+    CV_WRAP virtual void setCovarianceMatrixType(int val) = 0;

     /** The termination criteria of the %EM algorithm.
     The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of
     M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default
     maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */
-    CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
+    /** @see setTermCriteria */
+    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
+    /** @copybrief getTermCriteria @see getTermCriteria */
+    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

     /** @brief Returns weights of the mixtures
     Returns vector with the number of elements equal to the number of mixtures.
      */
-    virtual Mat getWeights() const = 0;
+    CV_WRAP virtual Mat getWeights() const = 0;
     /** @brief Returns the cluster centers (means of the Gaussian mixture)
     Returns matrix with the number of rows equal to the number of mixtures and number of columns
     equal to the space dimensionality.
      */
-    virtual Mat getMeans() const = 0;
+    CV_WRAP virtual Mat getMeans() const = 0;
     /** @brief Returns covariation matrices
     Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures,
@@ -797,7 +845,7 @@ public:
     the sample. First element is an index of the most probable mixture component for the given
     sample.
      */
-    CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;
+    CV_WRAP CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;

     /** @brief Estimate the Gaussian mixture parameters from a samples set.
@@ -827,7 +875,7 @@ public:
     mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and
     CV_64FC1 type.
      */
-    virtual bool trainEM(InputArray samples,
+    CV_WRAP virtual bool trainEM(InputArray samples,
                          OutputArray logLikelihoods=noArray(),
                          OutputArray labels=noArray(),
                          OutputArray probs=noArray()) = 0;
@@ -859,7 +907,7 @@ public:
     mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and
     CV_64FC1 type.
      */
-    virtual bool trainE(InputArray samples, InputArray means0,
+    CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,
                         InputArray covs0=noArray(),
                         InputArray weights0=noArray(),
                         OutputArray logLikelihoods=noArray(),
@@ -884,7 +932,7 @@ public:
     mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and
     CV_64FC1 type.
      */
-    virtual bool trainM(InputArray samples, InputArray probs0,
+    CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,
                         OutputArray logLikelihoods=noArray(),
                         OutputArray labels=noArray(),
                         OutputArray probs=noArray()) = 0;
@@ -893,7 +941,7 @@ public:
     The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you
     can use one of the EM::train\* methods or load it from file using StatModel::load\<EM\>(filename).
      */
-    static Ptr<EM> create();
+    CV_WRAP static Ptr<EM> create();
 };

 /****************************************************************************************\
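The EM interface above separates cluster setup (setClustersNumber), training (trainEM) and per-sample inference (predict2). A minimal sketch, assuming 'samples' is a floating-point matrix with one observation per row and 'query' is a single sample row:

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static void clusterWithEM(const Mat& samples, const Mat& query)
    {
        Ptr<EM> em = EM::create();
        em->setClustersNumber(3);                  // was CV_PURE_PROPERTY(int, ClustersNumber)

        Mat logLikelihoods, labels, probs;
        em->trainEM(samples, logLikelihoods, labels, probs);

        // predict2 returns {likelihood, index of the most probable mixture component}.
        Mat queryProbs;
        Vec2d result = em->predict2(query, queryProbs);
        (void)result;
    }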
@@ -926,46 +974,70 @@ public:
     values. In case of regression and 2-class classification the optimal split can be found
     efficiently without employing clustering, thus the parameter is not used in these cases.
     Default value is 10.*/
-    CV_PURE_PROPERTY(int, MaxCategories)
+    /** @see setMaxCategories */
+    CV_WRAP virtual int getMaxCategories() const = 0;
+    /** @copybrief getMaxCategories @see getMaxCategories */
+    CV_WRAP virtual void setMaxCategories(int val) = 0;

     /** The maximum possible depth of the tree.
     That is the training algorithms attempts to split a node while its depth is less than maxDepth.
     The root node has zero depth. The actual depth may be smaller if the other termination criteria
     are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the
     tree is pruned. Default value is INT_MAX.*/
-    CV_PURE_PROPERTY(int, MaxDepth)
+    /** @see setMaxDepth */
+    CV_WRAP virtual int getMaxDepth() const = 0;
+    /** @copybrief getMaxDepth @see getMaxDepth */
+    CV_WRAP virtual void setMaxDepth(int val) = 0;

     /** If the number of samples in a node is less than this parameter then the node will not be split.
     Default value is 10.*/
-    CV_PURE_PROPERTY(int, MinSampleCount)
+    /** @see setMinSampleCount */
+    CV_WRAP virtual int getMinSampleCount() const = 0;
+    /** @copybrief getMinSampleCount @see getMinSampleCount */
+    CV_WRAP virtual void setMinSampleCount(int val) = 0;

     /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold
     cross-validation procedure where K is equal to CVFolds.
     Default value is 10.*/
-    CV_PURE_PROPERTY(int, CVFolds)
+    /** @see setCVFolds */
+    CV_WRAP virtual int getCVFolds() const = 0;
+    /** @copybrief getCVFolds @see getCVFolds */
+    CV_WRAP virtual void setCVFolds(int val) = 0;

     /** If true then surrogate splits will be built.
     These splits allow to work with missing data and compute variable importance correctly.
     Default value is false.
     @note currently it's not implemented.*/
-    CV_PURE_PROPERTY(bool, UseSurrogates)
+    /** @see setUseSurrogates */
+    CV_WRAP virtual bool getUseSurrogates() const = 0;
+    /** @copybrief getUseSurrogates @see getUseSurrogates */
+    CV_WRAP virtual void setUseSurrogates(bool val) = 0;

     /** If true then a pruning will be harsher.
     This will make a tree more compact and more resistant to the training data noise but a bit less
     accurate. Default value is true.*/
-    CV_PURE_PROPERTY(bool, Use1SERule)
+    /** @see setUse1SERule */
+    CV_WRAP virtual bool getUse1SERule() const = 0;
+    /** @copybrief getUse1SERule @see getUse1SERule */
+    CV_WRAP virtual void setUse1SERule(bool val) = 0;

     /** If true then pruned branches are physically removed from the tree.
     Otherwise they are retained and it is possible to get results from the original unpruned (or
     pruned less aggressively) tree. Default value is true.*/
-    CV_PURE_PROPERTY(bool, TruncatePrunedTree)
+    /** @see setTruncatePrunedTree */
+    CV_WRAP virtual bool getTruncatePrunedTree() const = 0;
+    /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */
+    CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0;

     /** Termination criteria for regression trees.
     If all absolute differences between an estimated value in a node and values of train samples
     in this node are less than this parameter then the node will not be split further. Default
     value is 0.01f*/
-    CV_PURE_PROPERTY(float, RegressionAccuracy)
+    /** @see setRegressionAccuracy */
+    CV_WRAP virtual float getRegressionAccuracy() const = 0;
+    /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */
+    CV_WRAP virtual void setRegressionAccuracy(float val) = 0;

     /** @brief The array of a priori class probabilities, sorted by the class label value.
@@ -982,7 +1054,10 @@ public:
     category is 1 and the weight of the second category is 10, then each mistake in predicting
     the second category is equivalent to making 10 mistakes in predicting the first category.
     Default value is empty Mat.*/
-    CV_PURE_PROPERTY_S(cv::Mat, Priors)
+    /** @see setPriors */
+    CV_WRAP virtual cv::Mat getPriors() const = 0;
+    /** @copybrief getPriors @see getPriors */
+    CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;

     /** @brief The class represents a decision tree node.
      */
@@ -1054,7 +1129,7 @@ public:
     trained using train method (see StatModel::train). Alternatively, you can load the model from
     file using StatModel::load\<DTrees\>(filename).
      */
-    static Ptr<DTrees> create();
+    CV_WRAP static Ptr<DTrees> create();
 };

 /****************************************************************************************\
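A minimal sketch configuring the decision-tree parameters that the hunk above turns into explicit setters (values chosen arbitrarily; 'data' as before):

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static Ptr<DTrees> trainTree(const Ptr<TrainData>& data)
    {
        Ptr<DTrees> tree = DTrees::create();
        tree->setMaxDepth(8);          // default is INT_MAX per the docs above
        tree->setMinSampleCount(2);    // do not split nodes smaller than this
        tree->setCVFolds(0);           // disable built-in K-fold pruning
        tree->setUse1SERule(false);
        tree->train(data);
        return tree;
    }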
@@ -1071,13 +1146,19 @@ public:
     /** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance.
     Default value is false.*/
-    CV_PURE_PROPERTY(bool, CalculateVarImportance)
+    /** @see setCalculateVarImportance */
+    CV_WRAP virtual bool getCalculateVarImportance() const = 0;
+    /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */
+    CV_WRAP virtual void setCalculateVarImportance(bool val) = 0;

     /** The size of the randomly selected subset of features at each tree node and that are used
     to find the best split(s).
     If you set it to 0 then the size will be set to the square root of the total number of
     features. Default value is 0.*/
-    CV_PURE_PROPERTY(int, ActiveVarCount)
+    /** @see setActiveVarCount */
+    CV_WRAP virtual int getActiveVarCount() const = 0;
+    /** @copybrief getActiveVarCount @see getActiveVarCount */
+    CV_WRAP virtual void setActiveVarCount(int val) = 0;

     /** The termination criteria that specifies when the training algorithm stops.
     Either when the specified number of trees is trained and added to the ensemble or when
@@ -1086,20 +1167,23 @@ public:
     pass a certain number of trees. Also to keep in mind, the number of tree increases the
     prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
     TermCriteria::EPS, 50, 0.1)*/
-    CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
+    /** @see setTermCriteria */
+    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
+    /** @copybrief getTermCriteria @see getTermCriteria */
+    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

     /** Returns the variable importance array.
     The method returns the variable importance vector, computed at the training stage when
     CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is
     returned.
      */
-    virtual Mat getVarImportance() const = 0;
+    CV_WRAP virtual Mat getVarImportance() const = 0;

     /** Creates the empty model.
     Use StatModel::train to train the model, StatModel::train to create and train the model,
     StatModel::load to load the pre-trained model.
      */
-    static Ptr<RTrees> create();
+    CV_WRAP static Ptr<RTrees> create();
 };

 /****************************************************************************************\
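Variable importance, mentioned in the getVarImportance documentation above, has to be requested before training. A minimal sketch:

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static Mat forestImportance(const Ptr<TrainData>& data)
    {
        Ptr<RTrees> forest = RTrees::create();
        forest->setCalculateVarImportance(true);   // otherwise getVarImportance() returns an empty Mat
        forest->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 50, 0.1));
        forest->train(data);
        return forest->getVarImportance();
    }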
@@ -1115,16 +1199,25 @@ class CV_EXPORTS_W Boost : public DTrees
 public:
     /** Type of the boosting algorithm.
     See Boost::Types. Default value is Boost::REAL. */
-    CV_PURE_PROPERTY(int, BoostType)
+    /** @see setBoostType */
+    CV_WRAP virtual int getBoostType() const = 0;
+    /** @copybrief getBoostType @see getBoostType */
+    CV_WRAP virtual void setBoostType(int val) = 0;

     /** The number of weak classifiers.
     Default value is 100. */
-    CV_PURE_PROPERTY(int, WeakCount)
+    /** @see setWeakCount */
+    CV_WRAP virtual int getWeakCount() const = 0;
+    /** @copybrief getWeakCount @see getWeakCount */
+    CV_WRAP virtual void setWeakCount(int val) = 0;

     /** A threshold between 0 and 1 used to save computational time.
     Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next*
     iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
-    CV_PURE_PROPERTY(double, WeightTrimRate)
+    /** @see setWeightTrimRate */
+    CV_WRAP virtual double getWeightTrimRate() const = 0;
+    /** @copybrief getWeightTrimRate @see getWeightTrimRate */
+    CV_WRAP virtual void setWeightTrimRate(double val) = 0;

     /** Boosting type.
     Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
@@ -1139,7 +1232,7 @@ public:
     /** Creates the empty model.
     Use StatModel::train to train the model, StatModel::load\<Boost\>(filename) to load the pre-trained model. */
-    static Ptr<Boost> create();
+    CV_WRAP static Ptr<Boost> create();
 };

 /****************************************************************************************\
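A minimal sketch of the boosted-trees setters above; the counts are arbitrary, and Boost::GENTLE is assumed to be the Boost::Types value for the Gentle AdaBoost variant named in the documentation.

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static Ptr<Boost> trainBoost(const Ptr<TrainData>& data)
    {
        Ptr<Boost> boost = Boost::create();
        boost->setBoostType(Boost::GENTLE);   // default would be Boost::REAL per the docs above
        boost->setWeakCount(50);              // number of weak classifiers
        boost->setWeightTrimRate(0.95);
        boost->train(data);
        return boost;
    }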
@@ -1189,7 +1282,7 @@ Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
 @sa @ref ml_intro_ann
  */
-class CV_EXPORTS_W ANN_MLP : public StatModel
+class CV_EXPORTS ANN_MLP : public StatModel
 {
 public:
     /** Available training methods */
@@ -1232,37 +1325,61 @@ public:
     You can specify the maximum number of iterations (maxCount) and/or how much the error could
     change between the iterations to make the algorithm continue (epsilon). Default value is
     TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
-    CV_PURE_PROPERTY(TermCriteria, TermCriteria)
+    /** @see setTermCriteria */
+    virtual TermCriteria getTermCriteria() const = 0;
+    /** @copybrief getTermCriteria @see getTermCriteria */
+    virtual void setTermCriteria(TermCriteria val) = 0;

     /** BPROP: Strength of the weight gradient term.
     The recommended value is about 0.1. Default value is 0.1.*/
-    CV_PURE_PROPERTY(double, BackpropWeightScale)
+    /** @see setBackpropWeightScale */
+    virtual double getBackpropWeightScale() const = 0;
+    /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */
+    virtual void setBackpropWeightScale(double val) = 0;

     /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
     This parameter provides some inertia to smooth the random fluctuations of the weights. It can
     vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
     Default value is 0.1.*/
-    CV_PURE_PROPERTY(double, BackpropMomentumScale)
+    /** @see setBackpropMomentumScale */
+    virtual double getBackpropMomentumScale() const = 0;
+    /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */
+    virtual void setBackpropMomentumScale(double val) = 0;

     /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
     Default value is 0.1.*/
-    CV_PURE_PROPERTY(double, RpropDW0)
+    /** @see setRpropDW0 */
+    virtual double getRpropDW0() const = 0;
+    /** @copybrief getRpropDW0 @see getRpropDW0 */
+    virtual void setRpropDW0(double val) = 0;

     /** RPROP: Increase factor \f$\eta^+\f$.
     It must be \>1. Default value is 1.2.*/
-    CV_PURE_PROPERTY(double, RpropDWPlus)
+    /** @see setRpropDWPlus */
+    virtual double getRpropDWPlus() const = 0;
+    /** @copybrief getRpropDWPlus @see getRpropDWPlus */
+    virtual void setRpropDWPlus(double val) = 0;

     /** RPROP: Decrease factor \f$\eta^-\f$.
     It must be \<1. Default value is 0.5.*/
-    CV_PURE_PROPERTY(double, RpropDWMinus)
+    /** @see setRpropDWMinus */
+    virtual double getRpropDWMinus() const = 0;
+    /** @copybrief getRpropDWMinus @see getRpropDWMinus */
+    virtual void setRpropDWMinus(double val) = 0;

     /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
     It must be positive. Default value is FLT_EPSILON.*/
-    CV_PURE_PROPERTY(double, RpropDWMin)
+    /** @see setRpropDWMin */
+    virtual double getRpropDWMin() const = 0;
+    /** @copybrief getRpropDWMin @see getRpropDWMin */
+    virtual void setRpropDWMin(double val) = 0;

     /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
     It must be \>1. Default value is 50.*/
-    CV_PURE_PROPERTY(double, RpropDWMax)
+    /** @see setRpropDWMax */
+    virtual double getRpropDWMax() const = 0;
+    /** @copybrief getRpropDWMax @see getRpropDWMax */
+    virtual void setRpropDWMax(double val) = 0;

     /** possible activation functions */
     enum ActivationFunctions {
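A minimal sketch of tuning the backprop parameters expanded above. setLayerSizes and setActivationFunction are existing ANN_MLP members that are not part of this hunk, so treat their exact signatures as an assumption; the layer sizes and values are arbitrary.

    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    static Ptr<ANN_MLP> buildMlp(const Ptr<TrainData>& data)
    {
        Ptr<ANN_MLP> mlp = ANN_MLP::create();

        // Assumed helpers from the rest of ANN_MLP's interface: 2 inputs, 4 hidden, 1 output.
        Mat layers = (Mat_<int>(3, 1) << 2, 4, 1);
        mlp->setLayerSizes(layers);
        mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM);

        // Properties expanded in the hunk above.
        mlp->setBackpropWeightScale(0.1);
        mlp->setBackpropMomentumScale(0.1);
        mlp->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01));

        mlp->train(data);
        return mlp;
    }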
...
@@ -1313,29 +1430,47 @@ public:
...
@@ -1313,29 +1430,47 @@ public:
    @sa @ref ml_intro_lr
 */
class CV_EXPORTS LogisticRegression : public StatModel
class CV_EXPORTS_W LogisticRegression : public StatModel
{
public:
    /** Learning rate. */
    CV_PURE_PROPERTY(double, LearningRate)
    /** @see setLearningRate */
    CV_WRAP virtual double getLearningRate() const = 0;
    /** @copybrief getLearningRate @see getLearningRate */
    CV_WRAP virtual void setLearningRate(double val) = 0;

    /** Number of iterations. */
    CV_PURE_PROPERTY(int, Iterations)
    /** @see setIterations */
    CV_WRAP virtual int getIterations() const = 0;
    /** @copybrief getIterations @see getIterations */
    CV_WRAP virtual void setIterations(int val) = 0;

    /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
    CV_PURE_PROPERTY(int, Regularization)
    /** @see setRegularization */
    CV_WRAP virtual int getRegularization() const = 0;
    /** @copybrief getRegularization @see getRegularization */
    CV_WRAP virtual void setRegularization(int val) = 0;

    /** Kind of training method used. See LogisticRegression::Methods. */
    CV_PURE_PROPERTY(int, TrainMethod)
    /** @see setTrainMethod */
    CV_WRAP virtual int getTrainMethod() const = 0;
    /** @copybrief getTrainMethod @see getTrainMethod */
    CV_WRAP virtual void setTrainMethod(int val) = 0;

    /** Specifies the number of training samples taken in each step of Mini-Batch Gradient
    Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It
    has to take values less than the total number of training samples. */
    CV_PURE_PROPERTY(int, MiniBatchSize)
    /** @see setMiniBatchSize */
    CV_WRAP virtual int getMiniBatchSize() const = 0;
    /** @copybrief getMiniBatchSize @see getMiniBatchSize */
    CV_WRAP virtual void setMiniBatchSize(int val) = 0;

    /** Termination criteria of the algorithm. */
    CV_PURE_PROPERTY(TermCriteria, TermCriteria)
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

    //! Regularization kinds
    enum RegKinds {
...
@@ -1357,20 +1492,20 @@ public:
    @param results Predicted labels as a column matrix of type CV_32S.
    @param flags Not used.
     */
    virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;

    /** @brief This function returns the trained parameters arranged across rows.

    For a two class classification problem, it returns a row matrix. It returns learnt parameters of
    the Logistic Regression as a matrix of type CV_32F.
     */
    virtual Mat get_learnt_thetas() const = 0;
    CV_WRAP virtual Mat get_learnt_thetas() const = 0;

    /** @brief Creates empty model.

    Creates Logistic Regression model with parameters given.
     */
    static Ptr<LogisticRegression> create();
    CV_WRAP static Ptr<LogisticRegression> create();
};
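As a usage sketch for the wrapped accessors and create() above (the training data below is invented purely for illustration):

    #include <iostream>
    #include <opencv2/core.hpp>
    #include <opencv2/ml.hpp>

    using namespace cv;
    using namespace cv::ml;

    int main()
    {
        // 8 samples with 2 features each and binary labels; LogisticRegression expects CV_32F data.
        Mat samples = (Mat_<float>(8, 2) << 1, 1,  2, 1,  1, 2,  2, 2,
                                            8, 8,  9, 8,  8, 9,  9, 9);
        Mat labels  = (Mat_<float>(8, 1) << 0, 0, 0, 0, 1, 1, 1, 1);

        Ptr<LogisticRegression> lr = LogisticRegression::create();
        lr->setLearningRate(0.001);
        lr->setIterations(100);
        lr->setRegularization(LogisticRegression::REG_L2);
        lr->setTrainMethod(LogisticRegression::BATCH);
        lr->train(samples, ROW_SAMPLE, labels);

        Mat responses;
        lr->predict(samples, responses);          // one CV_32S label per sample
        Mat thetas = lr->get_learnt_thetas();     // learnt parameters, CV_32F
        std::cout << responses.t() << std::endl;
        std::cout << thetas << std::endl;
        return 0;
    }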
/****************************************************************************************\
...
modules/superres/include/opencv2/superres.hpp
View file @
daa99514
...
@@ -105,34 +105,64 @@ namespace cv
        virtual void collectGarbage();

        //! @brief Scale factor
        CV_PURE_PROPERTY(int, Scale)
        /** @see setScale */
        virtual int getScale() const = 0;
        /** @copybrief getScale @see getScale */
        virtual void setScale(int val) = 0;

        //! @brief Iterations count
        CV_PURE_PROPERTY(int, Iterations)
        /** @see setIterations */
        virtual int getIterations() const = 0;
        /** @copybrief getIterations @see getIterations */
        virtual void setIterations(int val) = 0;

        //! @brief Asymptotic value of steepest descent method
        CV_PURE_PROPERTY(double, Tau)
        /** @see setTau */
        virtual double getTau() const = 0;
        /** @copybrief getTau @see getTau */
        virtual void setTau(double val) = 0;

        //! @brief Weight parameter to balance data term and smoothness term
        CV_PURE_PROPERTY(double, Labmda)
        /** @see setLabmda */
        virtual double getLabmda() const = 0;
        /** @copybrief getLabmda @see getLabmda */
        virtual void setLabmda(double val) = 0;

        //! @brief Parameter of spatial distribution in Bilateral-TV
        CV_PURE_PROPERTY(double, Alpha)
        /** @see setAlpha */
        virtual double getAlpha() const = 0;
        /** @copybrief getAlpha @see getAlpha */
        virtual void setAlpha(double val) = 0;

        //! @brief Kernel size of Bilateral-TV filter
        CV_PURE_PROPERTY(int, KernelSize)
        /** @see setKernelSize */
        virtual int getKernelSize() const = 0;
        /** @copybrief getKernelSize @see getKernelSize */
        virtual void setKernelSize(int val) = 0;

        //! @brief Gaussian blur kernel size
        CV_PURE_PROPERTY(int, BlurKernelSize)
        /** @see setBlurKernelSize */
        virtual int getBlurKernelSize() const = 0;
        /** @copybrief getBlurKernelSize @see getBlurKernelSize */
        virtual void setBlurKernelSize(int val) = 0;

        //! @brief Gaussian blur sigma
        CV_PURE_PROPERTY(double, BlurSigma)
        /** @see setBlurSigma */
        virtual double getBlurSigma() const = 0;
        /** @copybrief getBlurSigma @see getBlurSigma */
        virtual void setBlurSigma(double val) = 0;

        //! @brief Radius of the temporal search area
        CV_PURE_PROPERTY(int, TemporalAreaRadius)
        /** @see setTemporalAreaRadius */
        virtual int getTemporalAreaRadius() const = 0;
        /** @copybrief getTemporalAreaRadius @see getTemporalAreaRadius */
        virtual void setTemporalAreaRadius(int val) = 0;

        //! @brief Dense optical flow algorithm
        CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)
        /** @see setOpticalFlow */
        virtual Ptr<cv::superres::DenseOpticalFlowExt> getOpticalFlow() const = 0;
        /** @copybrief getOpticalFlow @see getOpticalFlow */
        virtual void setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt>& val) = 0;

    protected:
        SuperResolution();
...
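A configuration sketch for these properties; createSuperResolution_BTVL1() and createFrameSource_Video() are assumed from other parts of superres.hpp (they are not visible in this hunk), and the video path is a placeholder:

    #include <string>
    #include <opencv2/superres.hpp>
    #include <opencv2/superres/optical_flow.hpp>

    using namespace cv;
    using namespace cv::superres;

    void runSuperRes(const std::string& videoPath)
    {
        Ptr<SuperResolution> sr = createSuperResolution_BTVL1();
        sr->setScale(4);               // upscaling factor
        sr->setIterations(100);        // BTV-L1 iterations per frame
        sr->setTemporalAreaRadius(4);  // neighbouring frames used on each side
        sr->setOpticalFlow(createOptFlow_Farneback());

        sr->setInput(createFrameSource_Video(videoPath));

        Mat frame;
        for (;;)
        {
            sr->nextFrame(frame);      // next super-resolved frame
            if (frame.empty())
                break;
        }
    }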
modules/superres/include/opencv2/superres/optical_flow.hpp
View file @
daa99514
...
@@ -64,13 +64,34 @@ namespace cv
        class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
        {
        public:
            CV_PURE_PROPERTY(double, PyrScale)
            /** @see setPyrScale */
            virtual double getPyrScale() const = 0;
            /** @copybrief getPyrScale @see getPyrScale */
            virtual void setPyrScale(double val) = 0;

            CV_PURE_PROPERTY(int, LevelsNumber)
            /** @see setLevelsNumber */
            virtual int getLevelsNumber() const = 0;
            /** @copybrief getLevelsNumber @see getLevelsNumber */
            virtual void setLevelsNumber(int val) = 0;

            CV_PURE_PROPERTY(int, WindowSize)
            /** @see setWindowSize */
            virtual int getWindowSize() const = 0;
            /** @copybrief getWindowSize @see getWindowSize */
            virtual void setWindowSize(int val) = 0;

            CV_PURE_PROPERTY(int, Iterations)
            /** @see setIterations */
            virtual int getIterations() const = 0;
            /** @copybrief getIterations @see getIterations */
            virtual void setIterations(int val) = 0;

            CV_PURE_PROPERTY(int, PolyN)
            /** @see setPolyN */
            virtual int getPolyN() const = 0;
            /** @copybrief getPolyN @see getPolyN */
            virtual void setPolyN(int val) = 0;

            CV_PURE_PROPERTY(double, PolySigma)
            /** @see setPolySigma */
            virtual double getPolySigma() const = 0;
            /** @copybrief getPolySigma @see getPolySigma */
            virtual void setPolySigma(double val) = 0;

            CV_PURE_PROPERTY(int, Flags)
            /** @see setFlags */
            virtual int getFlags() const = 0;
            /** @copybrief getFlags @see getFlags */
            virtual void setFlags(int val) = 0;
        };
        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
...
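A short sketch of driving the Farneback variant through the setters above; calc() comes from the DenseOpticalFlowExt base class, which is outside this hunk, and the parameter values are illustrative:

    #include <opencv2/core.hpp>
    #include <opencv2/superres/optical_flow.hpp>

    using namespace cv;
    using namespace cv::superres;

    // Dense flow from frame0 to frame1.
    void denseFlowBetween(const Mat& frame0, const Mat& frame1, Mat& flow)
    {
        Ptr<FarnebackOpticalFlow> of = createOptFlow_Farneback();
        of->setPyrScale(0.5);     // each pyramid level is half the size of the previous one
        of->setLevelsNumber(5);
        of->setWindowSize(13);
        of->setIterations(10);
        of->setPolyN(5);
        of->setPolySigma(1.1);
        of->calc(frame0, frame1, flow);
    }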
@@ -82,14 +103,38 @@ namespace cv
        class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
        {
        public:
            CV_PURE_PROPERTY(double, Tau)
            /** @see setTau */
            virtual double getTau() const = 0;
            /** @copybrief getTau @see getTau */
            virtual void setTau(double val) = 0;

            CV_PURE_PROPERTY(double, Lambda)
            /** @see setLambda */
            virtual double getLambda() const = 0;
            /** @copybrief getLambda @see getLambda */
            virtual void setLambda(double val) = 0;

            CV_PURE_PROPERTY(double, Theta)
            /** @see setTheta */
            virtual double getTheta() const = 0;
            /** @copybrief getTheta @see getTheta */
            virtual void setTheta(double val) = 0;

            CV_PURE_PROPERTY(int, ScalesNumber)
            /** @see setScalesNumber */
            virtual int getScalesNumber() const = 0;
            /** @copybrief getScalesNumber @see getScalesNumber */
            virtual void setScalesNumber(int val) = 0;

            CV_PURE_PROPERTY(int, WarpingsNumber)
            /** @see setWarpingsNumber */
            virtual int getWarpingsNumber() const = 0;
            /** @copybrief getWarpingsNumber @see getWarpingsNumber */
            virtual void setWarpingsNumber(int val) = 0;

            CV_PURE_PROPERTY(double, Epsilon)
            /** @see setEpsilon */
            virtual double getEpsilon() const = 0;
            /** @copybrief getEpsilon @see getEpsilon */
            virtual void setEpsilon(double val) = 0;

            CV_PURE_PROPERTY(int, Iterations)
            /** @see setIterations */
            virtual int getIterations() const = 0;
            /** @copybrief getIterations @see getIterations */
            virtual void setIterations(int val) = 0;

            CV_PURE_PROPERTY(bool, UseInitialFlow)
            /** @see setUseInitialFlow */
            virtual bool getUseInitialFlow() const = 0;
            /** @copybrief getUseInitialFlow @see getUseInitialFlow */
            virtual void setUseInitialFlow(bool val) = 0;
        };
        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
...
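The TV-L1 variant is typically plugged into SuperResolution::setOpticalFlow() (see superres.hpp above); a sketch with illustrative parameter values:

    #include <opencv2/superres.hpp>
    #include <opencv2/superres/optical_flow.hpp>

    using namespace cv::superres;

    cv::Ptr<DenseOpticalFlowExt> makeTVL1Flow()
    {
        cv::Ptr<DualTVL1OpticalFlow> of = createOptFlow_DualTVL1();
        of->setTau(0.25);            // time step of the numerical scheme
        of->setLambda(0.15);         // data term weight
        of->setScalesNumber(5);
        of->setWarpingsNumber(5);
        of->setUseInitialFlow(false);
        return of;                   // usable via SuperResolution::setOpticalFlow(...)
    }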
@@ -99,17 +144,35 @@ namespace cv
        {
        public:
            //! @brief Flow smoothness
            CV_PURE_PROPERTY(double, Alpha)
            /** @see setAlpha */
            virtual double getAlpha() const = 0;
            /** @copybrief getAlpha @see getAlpha */
            virtual void setAlpha(double val) = 0;

            //! @brief Gradient constancy importance
            CV_PURE_PROPERTY(double, Gamma)
            /** @see setGamma */
            virtual double getGamma() const = 0;
            /** @copybrief getGamma @see getGamma */
            virtual void setGamma(double val) = 0;

            //! @brief Pyramid scale factor
            CV_PURE_PROPERTY(double, ScaleFactor)
            /** @see setScaleFactor */
            virtual double getScaleFactor() const = 0;
            /** @copybrief getScaleFactor @see getScaleFactor */
            virtual void setScaleFactor(double val) = 0;

            //! @brief Number of lagged non-linearity iterations (inner loop)
            CV_PURE_PROPERTY(int, InnerIterations)
            /** @see setInnerIterations */
            virtual int getInnerIterations() const = 0;
            /** @copybrief getInnerIterations @see getInnerIterations */
            virtual void setInnerIterations(int val) = 0;

            //! @brief Number of warping iterations (number of pyramid levels)
            CV_PURE_PROPERTY(int, OuterIterations)
            /** @see setOuterIterations */
            virtual int getOuterIterations() const = 0;
            /** @copybrief getOuterIterations @see getOuterIterations */
            virtual void setOuterIterations(int val) = 0;

            //! @brief Number of linear system solver iterations
            CV_PURE_PROPERTY(int, SolverIterations)
            /** @see setSolverIterations */
            virtual int getSolverIterations() const = 0;
            /** @copybrief getSolverIterations @see getSolverIterations */
            virtual void setSolverIterations(int val) = 0;
        };
        CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
...
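The Brox flow is only exposed through a CUDA creator here, so using it assumes a CUDA-enabled build; the values below simply restate commonly cited defaults and are illustrative:

    #include <opencv2/superres/optical_flow.hpp>

    using namespace cv::superres;

    cv::Ptr<DenseOpticalFlowExt> makeBroxFlow()
    {
        cv::Ptr<BroxOpticalFlow> of = createOptFlow_Brox_CUDA();
        of->setAlpha(0.197);        // flow smoothness
        of->setGamma(50.0);         // gradient constancy importance
        of->setScaleFactor(0.8);    // pyramid scale factor
        of->setInnerIterations(10);
        of->setOuterIterations(77);
        of->setSolverIterations(10);
        return of;
    }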
@@ -117,9 +180,18 @@ namespace cv
        class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
        {
        public:
            CV_PURE_PROPERTY(int, WindowSize)
            /** @see setWindowSize */
            virtual int getWindowSize() const = 0;
            /** @copybrief getWindowSize @see getWindowSize */
            virtual void setWindowSize(int val) = 0;

            CV_PURE_PROPERTY(int, MaxLevel)
            /** @see setMaxLevel */
            virtual int getMaxLevel() const = 0;
            /** @copybrief getMaxLevel @see getMaxLevel */
            virtual void setMaxLevel(int val) = 0;

            CV_PURE_PROPERTY(int, Iterations)
            /** @see setIterations */
            virtual int getIterations() const = 0;
            /** @copybrief getIterations @see getIterations */
            virtual void setIterations(int val) = 0;
        };
        CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();
...
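Likewise for the pyramidal Lucas-Kanade variant, which is CUDA-only in this header; a minimal sketch with illustrative values:

    #include <opencv2/superres/optical_flow.hpp>

    using namespace cv::superres;

    cv::Ptr<DenseOpticalFlowExt> makePyrLKFlow()
    {
        cv::Ptr<PyrLKOpticalFlow> of = createOptFlow_PyrLK_CUDA();
        of->setWindowSize(21);  // search window per pyramid level
        of->setMaxLevel(3);     // number of pyramid levels above the base image
        of->setIterations(30);  // iteration limit per level
        return of;
    }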
modules/video/include/opencv2/video/tracking.hpp
View file @
daa99514
...
@@ -441,29 +441,65 @@ class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
    //! @brief Time step of the numerical scheme
    CV_PURE_PROPERTY(double, Tau)
    /** @see setTau */
    virtual double getTau() const = 0;
    /** @copybrief getTau @see getTau */
    virtual void setTau(double val) = 0;

    //! @brief Weight parameter for the data term, attachment parameter
    CV_PURE_PROPERTY(double, Lambda)
    /** @see setLambda */
    virtual double getLambda() const = 0;
    /** @copybrief getLambda @see getLambda */
    virtual void setLambda(double val) = 0;

    //! @brief Weight parameter for (u - v)^2, tightness parameter
    CV_PURE_PROPERTY(double, Theta)
    /** @see setTheta */
    virtual double getTheta() const = 0;
    /** @copybrief getTheta @see getTheta */
    virtual void setTheta(double val) = 0;

    //! @brief coefficient for additional illumination variation term
    CV_PURE_PROPERTY(double, Gamma)
    /** @see setGamma */
    virtual double getGamma() const = 0;
    /** @copybrief getGamma @see getGamma */
    virtual void setGamma(double val) = 0;

    //! @brief Number of scales used to create the pyramid of images
    CV_PURE_PROPERTY(int, ScalesNumber)
    /** @see setScalesNumber */
    virtual int getScalesNumber() const = 0;
    /** @copybrief getScalesNumber @see getScalesNumber */
    virtual void setScalesNumber(int val) = 0;

    //! @brief Number of warpings per scale
    CV_PURE_PROPERTY(int, WarpingsNumber)
    /** @see setWarpingsNumber */
    virtual int getWarpingsNumber() const = 0;
    /** @copybrief getWarpingsNumber @see getWarpingsNumber */
    virtual void setWarpingsNumber(int val) = 0;

    //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
    CV_PURE_PROPERTY(double, Epsilon)
    /** @see setEpsilon */
    virtual double getEpsilon() const = 0;
    /** @copybrief getEpsilon @see getEpsilon */
    virtual void setEpsilon(double val) = 0;

    //! @brief Inner iterations (between outlier filtering) used in the numerical scheme
    CV_PURE_PROPERTY(int, InnerIterations)
    /** @see setInnerIterations */
    virtual int getInnerIterations() const = 0;
    /** @copybrief getInnerIterations @see getInnerIterations */
    virtual void setInnerIterations(int val) = 0;

    //! @brief Outer iterations (number of inner loops) used in the numerical scheme
    CV_PURE_PROPERTY(int, OuterIterations)
    /** @see setOuterIterations */
    virtual int getOuterIterations() const = 0;
    /** @copybrief getOuterIterations @see getOuterIterations */
    virtual void setOuterIterations(int val) = 0;

    //! @brief Use initial flow
    CV_PURE_PROPERTY(bool, UseInitialFlow)
    /** @see setUseInitialFlow */
    virtual bool getUseInitialFlow() const = 0;
    /** @copybrief getUseInitialFlow @see getUseInitialFlow */
    virtual void setUseInitialFlow(bool val) = 0;

    //! @brief Step between scales (<1)
    CV_PURE_PROPERTY(double, ScaleStep)
    /** @see setScaleStep */
    virtual double getScaleStep() const = 0;
    /** @copybrief getScaleStep @see getScaleStep */
    virtual void setScaleStep(double val) = 0;

    //! @brief Median filter kernel size (1 = no filter) (3 or 5)
    CV_PURE_PROPERTY(int, MedianFiltering)
    /** @see setMedianFiltering */
    virtual int getMedianFiltering() const = 0;
    /** @copybrief getMedianFiltering @see getMedianFiltering */
    virtual void setMedianFiltering(int val) = 0;
};
/** @brief Creates instance of cv::DenseOpticalFlow
...
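Finally, a minimal sketch for the CPU TV-L1 flow from the video module; createOptFlow_DualTVL1() is assumed to be the factory declared further down in tracking.hpp (it is not visible in this hunk), and the input frames are assumed to be 8-bit grayscale:

    #include <opencv2/core.hpp>
    #include <opencv2/video/tracking.hpp>

    using namespace cv;

    // Dense TV-L1 optical flow between two grayscale frames; flow comes back as CV_32FC2.
    void tvl1Flow(const Mat& prevGray, const Mat& nextGray, Mat& flow)
    {
        Ptr<DualTVL1OpticalFlow> tvl1 = createOptFlow_DualTVL1();
        tvl1->setTau(0.25);           // time step of the numerical scheme
        tvl1->setLambda(0.15);        // data term weight
        tvl1->setTheta(0.3);          // tightness parameter
        tvl1->setScalesNumber(5);
        tvl1->setWarpingsNumber(5);
        tvl1->setEpsilon(0.01);       // stopping criterion threshold
        tvl1->setMedianFiltering(5);  // median filter kernel size
        tvl1->calc(prevGray, nextGray, flow);
    }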