Commit 7a3c0cb7 authored May 06, 2015 by Vadim Pisarevsky
Merge pull request #3991 from mshabunin:python-underscore
Parents: 17565901, c137ba53
Showing 3 changed files with 34 additions and 25 deletions:

modules/ml/include/opencv2/ml.hpp    +24 -24
modules/python/src2/gen2.py          +1  -1
modules/python/src2/hdr_parser.py    +9  -0
modules/ml/include/opencv2/ml.hpp
@@ -1241,7 +1241,7 @@ Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
 @sa @ref ml_intro_ann
  */
-class CV_EXPORTS ANN_MLP : public StatModel
+class CV_EXPORTS_W ANN_MLP : public StatModel
 {
 public:
     /** Available training methods */
@@ -1255,10 +1255,10 @@ public:
     @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP
     @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP.
      */
-    virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;
+    CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;

     /** Returns current training method */
-    virtual int getTrainMethod() const = 0;
+    CV_WRAP virtual int getTrainMethod() const = 0;

     /** Initialize the activation function for each neuron.
     Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
@@ -1266,79 +1266,79 @@ public:
     @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0.
     @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0.
      */
-    virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;
+    CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;

     /** Integer vector specifying the number of neurons in each layer including the input and output layers.
     The very first element specifies the number of elements in the input layer.
     The last element - number of elements in the output layer. Default value is empty Mat.
     @sa getLayerSizes */
-    virtual void setLayerSizes(InputArray _layer_sizes) = 0;
+    CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0;

     /** Integer vector specifying the number of neurons in each layer including the input and output layers.
     The very first element specifies the number of elements in the input layer.
     The last element - number of elements in the output layer.
     @sa setLayerSizes */
-    virtual cv::Mat getLayerSizes() const = 0;
+    CV_WRAP virtual cv::Mat getLayerSizes() const = 0;

     /** Termination criteria of the training algorithm.
     You can specify the maximum number of iterations (maxCount) and/or how much the error could
     change between the iterations to make the algorithm continue (epsilon). Default value is
     TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
     /** @see setTermCriteria */
-    virtual TermCriteria getTermCriteria() const = 0;
+    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
     /** @copybrief getTermCriteria @see getTermCriteria */
-    virtual void setTermCriteria(TermCriteria val) = 0;
+    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

     /** BPROP: Strength of the weight gradient term.
     The recommended value is about 0.1. Default value is 0.1.*/
     /** @see setBackpropWeightScale */
-    virtual double getBackpropWeightScale() const = 0;
+    CV_WRAP virtual double getBackpropWeightScale() const = 0;
     /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */
-    virtual void setBackpropWeightScale(double val) = 0;
+    CV_WRAP virtual void setBackpropWeightScale(double val) = 0;

     /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
     This parameter provides some inertia to smooth the random fluctuations of the weights. It can
     vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
     Default value is 0.1.*/
     /** @see setBackpropMomentumScale */
-    virtual double getBackpropMomentumScale() const = 0;
+    CV_WRAP virtual double getBackpropMomentumScale() const = 0;
     /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */
-    virtual void setBackpropMomentumScale(double val) = 0;
+    CV_WRAP virtual void setBackpropMomentumScale(double val) = 0;

     /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
     Default value is 0.1.*/
     /** @see setRpropDW0 */
-    virtual double getRpropDW0() const = 0;
+    CV_WRAP virtual double getRpropDW0() const = 0;
     /** @copybrief getRpropDW0 @see getRpropDW0 */
-    virtual void setRpropDW0(double val) = 0;
+    CV_WRAP virtual void setRpropDW0(double val) = 0;

     /** RPROP: Increase factor \f$\eta^+\f$.
     It must be \>1. Default value is 1.2.*/
     /** @see setRpropDWPlus */
-    virtual double getRpropDWPlus() const = 0;
+    CV_WRAP virtual double getRpropDWPlus() const = 0;
     /** @copybrief getRpropDWPlus @see getRpropDWPlus */
-    virtual void setRpropDWPlus(double val) = 0;
+    CV_WRAP virtual void setRpropDWPlus(double val) = 0;

     /** RPROP: Decrease factor \f$\eta^-\f$.
     It must be \<1. Default value is 0.5.*/
     /** @see setRpropDWMinus */
-    virtual double getRpropDWMinus() const = 0;
+    CV_WRAP virtual double getRpropDWMinus() const = 0;
     /** @copybrief getRpropDWMinus @see getRpropDWMinus */
-    virtual void setRpropDWMinus(double val) = 0;
+    CV_WRAP virtual void setRpropDWMinus(double val) = 0;

     /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
     It must be positive. Default value is FLT_EPSILON.*/
     /** @see setRpropDWMin */
-    virtual double getRpropDWMin() const = 0;
+    CV_WRAP virtual double getRpropDWMin() const = 0;
     /** @copybrief getRpropDWMin @see getRpropDWMin */
-    virtual void setRpropDWMin(double val) = 0;
+    CV_WRAP virtual void setRpropDWMin(double val) = 0;

     /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
     It must be \>1. Default value is 50.*/
     /** @see setRpropDWMax */
-    virtual double getRpropDWMax() const = 0;
+    CV_WRAP virtual double getRpropDWMax() const = 0;
     /** @copybrief getRpropDWMax @see getRpropDWMax */
-    virtual void setRpropDWMax(double val) = 0;
+    CV_WRAP virtual void setRpropDWMax(double val) = 0;

     /** possible activation functions */
     enum ActivationFunctions {
@@ -1371,14 +1371,14 @@ public:
         NO_OUTPUT_SCALE = 4
     };

-    virtual Mat getWeights(int layerIdx) const = 0;
+    CV_WRAP virtual Mat getWeights(int layerIdx) const = 0;

     /** @brief Creates empty model

     Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model.
     Note that the train method has optional flags: ANN_MLP::TrainFlags.
     */
-    static Ptr<ANN_MLP> create();
+    CV_WRAP static Ptr<ANN_MLP> create();
 };

 /****************************************************************************************\
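
Marking the class CV_EXPORTS_W and its accessors CV_WRAP is what lets the generator scripts changed below emit Python wrappers for them. A minimal sketch of how the wrapped ANN_MLP API is then used from Python (method and constant names follow the usual cv2.ml naming scheme; exact availability depends on the build):

    import cv2
    import numpy as np

    # Create and configure the wrapped ANN_MLP model from Python.
    mlp = cv2.ml.ANN_MLP_create()
    mlp.setLayerSizes(np.array([2, 4, 1], dtype=np.int32))
    mlp.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)
    mlp.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP, 0.1, 0.1)
    mlp.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 1000, 0.01))

    # Train on a toy XOR-like dataset and inspect the weights of the first layer.
    samples   = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
    responses = np.array([[0], [1], [1], [0]], dtype=np.float32)
    mlp.train(samples, cv2.ml.ROW_SAMPLE, responses)
    print(mlp.getWeights(0))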
modules/python/src2/gen2.py
@@ -393,7 +393,7 @@ class FuncVariant(object):
         self.name = self.wname = name
         self.isconstructor = isconstructor
-        self.rettype = handle_ptr(decl[1])
+        self.rettype = decl[4] if len(decl) >= 5 else handle_ptr(decl[1])
         if self.rettype == "void":
             self.rettype = ""
         self.args = []
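
In other words, FuncVariant now prefers the original C++ return type when hdr_parser supplies it as an optional fifth element of the declaration list, and otherwise falls back to the underscored wrapper spelling as before. A rough illustration of the two shapes of decl this line has to handle (the concrete values here are hypothetical, not taken from a real parser run):

    def pick_rettype(decl):
        # decl[1] is the wrapper-friendly return type produced by hdr_parser
        # (underscored, e.g. "Ptr_ANN_MLP"); decl[4], when present, carries the
        # original C++ spelling added by this change.
        return decl[4] if len(decl) >= 5 else decl[1]

    old_style = ["cv.ml.ANN_MLP.create", "Ptr_ANN_MLP", ["/S"], []]
    new_style = ["cv.ml.ANN_MLP.create", "Ptr_ANN_MLP", ["/S"], [], "Ptr<ANN_MLP>"]
    print(pick_rettype(old_style))   # Ptr_ANN_MLP
    print(pick_rettype(new_style))   # Ptr<ANN_MLP>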
modules/python/src2/hdr_parser.py
@@ -445,6 +445,12 @@ class CppHeaderParser(object):
         rettype, funcname, modlist, argno = self.parse_arg(decl_start, -1)

+        # determine original return type, hack for return types with underscore
+        original_type = None
+        i = decl_start.rfind(funcname)
+        if i > 0:
+            original_type = decl_start[:i].replace("&", "").replace("const", "").strip()
+
         if argno >= 0:
             classname = top[1]
             if rettype == classname or rettype == "~" + classname:
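
The added lines recover the return type exactly as it was spelled in the header by slicing everything before the last occurrence of the function name and stripping const and &. A small worked example of that string manipulation (the input string is a hypothetical declaration prefix, not taken from a real parse):

    decl_start = "Ptr<ANN_MLP> create"
    funcname = "create"

    i = decl_start.rfind(funcname)    # 13
    original_type = decl_start[:i].replace("&", "").replace("const", "").strip()
    print(original_type)              # "Ptr<ANN_MLP>"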
@@ -560,7 +566,10 @@ class CppHeaderParser(object):
         if static_method:
             func_modlist.append("/S")

+        if original_type is None:
             return [funcname, rettype, func_modlist, args]
+        else:
+            return [funcname, rettype, func_modlist, args, original_type]

     def get_dotted_name(self, name):
         """