Commit 81a59880 authored by Andrey Kamaev

Merged the trunk r8467:8507 (inclusive) (big bunch of documentation fixes)

parent 052d2dc2
@@ -294,7 +294,8 @@ class OCVPyXRefRole(XRefRole):
 ########################### C/C++/Java Part ###########################
-_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*)\b')
+_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*\b)')
+_argument_name_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*\b(?:\[\d*\])?|\.\.\.)')
 _whitespace_re = re.compile(r'\s+(?u)')
 _string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
                         r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
@@ -303,10 +304,10 @@ _operator_re = re.compile(r'''(?x)
     \[\s*\]
     | \(\s*\)
     | (<<|>>)=?
+    | \+\+ | -- | ->\*?
     | [!<>=/*%+|&^-]=?
-    | \+\+ | --
     | ~ | && | \| | \|\|
-    | ->\*? | \,
+    | \,
 ''')
@@ -667,13 +668,14 @@ class MemberObjDefExpr(NamedDefExpr):
 class FuncDefExpr(NamedDefExpr):
     def __init__(self, name, visibility, static, explicit, rv,
-                 signature, const, pure_virtual):
+                 signature, const, pure_virtual, virtual):
         NamedDefExpr.__init__(self, name, visibility, static)
         self.rv = rv
         self.signature = signature
         self.explicit = explicit
         self.const = const
         self.pure_virtual = pure_virtual
+        self.virtual = virtual
     def get_id(self):
         return u'%s%s%s' % (
@@ -687,6 +689,8 @@ class FuncDefExpr(NamedDefExpr):
         buf = self.get_modifiers()
         if self.explicit:
             buf.append(u'explicit')
+        if self.virtual:
+            buf.append(u'virtual')
         if self.rv is not None:
             buf.append(unicode(self.rv))
         buf.append(u'%s(%s)' % (self.name, u', '.join(
@@ -700,8 +704,9 @@ class FuncDefExpr(NamedDefExpr):
 class ClassDefExpr(NamedDefExpr):
-    def __init__(self, name, visibility, static):
+    def __init__(self, name, visibility, static, parents = None):
         NamedDefExpr.__init__(self, name, visibility, static)
+        self.parents = parents
     def get_id(self):
         return self.name.get_id()
@@ -788,7 +793,6 @@ class DefinitionParser(object):
         if self.match(_operator_re):
             return NameDefExpr('operator' +
                                _whitespace_re.sub('', self.matched_text))
         # new/delete operator?
         for allocop in 'new', 'delete':
             if not self.skip_word(allocop):
@@ -807,7 +811,7 @@ class DefinitionParser(object):
         return CastOpDefExpr(type)
     def _parse_name(self):
-        if not self.match(_identifier_re):
+        if not self.match(_argument_name_re):
             self.fail('expected name')
         identifier = self.matched_text
@@ -1004,8 +1008,13 @@ class DefinitionParser(object):
             self.skip_ws()
             argtype = self._parse_type()
-            argname = default = None
             self.skip_ws()
+            if unicode(argtype) == u"...":
+                if not self.skip_string(')'):
+                    self.fail("var arg must be the last argument")
+                args.append(ArgumentDefExpr(None, argtype, None))
+                break
+            argname = default = None
             if self.skip_string('='):
                 self.pos += 1
                 default = self._parse_default_expr()
@@ -1072,6 +1081,11 @@ class DefinitionParser(object):
             self.skip_ws()
         else:
             explicit = False
+        if self.skip_word('virtual'):
+            virtual = True
+            self.skip_ws()
+        else:
+            virtual = False
         rv = self._parse_type()
         self.skip_ws()
         # some things just don't have return values
@@ -1081,11 +1095,26 @@ class DefinitionParser(object):
         else:
             name = self._parse_type()
         return FuncDefExpr(name, visibility, static, explicit, rv,
-                           *self._parse_signature())
+                           *self._parse_signature(), virtual = virtual)
     def parse_class(self):
         visibility, static = self._parse_visibility_static()
-        return ClassDefExpr(self._parse_type(), visibility, static)
+        typename = self._parse_type()
+        parent = None
+        self.skip_ws()
+        parents = []
+        if self.skip_string(':'):
+            while not self.eof:
+                self.skip_ws()
+                classname_pos = self.pos
+                pvisibility, pstatic = self._parse_visibility_static()
+                if pstatic:
+                    self.fail('unsepected static keyword, got %r' %
+                              self.definition[self.classname_pos:])
+                parents.append(ClassDefExpr(self._parse_type(), pvisibility, pstatic))
+                if not self.skip_string(','):
+                    break
+        return ClassDefExpr(typename, visibility, static, parents)
     def read_rest(self):
         rv = self.definition[self.pos:]
@@ -1212,8 +1241,8 @@ class OCVClassObject(OCVObject):
     object_annotation = "class "
     object_long_name = "class"
-    def attach_modifiers(self, node, obj):
-        if obj.visibility != 'public':
+    def attach_modifiers(self, node, obj, skip_visibility = 'public'):
+        if obj.visibility != skip_visibility:
             node += addnodes.desc_annotation(obj.visibility,
                                              obj.visibility)
             node += nodes.Text(' ')
@@ -1231,6 +1260,15 @@ class OCVClassObject(OCVObject):
         self.attach_modifiers(signode, cls)
         signode += addnodes.desc_annotation(self.__class__.object_annotation, self.__class__.object_annotation)
         self.attach_name(signode, cls.name)
+        first_parent = True
+        for p in cls.parents:
+            if first_parent:
+                signode += nodes.Text(' : ')
+                first_parent = False
+            else:
+                signode += nodes.Text(', ')
+            self.attach_modifiers(signode, p, None)
+            self.attach_name(signode, p.name)
 class OCVStructObject(OCVClassObject):
     object_annotation = "struct "
@@ -1263,6 +1301,9 @@ class OCVMemberObject(OCVObject):
         return ''
     def parse_definition(self, parser):
+        parent_class = self.env.temp_data.get('ocv:parent')
+        if parent_class is None:
+            parser.fail("missing parent structure/class")
         return parser.parse_member_object()
     def describe_signature(self, signode, obj):
@@ -1298,7 +1339,12 @@ class OCVFunctionObject(OCVObject):
             self.attach_type(param, arg.type)
             param += nodes.Text(u' ')
             #param += nodes.emphasis(unicode(arg.name), unicode(arg.name))
-            param += nodes.strong(unicode(arg.name), unicode(arg.name))
+            sbrIdx = unicode(arg.name).find("[")
+            if sbrIdx < 0:
+                param += nodes.strong(unicode(arg.name), unicode(arg.name))
+            else:
+                param += nodes.strong(unicode(arg.name)[:sbrIdx], unicode(arg.name)[:sbrIdx])
+                param += nodes.Text(unicode(arg.name)[sbrIdx:])
             if arg.default is not None:
                 def_ = u'=' + unicode(arg.default)
                 #param += nodes.emphasis(def_, def_)
@@ -1325,6 +1371,9 @@ class OCVFunctionObject(OCVObject):
         if func.explicit:
             signode += addnodes.desc_annotation('explicit', 'explicit')
             signode += nodes.Text(' ')
+        if func.virtual:
+            signode += addnodes.desc_annotation('virtual', 'virtual')
+            signode += nodes.Text(' ')
         # return value is None for things with a reverse return value
         # such as casting operator definitions or constructors
         # and destructors.
...
@@ -199,7 +199,7 @@ protected:
     int is_supported(const char* supp_modes_key, const char* mode)
     {
         const char* supported_modes = params.get(supp_modes_key);
-        return strstr(supported_modes, mode) > 0;
+        return (supported_modes && mode && (strstr(supported_modes, mode) > 0));
     }
     float getFocusDistance(int focus_distance_type)
...
@@ -215,8 +215,7 @@ CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
 CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
                                     CvPoint2D32f* corners,
                                     int* corner_count CV_DEFAULT(NULL),
-                                    int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+
-                                        CV_CALIB_CB_NORMALIZE_IMAGE) );
+                                    int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
 /* Draws individual chessboard corners or the whole chessboard detected */
 CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
@@ -474,7 +473,7 @@ enum
 CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
                             InputArray cameraMatrix, InputArray distCoeffs,
                             OutputArray rvec, OutputArray tvec,
-                            bool useExtrinsicGuess=false, int flags=0);
+                            bool useExtrinsicGuess=false, int flags=ITERATIVE);
 //! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
 CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
@@ -488,7 +487,7 @@ CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
                                   float reprojectionError = 8.0,
                                   int minInliersCount = 100,
                                   OutputArray inliers = noArray(),
-                                  int flags = 0);
+                                  int flags = ITERATIVE);
 //! initializes camera matrix from a few 3D points and the corresponding projections.
 CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
@@ -501,8 +500,7 @@ enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
 //! finds checkerboard pattern of the specified size in the image
 CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
                                          OutputArray corners,
-                                         int flags=CALIB_CB_ADAPTIVE_THRESH+
-                                              CALIB_CB_NORMALIZE_IMAGE );
+                                         int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
 //! finds subpixel-accurate positions of the chessboard corners
 CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
@@ -574,8 +572,7 @@ CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
                                      CV_OUT InputOutputArray distCoeffs2,
                                      Size imageSize, OutputArray R,
                                      OutputArray T, OutputArray E, OutputArray F,
-                                     TermCriteria criteria = TermCriteria(TermCriteria::COUNT+
-                                         TermCriteria::EPS, 30, 1e-6),
+                                     TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
                                      int flags=CALIB_FIX_INTRINSIC );
@@ -642,7 +639,7 @@ CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
                                    double param1=3., double param2=0.99);
 //! finds coordinates of epipolar lines corresponding the specified points
-CV_EXPORTS void computeCorrespondEpilines( InputArray points1,
+CV_EXPORTS void computeCorrespondEpilines( InputArray points,
                                            int whichImage, InputArray F,
                                            OutputArray lines );
@@ -743,9 +740,9 @@ CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
                                       bool handleMissingValues=false,
                                       int ddepth=-1 );
-CV_EXPORTS_W int estimateAffine3D(InputArray _from, InputArray _to,
-                                  OutputArray _out, OutputArray _inliers,
-                                  double param1=3, double param2=0.99);
+CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
+                                  OutputArray out, OutputArray inliers,
+                                  double ransacThreshold=3, double confidence=0.99);
 }
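The ``flags`` defaults above are now spelled ``ITERATIVE`` rather than the bare ``0`` (``ITERATIVE`` is defined as 0 in this header, so the behaviour does not change). A minimal, non-authoritative sketch of how these pose estimators are typically called; the input data is assumed to come from a known calibration target ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>

    // Estimate the camera pose from known 3D-2D correspondences (illustrative only).
    void estimatePoseExample(const std::vector<cv::Point3f>& objectPoints,
                             const std::vector<cv::Point2f>& imagePoints,
                             const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        cv::Mat rvec, tvec;
        // flags defaults to ITERATIVE, i.e. the Levenberg-Marquardt based refinement.
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

        // Robust variant that tolerates outlier correspondences (all defaults kept).
        cv::Mat rvecR, tvecR;
        cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvecR, tvecR);
    }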
...
@@ -3,3 +3,8 @@ contrib. Contributed/Experimental Stuff
 ***************************************
 The module contains some recently added functionality that has not been stabilized, or functionality that is considered optional.
+
+.. toctree::
+    :maxdepth: 2
+
+    stereo
\ No newline at end of file
Stereo Correspondence
========================================

.. highlight:: cpp

StereoVar
----------

.. ocv:class:: StereoVar

Class for computing stereo correspondence using the variational matching algorithm ::

    class StereoVar
    {
        StereoVar();
        StereoVar( int levels, double pyrScale,
                   int nIt, int minDisp, int maxDisp,
                   int poly_n, double poly_sigma, float fi,
                   float lambda, int penalization, int cycle,
                   int flags);
        virtual ~StereoVar();

        virtual void operator()(InputArray left, InputArray right, OutputArray disp);

        int levels;
        double pyrScale;
        int nIt;
        int minDisp;
        int maxDisp;
        int poly_n;
        double poly_sigma;
        float fi;
        float lambda;
        int penalization;
        int cycle;
        int flags;

        ...
    };

The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows:

* The automatic initialization of method's parameters is added.
* The method of Smart Iteration Distribution (SID) is implemented.
* The support of Multi-Level Adaptation Technique (MLAT) is not included.
* The method of dynamic adaptation of method's parameters is not included.

StereoVar::StereoVar
--------------------------

.. ocv:function:: StereoVar::StereoVar()

.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags )

    The constructor

    :param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if the flag USE_AUTO_PARAMS is set.

    :param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous one. (This parameter is ignored if the flag USE_AUTO_PARAMS is set.)

    :param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the iterations are redistributed so that more of them are done on the coarser levels.)

    :param minDisp: Minimum possible disparity value. It can be negative if the left and right input images are swapped.

    :param maxDisp: Maximum possible disparity value.

    :param poly_n: Size of the pixel neighbourhood used to find the polynomial expansion in each pixel. Larger values mean that the image is approximated with smoother surfaces, yielding a more robust algorithm and a more blurred motion field. Typically, poly_n = 3, 5 or 7.

    :param poly_sigma: Standard deviation of the Gaussian that is used to smooth the derivatives used as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1; for poly_n=7 a good value would be poly_sigma=1.5.

    :param fi: The smoothness parameter, or the weight coefficient for the smoothness term.

    :param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.)

    :param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge-preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if the flag USE_AUTO_PARAMS is set.)

    :param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if the flag USE_AUTO_PARAMS is set.)

    :param flags: The operation flags; can be a combination of the following:

        * USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation.

        * USE_EQUALIZE_HIST: Use histogram equalization in the pre-processing phase.

        * USE_SMART_ID: Use the smart iteration distribution (SID).

        * USE_AUTO_PARAMS: Allow the method to initialize the main parameters.

        * USE_MEDIAN_FILTERING: Use the median filter of the solution in the post-processing phase.
The first constructor initializes ``StereoVar`` with all the default parameters. So, you only have to set ``StereoVar::maxDisp`` and / or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value.

StereoVar::operator ()
-----------------------

.. ocv:function:: void StereoVar::operator()( const Mat& left, const Mat& right, Mat& disp )

    Computes disparity using the variational algorithm for a rectified stereo pair.

    :param left: Left 8-bit single-channel or 3-channel image.

    :param right: Right image of the same size and the same type as the left one.

    :param disp: Output disparity map. It is an 8-bit signed single-channel image of the same size as the input images.

The method executes the variational algorithm on a rectified stereo pair. See the ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method.

**Note**:

The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
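As a rough illustration of the constructor and ``operator()`` described above (a sketch only; the disparity range is an assumption for one particular rectified pair) ::

    #include <opencv2/core/core.hpp>
    #include <opencv2/contrib/contrib.hpp>

    void computeDisparity(const cv::Mat& rectifiedLeft, const cv::Mat& rectifiedRight)
    {
        cv::StereoVar stereo;      // first constructor: all parameters keep their defaults
        stereo.minDisp = -64;      // assumed disparity range for this particular pair
        stereo.maxDisp = 0;

        cv::Mat disp;              // 8-bit signed single-channel disparity map
        stereo(rectifiedLeft, rectifiedRight, disp);
    }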
@@ -7,17 +7,17 @@ kmeans
 ------
 Finds centers of clusters and groups input samples around the clusters.
-.. ocv:function:: double kmeans( InputArray samples, int clusterCount, InputOutputArray labels, TermCriteria criteria, int attempts, int flags, OutputArray centers=noArray() )
+.. ocv:function:: double kmeans( InputArray data, int K, InputOutputArray bestLabels, TermCriteria criteria, int attempts, int flags, OutputArray centers=noArray() )
 .. ocv:pyfunction:: cv2.kmeans(data, K, criteria, attempts, flags[, bestLabels[, centers]]) -> retval, bestLabels, centers
-.. ocv:cfunction:: int cvKMeans2(const CvArr* samples, int clusterCount, CvArr* labels, CvTermCriteria criteria, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* centers=0, double* compactness=0)
+.. ocv:cfunction:: int cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, CvTermCriteria termcrit, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* _centers=0, double* compactness=0 )
-.. ocv:pyoldfunction:: cv.KMeans2(samples, clusterCount, labels, criteria)-> None
+.. ocv:pyoldfunction:: cv.KMeans2(samples, nclusters, labels, termcrit, attempts=1, flags=0, centers=None) -> float
     :param samples: Floating-point matrix of input samples, one row per sample.
-    :param clusterCount: Number of clusters to split the set by.
+    :param cluster_count: Number of clusters to split the set by.
     :param labels: Input/output integer array that stores the cluster indices for every sample.
@@ -40,7 +40,7 @@ Finds centers of clusters and groups input samples around the clusters.
     :param compactness: The returned value that is described below.
 The function ``kmeans`` implements a k-means algorithm that finds the
-centers of ``clusterCount`` clusters and groups the input samples
+centers of ``cluster_count`` clusters and groups the input samples
 around the clusters. As an output,
 :math:`\texttt{labels}_i` contains a 0-based cluster index for
 the sample stored in the
...
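For orientation, a short sketch of how the C++ ``kmeans`` overload documented in the hunk above is usually invoked (illustrative only; the cluster count and termination criteria are assumptions) ::

    #include <opencv2/core/core.hpp>

    // Cluster the rows of a CV_32F sample matrix into K groups; returns the compactness.
    double clusterSamples(const cv::Mat& samples)
    {
        const int K = 3;                               // assumed number of clusters
        cv::Mat labels, centers;
        cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 1.0);
        // 5 attempts with different initial labellings; the best labelling is returned.
        return cv::kmeans(samples, K, labels, criteria, 5, cv::KMEANS_PP_CENTERS, centers);
    }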
@@ -93,9 +93,9 @@ Computes the cube root of an argument.
 .. ocv:pyfunction:: cv2.cubeRoot(val) -> retval
-.. ocv:cfunction:: float cvCbrt(float val)
+.. ocv:cfunction:: float cvCbrt( float value )
-.. ocv:pyoldfunction:: cv.Cbrt(val)-> float
+.. ocv:pyoldfunction:: cv.Cbrt(value)-> float
     :param val: A function argument.
@@ -182,7 +182,7 @@ Signals an error and raises an exception.
 .. ocv:function:: void error( const Exception& exc )
-.. ocv:cfunction:: int cvError( int status, const char* funcName, const char* err_msg, const char* filename, int line )
+.. ocv:cfunction:: void cvError( int status, const char* func_name, const char* err_msg, const char* file_name, int line )
     :param exc: Exception to throw.
@@ -209,7 +209,7 @@ The macro ``CV_Error_`` can be used to construct an error message on-fly to incl
 Exception
 ---------
-.. ocv:class:: Exception
+.. ocv:class:: Exception : public std::exception
 Exception class passed to an error. ::
@@ -244,7 +244,8 @@ fastMalloc
 --------------
 Allocates an aligned memory buffer.
-.. ocv:function:: void* fastMalloc(size_t size)
+.. ocv:function:: void* fastMalloc( size_t bufSize )
 .. ocv:cfunction:: void* cvAlloc( size_t size )
     :param size: Allocated buffer size.
@@ -276,7 +277,7 @@ Returns a text string formatted using the ``printf``\ -like expression.
     :param fmt: ``printf`` -compatible formatting specifiers.
 The function acts like ``sprintf`` but forms and returns an STL string. It can be used to form an error message in the
-:ocv:func:`Exception` constructor.
+:ocv:class:`Exception` constructor.
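A small, hedged illustration of the ``format``/``Exception`` relationship mentioned above (the error code and message text are arbitrary) ::

    #include <opencv2/core/core.hpp>

    void checkSize(int rows, int cols)
    {
        if (rows <= 0 || cols <= 0)
            // format() builds the message string; CV_Error wraps it into a cv::Exception.
            CV_Error(CV_StsBadSize, cv::format("invalid matrix size: %dx%d", rows, cols));
    }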
@@ -286,7 +287,7 @@ Returns true if the specified feature is supported by the host hardware.
 .. ocv:function:: bool checkHardwareSupport(int feature)
 .. ocv:cfunction:: int cvCheckHardwareSupport(int feature)
-.. ocv:pyfunction:: checkHardwareSupport(feature) -> Bool
+.. ocv:pyfunction:: cv2.checkHardwareSupport(feature) -> retval
     :param feature: The feature of interest, one of:
@@ -419,13 +420,13 @@ setUseOptimized
 -----------------
 Enables or disables the optimized code.
-.. ocv:function:: void setUseOptimized(bool onoff)
+.. ocv:function:: int cvUseOptimized( int on_off )
 .. ocv:pyfunction:: cv2.setUseOptimized(onoff) -> None
-.. ocv:cfunction:: int cvUseOptimized( int onoff )
+.. ocv:cfunction:: int cvUseOptimized( int on_off )
-    :param onoff: The boolean flag specifying whether the optimized code should be used (``onoff=true``) or not (``onoff=false``).
+    :param on_off: The boolean flag specifying whether the optimized code should be used (``on_off=true``) or not (``on_off=false``).
 The function can be used to dynamically turn on and off optimized code (code that uses SSE2, AVX, and other instructions on the platforms that support it). It sets a global flag that is further checked by OpenCV functions. Since the flag is not checked in the inner OpenCV loops, it is only safe to call the function on the very top level in your application where you can be sure that no other OpenCV function is currently executed.
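A minimal sketch of the toggle described in this paragraph (the code to be profiled is elided) ::

    #include <opencv2/core/core.hpp>

    void compareOptimizedPaths()
    {
        cv::setUseOptimized(false);         // force the plain C/C++ code paths
        // ... run and time the code of interest here ...

        cv::setUseOptimized(true);          // re-enable SSE2/AVX dispatch
        bool enabled = cv::useOptimized();  // query the current global flag
        (void)enabled;
    }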
...
@@ -663,7 +663,7 @@ FileNodeIterator::operator +=
 -----------------------------
 Moves iterator forward by the specified offset.
-.. ocv:function:: FileNodeIterator& FileNodeIterator::operator += (int ofs)
+.. ocv:function:: FileNodeIterator& FileNodeIterator::operator +=( int ofs )
     :param ofs: Offset (possibly negative) to move the iterator.
@@ -672,7 +672,7 @@ FileNodeIterator::operator -=
 -----------------------------
 Moves iterator backward by the specified offset (possibly negative).
-.. ocv:function:: FileNodeIterator& FileNodeIterator::operator -= (int ofs)
+.. ocv:function:: FileNodeIterator& FileNodeIterator::operator -=( int ofs )
     :param ofs: Offset (possibly negative) to move the iterator.
...
@@ -897,7 +897,7 @@ class CV_EXPORTS RotatedRect
 public:
     //! various constructors
     RotatedRect();
-    RotatedRect(const Point2f& _center, const Size2f& _size, float _angle);
+    RotatedRect(const Point2f& center, const Size2f& size, float angle);
     RotatedRect(const CvBox2D& box);
     //! returns 4 vertices of the rectangle
@@ -1634,22 +1634,22 @@ public:
     Mat();
     //! constructs 2D matrix of the specified size and type
     // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
-    Mat(int _rows, int _cols, int _type);
-    Mat(Size _size, int _type);
+    Mat(int rows, int cols, int type);
+    Mat(Size size, int type);
     //! constucts 2D matrix and fills it with the specified value _s.
-    Mat(int _rows, int _cols, int _type, const Scalar& _s);
-    Mat(Size _size, int _type, const Scalar& _s);
+    Mat(int rows, int cols, int type, const Scalar& s);
+    Mat(Size size, int type, const Scalar& s);
     //! constructs n-dimensional matrix
-    Mat(int _ndims, const int* _sizes, int _type);
-    Mat(int _ndims, const int* _sizes, int _type, const Scalar& _s);
+    Mat(int ndims, const int* sizes, int type);
+    Mat(int ndims, const int* sizes, int type, const Scalar& s);
     //! copy constructor
     Mat(const Mat& m);
     //! constructor for matrix headers pointing to user-allocated data
-    Mat(int _rows, int _cols, int _type, void* _data, size_t _step=AUTO_STEP);
-    Mat(Size _size, int _type, void* _data, size_t _step=AUTO_STEP);
-    Mat(int _ndims, const int* _sizes, int _type, void* _data, const size_t* _steps=0);
+    Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP);
+    Mat(Size size, int type, void* data, size_t step=AUTO_STEP);
+    Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0);
     //! creates a matrix header for a part of the bigger matrix
     Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all());
@@ -1664,11 +1664,9 @@ public:
     //! builds matrix from std::vector with or without copying the data
     template<typename _Tp> explicit Mat(const vector<_Tp>& vec, bool copyData=false);
     //! builds matrix from cv::Vec; the data is copied by default
-    template<typename _Tp, int n> explicit Mat(const Vec<_Tp, n>& vec,
-                                               bool copyData=true);
+    template<typename _Tp, int n> explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true);
     //! builds matrix from cv::Matx; the data is copied by default
-    template<typename _Tp, int m, int n> explicit Mat(const Matx<_Tp, m, n>& mtx,
-                                                      bool copyData=true);
+    template<typename _Tp, int m, int n> explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true);
     //! builds matrix from a 2D point
     template<typename _Tp> explicit Mat(const Point_<_Tp>& pt, bool copyData=true);
     //! builds matrix from a 3D point
@@ -1721,8 +1719,8 @@ public:
     Mat& setTo(InputArray value, InputArray mask=noArray());
     //! creates alternative matrix header for the same data, with different
     // number of channels and/or different number of rows. see cvReshape.
-    Mat reshape(int _cn, int _rows=0) const;
-    Mat reshape(int _cn, int _newndims, const int* _newsz) const;
+    Mat reshape(int cn, int rows=0) const;
+    Mat reshape(int cn, int newndims, const int* newsz) const;
     //! matrix transposition by means of matrix expressions
     MatExpr t() const;
@@ -1748,9 +1746,9 @@ public:
     //! allocates new matrix data unless the matrix already has specified size and type.
     // previous data is unreferenced if needed.
-    void create(int _rows, int _cols, int _type);
-    void create(Size _size, int _type);
-    void create(int _ndims, const int* _sizes, int _type);
+    void create(int rows, int cols, int type);
+    void create(Size size, int type);
+    void create(int ndims, const int* sizes, int type);
     //! increases the reference counter; use with care to avoid memleaks
     void addref();
@@ -1966,7 +1964,7 @@ public:
     enum { UNIFORM=0, NORMAL=1 };
     RNG();
-    RNG(uint64 _state);
+    RNG(uint64 state);
     //! updates the state and returns the next 32-bit unsigned integer random number
     unsigned next();
@@ -1976,7 +1974,7 @@ public:
     operator short();
     operator unsigned();
     //! returns a random integer sampled uniformly from [0, N).
-    unsigned operator()(unsigned N);
+    unsigned operator ()(unsigned N);
     unsigned operator ()();
     operator int();
     operator float();
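A brief sketch of the ``RNG`` members listed above (the seed is arbitrary) ::

    #include <opencv2/core/core.hpp>

    void rngExamples()
    {
        cv::RNG rng(12345);         // construct from an explicit 64-bit state
        unsigned raw = rng.next();  // next raw 32-bit random number
        int die = rng(6);           // operator(): uniform integer in [0, 6)
        float f = (float)rng;       // conversion operator: uniform float in [0, 1)
        (void)raw; (void)die; (void)f;
    }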
@@ -4121,9 +4119,9 @@ public:
     //! moves iterator to the previous node
     FileNodeIterator operator -- (int);
     //! moves iterator forward by the specified offset (possibly negative)
-    FileNodeIterator& operator += (int);
+    FileNodeIterator& operator += (int ofs);
     //! moves iterator backward by the specified offset (possibly negative)
-    FileNodeIterator& operator -= (int);
+    FileNodeIterator& operator -= (int ofs);
     //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format
     FileNodeIterator& readRaw( const string& fmt, uchar* vec,
...
@@ -1130,7 +1130,7 @@ CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z )
 /******************************** CvSize's & CvBox **************************************/
-typedef struct
+typedef struct CvSize
 {
     int width;
     int height;
...
@@ -13,7 +13,7 @@ descriptor extractors inherit the
 DescriptorExtractor
 -------------------
-.. ocv:class:: DescriptorExtractor
+.. ocv:class:: DescriptorExtractor : public Algorithm
 Abstract base class for computing descriptors for image keypoints. ::
@@ -65,25 +65,6 @@ Computes the descriptors for a set of keypoints detected in an image (first vari
     :param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are descriptors computed for a ``keypoints[i]`. Row ``j`` is the ``keypoints`` (or ``keypoints[i]``) is the descriptor for keypoint ``j``-th keypoint.
-DescriptorExtractor::read
------------------------------
-Reads the object of a descriptor extractor from a file node.
-.. ocv:function:: void DescriptorExtractor::read( const FileNode& fn )
-    :param fn: File node from which the detector is read.
-DescriptorExtractor::write
-------------------------------
-Writes the object of a descriptor extractor to a file storage.
-.. ocv:function:: void DescriptorExtractor::write( FileStorage& fs ) const
-    :param fs: File storage where the detector is written.
 DescriptorExtractor::create
 -------------------------------
@@ -107,7 +88,7 @@ for example: ``"OpponentSIFT"`` .
 OpponentColorDescriptorExtractor
 --------------------------------
-.. ocv:class:: OpponentColorDescriptorExtractor
+.. ocv:class:: OpponentColorDescriptorExtractor : public DescriptorExtractor
 Class adapting a descriptor extractor to compute descriptors in the Opponent Color Space
 (refer to Van de Sande et al., CGIV 2008 *Color Descriptors for Object Category Recognition*).
@@ -132,7 +113,7 @@ them into a single color descriptor. ::
 BriefDescriptorExtractor
 ------------------------
-.. ocv:class:: BriefDescriptorExtractor
+.. ocv:class:: BriefDescriptorExtractor : public DescriptorExtractor
 Class for computing BRIEF descriptors described in a paper of Calonder M., Lepetit V.,
 Strecha C., Fua P. *BRIEF: Binary Robust Independent Elementary Features* ,
...
@@ -11,7 +11,7 @@ descriptor matchers inherit the
 DMatch
 ------
-.. ocv:class:: DMatch
+.. ocv:struct:: DMatch
 Class for matching keypoint descriptors: query descriptor index,
 train descriptor index, train image index, and distance between descriptors. ::
@@ -40,7 +40,7 @@ train descriptor index, train image index, and distance between descriptors. ::
 DescriptorMatcher
 -----------------
-.. ocv:class:: DescriptorMatcher
+.. ocv:class:: DescriptorMatcher : public Algorithm
 Abstract base class for matching keypoint descriptors. It has two groups
 of match methods: for matching descriptors of an image with another image or
@@ -227,7 +227,7 @@ DescriptorMatcher::clone
 ----------------------------
 Clones the matcher.
-.. ocv:function:: Ptr<DescriptorMatcher> DescriptorMatcher::clone( bool emptyTrainData ) const
+.. ocv:function:: Ptr<DescriptorMatcher> DescriptorMatcher::clone( bool emptyTrainData=false )
     :param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters but with empty train data.
@@ -258,25 +258,25 @@ Creates a descriptor matcher of a given type with the default parameters (using
 BFMatcher
 -----------------
-.. ocv:class::BFMatcher
+.. ocv:class:: BFMatcher : public DescriptorMatcher
-Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets. ::
+Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets.
 BFMatcher::BFMatcher
 --------------------
 Brute-force matcher constructor.
-.. ocv:function:: BFMatcher::BFMatcher( int distanceType, bool crossCheck=false )
+.. ocv:function:: BFMatcher::BFMatcher( int normType, bool crossCheck=false )
-    :param distanceType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
+    :param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
     :param crossCheck: If it is false, this is will be default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMathcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
 FlannBasedMatcher
 -----------------
-.. ocv:class:: FlannBasedMatcher
+.. ocv:class:: FlannBasedMatcher : public DescriptorMatcher
 Flann-based descriptor matcher. This matcher trains :ocv:class:`flann::Index_` on a train descriptor collection and calls its nearest search methods to find the best matches. So, this matcher may be faster when matching a large train collection than the brute force matcher. ``FlannBasedMatcher`` does not support masking permissible matches of descriptor sets because ``flann::Index`` does not support this. ::
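A hedged usage sketch for the ``BFMatcher`` constructor documented in the hunk above, assuming binary descriptors such as ORB or BRIEF ::

    #include <opencv2/features2d/features2d.hpp>
    #include <vector>

    void matchDescriptors(const cv::Mat& queryDescriptors, const cv::Mat& trainDescriptors)
    {
        // crossCheck=true keeps only mutually-nearest pairs, as described above.
        cv::BFMatcher matcher(cv::NORM_HAMMING, true);
        std::vector<cv::DMatch> matches;
        matcher.match(queryDescriptors, trainDescriptors, matches);
    }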
...
@@ -12,7 +12,7 @@ KeyPoint
 --------
 .. ocv:class:: KeyPoint
 Data structure for salient point detectors.
 .. ocv:member:: Point2f pt
@@ -48,7 +48,7 @@ The keypoint constructors
 .. ocv:function:: KeyPoint::KeyPoint(float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1)
-.. ocv:pyfunction:: cv2.KeyPoint(x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]) -> <KeyPoint object>
+.. ocv:pyfunction:: cv2.KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) -> <KeyPoint object>
     :param x: x-coordinate of the keypoint
@@ -69,7 +69,7 @@ The keypoint constructors
 FeatureDetector
 ---------------
-.. ocv:class:: FeatureDetector
+.. ocv:class:: FeatureDetector : public Algorithm
 Abstract base class for 2D image feature detectors. ::
@@ -112,22 +112,6 @@ Detects keypoints in an image (first variant) or image set (second variant).
     :param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``.
-FeatureDetector::read
--------------------------
-Reads a feature detector object from a file node.
-.. ocv:function:: void FeatureDetector::read( const FileNode& fn )
-    :param fn: File node from which the detector is read.
-FeatureDetector::write
---------------------------
-Writes a feature detector object to a file storage.
-.. ocv:function:: void FeatureDetector::write( FileStorage& fs ) const
-    :param fs: File storage where the detector is written.
 FeatureDetector::create
 ---------------------------
 Creates a feature detector by its name.
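For orientation, a minimal sketch of the factory mentioned here; the detector name is an assumption (adapter-prefixed names such as ``"GridFAST"`` or ``"PyramidSTAR"`` work the same way) ::

    #include <opencv2/features2d/features2d.hpp>
    #include <vector>

    void detectWithNamedDetector(const cv::Mat& image)
    {
        cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("FAST");
        std::vector<cv::KeyPoint> keypoints;
        detector->detect(image, keypoints);
    }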
@@ -156,7 +140,7 @@ for example: ``"GridFAST"``, ``"PyramidSTAR"`` .
 FastFeatureDetector
 -------------------
-.. ocv:class:: FastFeatureDetector
+.. ocv:class:: FastFeatureDetector : public FeatureDetector
 Wrapping class for feature detection using the
 :ocv:func:`FAST` method. ::
@@ -173,7 +157,7 @@ Wrapping class for feature detection using the
 GoodFeaturesToTrackDetector
 ---------------------------
-.. ocv:class:: GoodFeaturesToTrackDetector
+.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector
 Wrapping class for feature detection using the
 :ocv:func:`goodFeaturesToTrack` function. ::
@@ -211,7 +195,7 @@ Wrapping class for feature detection using the
 MserFeatureDetector
 -------------------
-.. ocv:class:: MserFeatureDetector
+.. ocv:class:: MserFeatureDetector : public FeatureDetector
 Wrapping class for feature detection using the
 :ocv:class:`MSER` class. ::
@@ -233,7 +217,7 @@ Wrapping class for feature detection using the
 StarFeatureDetector
 -------------------
-.. ocv:class:: StarFeatureDetector
+.. ocv:class:: StarFeatureDetector : public FeatureDetector
 Wrapping class for feature detection using the
 :ocv:class:`StarDetector` class. ::
@@ -252,7 +236,7 @@ Wrapping class for feature detection using the
 DenseFeatureDetector
 --------------------
-.. ocv:class:: DenseFeatureDetector
+.. ocv:class:: DenseFeatureDetector : public FeatureDetector
 Class for generation of image features which are distributed densely and regularly over the image. ::
@@ -279,7 +263,7 @@ The detector generates several levels (in the amount of ``featureScaleLevels``)
 SimpleBlobDetector
 -------------------
-.. ocv:class:: SimpleBlobDetector
+.. ocv:class:: SimpleBlobDetector : public FeatureDetector
 Class for extracting blobs from an image. ::
@@ -344,7 +328,7 @@ Default values of parameters are tuned to extract dark circular blobs.
 GridAdaptedFeatureDetector
 --------------------------
-.. ocv:class:: GridAdaptedFeatureDetector
+.. ocv:class:: GridAdaptedFeatureDetector : public FeatureDetector
 Class adapting a detector to partition the source image into a grid and detect points in each cell. ::
@@ -369,7 +353,7 @@ Class adapting a detector to partition the source image into a grid and detect p
 PyramidAdaptedFeatureDetector
 -----------------------------
-.. ocv:class:: PyramidAdaptedFeatureDetector
+.. ocv:class:: PyramidAdaptedFeatureDetector : public FeatureDetector
 Class adapting a detector to detect points over multiple levels of a Gaussian pyramid. Consider using this class for detectors that are not inherently scaled. ::
@@ -387,7 +371,7 @@ Class adapting a detector to detect points over multiple levels of a Gaussian py
 DynamicAdaptedFeatureDetector
 -----------------------------
-.. ocv:class:: DynamicAdaptedFeatureDetector
+.. ocv:class:: DynamicAdaptedFeatureDetector : public FeatureDetector
 Adaptively adjusting detector that iteratively detects features until the desired number is found. ::
@@ -431,7 +415,7 @@ DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector
 ----------------------------------------------------------------
 The constructor
-.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features, int max_features, int max_iters )
+.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 )
     :param adjuster: :ocv:class:`AdjusterAdapter` that detects features and adjusts parameters.
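The constructor above is typically fed one of the adjusters documented later in this file; a rough sketch (the threshold and target feature counts are illustrative) ::

    #include <opencv2/features2d/features2d.hpp>
    #include <vector>

    void detectAboutHundredKeypoints(const cv::Mat& image)
    {
        // Ask for roughly 100-110 FAST keypoints, adjusting the threshold up to 10 times.
        cv::DynamicAdaptedFeatureDetector detector(new cv::FastAdjuster(20, true), 100, 110, 10);
        std::vector<cv::KeyPoint> keypoints;
        detector.detect(image, keypoints);
    }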
@@ -443,7 +427,7 @@ The constructor
 AdjusterAdapter
 ---------------
-.. ocv:class:: AdjusterAdapter
+.. ocv:class:: AdjusterAdapter : public FeatureDetector
 Class providing an interface for adjusting parameters of a feature detector. This interface is used by :ocv:class:`DynamicAdaptedFeatureDetector` . It is a wrapper for :ocv:class:`FeatureDetector` that enables adjusting parameters after feature detection. ::
@@ -522,7 +506,7 @@ Creates an adjuster adapter by name
 FastAdjuster
 ------------
-.. ocv:class:: FastAdjuster
+.. ocv:class:: FastAdjuster : public AdjusterAdapter
 :ocv:class:`AdjusterAdapter` for :ocv:class:`FastFeatureDetector`. This class decreases or increases the threshold value by 1. ::
@@ -535,7 +519,7 @@ FastAdjuster
 StarAdjuster
 ------------
-.. ocv:class:: StarAdjuster
+.. ocv:class:: StarAdjuster : public AdjusterAdapter
 :ocv:class:`AdjusterAdapter` for :ocv:class:`StarFeatureDetector`. This class adjusts the ``responseThreshhold`` of ``StarFeatureDetector``. ::
...
...@@ -130,7 +130,7 @@ GenericDescriptorMatcher::isMaskSupported ...@@ -130,7 +130,7 @@ GenericDescriptorMatcher::isMaskSupported
--------------------------------------------- ---------------------------------------------
Returns ``true`` if a generic descriptor matcher supports masking permissible matches. Returns ``true`` if a generic descriptor matcher supports masking permissible matches.
.. ocv:function:: void GenericDescriptorMatcher::isMaskSupported() .. ocv:function:: bool GenericDescriptorMatcher::isMaskSupported()
...@@ -231,7 +231,7 @@ GenericDescriptorMatcher::clone ...@@ -231,7 +231,7 @@ GenericDescriptorMatcher::clone
----------------------------------- -----------------------------------
Clones the matcher. Clones the matcher.
.. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData ) const .. ocv:function:: Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::clone( bool emptyTrainData=false ) const
:param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies :param emptyTrainData: If ``emptyTrainData`` is false, the method creates a deep copy of the object, that is, copies
both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters both parameters and train data. If ``emptyTrainData`` is true, the method creates an object copy with the current parameters
...@@ -240,7 +240,7 @@ Clones the matcher. ...@@ -240,7 +240,7 @@ Clones the matcher.
VectorDescriptorMatcher VectorDescriptorMatcher
----------------------- -----------------------
.. ocv:class:: VectorDescriptorMatcher .. ocv:class:: VectorDescriptorMatcher : public GenericDescriptorMatcher
Class used for matching descriptors that can be described as vectors in a finite-dimensional space. :: Class used for matching descriptors that can be described as vectors in a finite-dimensional space. ::
......
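A construction sketch (assumed OpenCV 2.4-era factory names, not taken from this patch) that wraps an ordinary extractor/matcher pair in the :ocv:class:`GenericDescriptorMatcher` interface and clones it without train data: ::

    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    // ORB descriptors matched with Hamming distance; any extractor/matcher
    // pair registered with the create() factories would do.
    VectorDescriptorMatcher matcher(DescriptorExtractor::create("ORB"),
                                    DescriptorMatcher::create("BruteForce-Hamming"));

    // emptyTrainData=true: copy the parameters only, drop the train collection.
    Ptr<GenericDescriptorMatcher> copy = matcher.clone(true);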
...@@ -9,7 +9,7 @@ Draws the found matches of keypoints from two images. ...@@ -9,7 +9,7 @@ Draws the found matches of keypoints from two images.
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT ) .. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char>>& matchesMask= vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT ) .. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
:param img1: First source image. :param img1: First source image.
...@@ -65,13 +65,13 @@ drawKeypoints ...@@ -65,13 +65,13 @@ drawKeypoints
----------------- -----------------
Draws keypoints. Draws keypoints.
.. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImg, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT ) .. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
:param image: Source image. :param image: Source image.
:param keypoints: Keypoints from the source image. :param keypoints: Keypoints from the source image.
:param outImg: Output image. Its content depends on the ``flags`` value defining what is drawn in the output image. See possible ``flags`` bit values below. :param outImage: Output image. Its content depends on the ``flags`` value defining what is drawn in the output image. See possible ``flags`` bit values below.
:param color: Color of keypoints. :param color: Color of keypoints.
......
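A hedged sketch of ``drawKeypoints`` (the file names and the FAST threshold are illustrative; any detector could produce the keypoints): ::

    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        Mat image = imread("scene.png", 0);
        std::vector<KeyPoint> keypoints;
        FAST(image, keypoints, 20);                  // any detector works here
        Mat outImage;
        drawKeypoints(image, keypoints, outImage, Scalar::all(-1),
                      DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
        imwrite("keypoints.png", outImage);
        return 0;
    }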
...@@ -24,7 +24,7 @@ Detects corners using the FAST algorithm by [Rosten06]_. ...@@ -24,7 +24,7 @@ Detects corners using the FAST algorithm by [Rosten06]_.
MSER MSER
---- ----
.. ocv:class:: MSER .. ocv:class:: MSER : public FeatureDetector
Maximally stable extremal region extractor. :: Maximally stable extremal region extractor. ::
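A minimal extraction sketch with the default parameters (not part of this patch; the input is assumed to be a grayscale image): ::

    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        Mat gray = imread("scene.png", 0);
        MSER mser;                                   // default parameters
        std::vector<std::vector<Point> > regions;
        mser(gray, regions, Mat());                  // each region is a list of points
        return 0;
    }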
...@@ -50,7 +50,7 @@ http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http:/ ...@@ -50,7 +50,7 @@ http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http:/
ORB ORB
--- ---
.. ocv:class:: ORB .. ocv:class:: ORB : public Feature2D
Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation). Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation).
...@@ -60,8 +60,6 @@ ORB::ORB ...@@ -60,8 +60,6 @@ ORB::ORB
-------- --------
The ORB constructor The ORB constructor
.. ocv:function:: ORB::ORB()
.. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31) .. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31)
:param nfeatures: The maximum number of features to retain. :param nfeatures: The maximum number of features to retain.
......
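A usage sketch under the 2.4-era interface shown above (the 1000-feature budget is an arbitrary example value); detection and description happen in one call: ::

    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        Mat img = imread("scene.png", 0);
        ORB orb(1000);                               // keep up to 1000 keypoints
        std::vector<KeyPoint> keypoints;
        Mat descriptors;
        orb(img, Mat(), keypoints, descriptors);     // detect + compute
        return 0;
    }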
...@@ -54,7 +54,7 @@ BOWTrainer::descripotorsCount ...@@ -54,7 +54,7 @@ BOWTrainer::descripotorsCount
--------------------------------- ---------------------------------
Returns the count of all descriptors stored in the training set. Returns the count of all descriptors stored in the training set.
.. ocv:function:: const vector<Mat>& BOWTrainer::descripotorsCount() const .. ocv:function:: int BOWTrainer::descripotorsCount() const
...@@ -72,7 +72,7 @@ The vocabulary consists of cluster centers. So, this method returns the vocabula ...@@ -72,7 +72,7 @@ The vocabulary consists of cluster centers. So, this method returns the vocabula
BOWKMeansTrainer BOWKMeansTrainer
---------------- ----------------
.. ocv:class:: BOWKMeansTrainer .. ocv:class:: BOWKMeansTrainer : public BOWTrainer
:ocv:func:`kmeans` -based class to train visual vocabulary using the *bag of visual words* approach. :ocv:func:`kmeans` -based class to train visual vocabulary using the *bag of visual words* approach.
:: ::
......
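A clustering sketch (the 100-word vocabulary size and the helper function are illustrative, not from this patch); note that ``kmeans`` expects floating-point descriptors such as SIFT or SURF: ::

    #include <opencv2/features2d/features2d.hpp>
    using namespace cv;

    // descriptors: CV_32F matrices (one descriptor per row) gathered from the
    // training images beforehand; the 100-word vocabulary size is arbitrary.
    Mat buildVocabulary(const std::vector<Mat>& descriptors)
    {
        BOWKMeansTrainer trainer(100);
        for (size_t i = 0; i < descriptors.size(); i++)
            trainer.add(descriptors[i]);
        return trainer.cluster();                    // cluster centers = vocabulary
    }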
...@@ -276,7 +276,7 @@ public: ...@@ -276,7 +276,7 @@ public:
enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31,
int firstLevel = 0, int WTA_K=2, int scoreType=0, int patchSize=31 ); int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31 );
// returns the descriptor size in bytes // returns the descriptor size in bytes
int descriptorSize() const; int descriptorSize() const;
...@@ -588,13 +588,13 @@ class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector ...@@ -588,13 +588,13 @@ class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector
{ {
public: public:
/** \param adjaster an AdjusterAdapter that will do the detection and parameter adjustment /** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment
* \param max_features the maximum desired number of features * \param max_features the maximum desired number of features
* \param max_iters the maximum number of times to try to adjust the feature detector params * \param max_iters the maximum number of times to try to adjust the feature detector params
* for the FastAdjuster this can be high, but with Star or Surf this can get time consuming * for the FastAdjuster this can be high, but with Star or Surf this can get time consuming
* \param min_features the minimum desired features * \param min_features the minimum desired features
*/ */
DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjaster, int min_features=400, int max_features=500, int max_iters=5 ); DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 );
virtual bool empty() const; virtual bool empty() const;
...@@ -1158,9 +1158,9 @@ public: ...@@ -1158,9 +1158,9 @@ public:
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false ); const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
// Reads matcher object from a file node // Reads matcher object from a file node
virtual void read( const FileNode& ); virtual void read( const FileNode& fn );
// Writes matcher object to a file storage // Writes matcher object to a file storage
virtual void write( FileStorage& ) const; virtual void write( FileStorage& fs ) const;
// Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty) // Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty)
virtual bool empty() const; virtual bool empty() const;
......
...@@ -289,7 +289,7 @@ Enables the :ocv:class:`gpu::StereoConstantSpaceBP` constructors. ...@@ -289,7 +289,7 @@ Enables the :ocv:class:`gpu::StereoConstantSpaceBP` constructors.
.. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP, int iters = DEFAULT_ITERS, int levels = DEFAULT_LEVELS, int nr_plane = DEFAULT_NR_PLANE, int msg_type = CV_32F) .. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP, int iters = DEFAULT_ITERS, int levels = DEFAULT_LEVELS, int nr_plane = DEFAULT_NR_PLANE, int msg_type = CV_32F)
.. ocv:function:: StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th = 0, int msg_type = CV_32F) .. ocv:function:: gpu::StereoConstantSpaceBP::StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th = 0, int msg_type = CV_32F)
:param ndisp: Number of disparities. :param ndisp: Number of disparities.
......
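A hedged sketch of running the algorithm with default parameters except the disparity count (128 is an illustrative value; ``left`` and ``right`` are assumed to be already-loaded rectified images): ::

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    void computeDisparity(const Mat& left, const Mat& right, Mat& disparity)
    {
        gpu::GpuMat d_left(left), d_right(right), d_disp;
        gpu::StereoConstantSpaceBP csbp(128);        // 128 disparities
        csbp(d_left, d_right, d_disp);
        d_disp.download(disparity);
    }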
...@@ -338,7 +338,7 @@ Blocks the current CPU thread until all operations in the stream are complete. ...@@ -338,7 +338,7 @@ Blocks the current CPU thread until all operations in the stream are complete.
gpu::StreamAccessor gpu::StreamAccessor
------------------- -------------------
.. ocv:class:: gpu::StreamAccessor .. ocv:struct:: gpu::StreamAccessor
Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is declared in ``stream_accessor.hpp`` because it is the only public header that depends on the CUDA Runtime API. Including it brings a dependency to your code. :: Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is declared in ``stream_accessor.hpp`` because it is the only public header that depends on the CUDA Runtime API. Including it brings a dependency to your code. ::
......
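A minimal sketch of the accessor (it assumes the project already links against the CUDA Runtime, since including this header pulls that dependency in): ::

    #include <opencv2/gpu/gpu.hpp>
    #include <opencv2/gpu/stream_accessor.hpp>

    // Obtain the raw CUDA stream behind a cv::gpu::Stream so that custom
    // kernels can be enqueued on the same stream.
    cv::gpu::Stream stream;
    cudaStream_t raw = cv::gpu::StreamAccessor::getStream(stream);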
...@@ -346,19 +346,19 @@ Detects keypoints and computes descriptors for them. ...@@ -346,19 +346,19 @@ Detects keypoints and computes descriptors for them.
gpu::ORB_GPU::downloadKeypoints gpu::ORB_GPU::downloadKeyPoints
------------------------------------- -------------------------------------
Downloads keypoints from GPU to CPU memory. Downloads keypoints from GPU to CPU memory.
.. ocv:function:: void gpu::ORB_GPU::downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints) .. ocv:function:: void gpu::ORB_GPU::downloadKeyPoints( GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints )
gpu::ORB_GPU::convertKeypoints gpu::ORB_GPU::convertKeyPoints
------------------------------------- -------------------------------------
Converts keypoints from GPU representation to vector of ``KeyPoint``. Converts keypoints from GPU representation to vector of ``KeyPoint``.
.. ocv:function:: void gpu::ORB_GPU::convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints) .. ocv:function:: void gpu::ORB_GPU::convertKeyPoints( Mat& d_keypoints, std::vector<KeyPoint>& keypoints )
......
...@@ -47,7 +47,6 @@ Any subsequent API call to this device will reinitialize the device. ...@@ -47,7 +47,6 @@ Any subsequent API call to this device will reinitialize the device.
gpu::FeatureSet gpu::FeatureSet
--------------- ---------------
.. ocv:class:: gpu::FeatureSet
Class providing GPU computing features. :: Class providing GPU computing features. ::
...@@ -74,9 +73,9 @@ Class providing a set of static methods to check what NVIDIA* card architecture ...@@ -74,9 +73,9 @@ Class providing a set of static methods to check what NVIDIA* card architecture
The following method checks whether the module was built with the support of the given feature: The following method checks whether the module was built with the support of the given feature:
.. ocv:function:: static bool gpu::TargetArchs::builtWith(FeatureSet feature) .. ocv:function:: static bool gpu::TargetArchs::builtWith( FeatureSet feature_set )
:param feature: Feature to be checked. See :ocv:class:`gpu::FeatureSet`. :param feature_set: Features to be checked. See :ocv:class:`gpu::FeatureSet`.
There is a set of methods to check whether the module contains intermediate (PTX) or binary GPU code for the given architecture(s): There is a set of methods to check whether the module contains intermediate (PTX) or binary GPU code for the given architecture(s):
...@@ -150,7 +149,7 @@ gpu::DeviceInfo::name ...@@ -150,7 +149,7 @@ gpu::DeviceInfo::name
------------------------- -------------------------
Returns the device name. Returns the device name.
.. ocv:function:: string gpu::DeviceInfo::name() .. ocv:function:: string gpu::DeviceInfo::name() const
...@@ -198,9 +197,9 @@ gpu::DeviceInfo::supports ...@@ -198,9 +197,9 @@ gpu::DeviceInfo::supports
----------------------------- -----------------------------
Provides information on GPU feature support. Provides information on GPU feature support.
.. ocv:function:: bool gpu::DeviceInfo::supports(FeatureSet feature) .. ocv:function:: bool gpu::DeviceInfo::supports( FeatureSet feature_set ) const
:param feature: Feature to be checked. See :ocv:class:`gpu::FeatureSet`. :param feature_set: Features to be checked. See :ocv:class:`gpu::FeatureSet`.
This function returns ``true`` if the device has the specified GPU feature. Otherwise, it returns ``false`` . This function returns ``true`` if the device has the specified GPU feature. Otherwise, it returns ``false`` .
......
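A combined sketch of the two checks (compile-time support in the library versus run-time support on the device); ``FEATURE_SET_COMPUTE_20`` is used purely as an example feature: ::

    #include <iostream>
    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    int main()
    {
        gpu::DeviceInfo info;                        // device 0 by default
        bool compiled = gpu::TargetArchs::builtWith(gpu::FEATURE_SET_COMPUTE_20);
        bool onDevice = info.supports(gpu::FEATURE_SET_COMPUTE_20);
        if (compiled && onDevice)
            std::cout << info.name() << " can run the CC 2.0 code path" << std::endl;
        return 0;
    }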
...@@ -32,7 +32,7 @@ By default, the OpenCV GPU module includes: ...@@ -32,7 +32,7 @@ By default, the OpenCV GPU module includes:
PTX code for compute capabilities 1.1 and 1.3 (controlled by ``CUDA_ARCH_PTX`` in ``CMake``) PTX code for compute capabilities 1.1 and 1.3 (controlled by ``CUDA_ARCH_PTX`` in ``CMake``)
This means that for devices with CC 1.3 and 2.0, binary images are ready to run. For all newer platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw This means that for devices with CC 1.3 and 2.0, binary images are ready to run. For all newer platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
:ocv:func:`Exception`. For platforms where JIT compilation is performed first, the run is slow. :ocv:class:`Exception`. For platforms where JIT compilation is performed first, the run is slow.
On a GPU with CC 1.0, you can still compile the GPU module and most of the functions will run flawlessly. To achieve this, add "1.0" to the list of binaries, for example, ``CUDA_ARCH_BIN="1.0 1.3 2.0"`` . The functions that cannot be run on CC 1.0 GPUs throw an exception. On a GPU with CC 1.0, you can still compile the GPU module and most of the functions will run flawlessly. To achieve this, add "1.0" to the list of binaries, for example, ``CUDA_ARCH_BIN="1.0 1.3 2.0"`` . The functions that cannot be run on CC 1.0 GPUs throw an exception.
......
...@@ -7,7 +7,7 @@ Object Detection ...@@ -7,7 +7,7 @@ Object Detection
gpu::HOGDescriptor gpu::HOGDescriptor
------------------ ------------------
.. ocv:class:: gpu::HOGDescriptor .. ocv:struct:: gpu::HOGDescriptor
The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detector. :: The class implements Histogram of Oriented Gradients ([Dalal2005]_) object detector. ::
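A pedestrian-detection sketch with the default 64x128 window and the bundled people detector (the input is assumed to be 8-bit, single- or 4-channel, as this implementation requires): ::

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    void detectPeople(const Mat& gray, std::vector<Rect>& found)
    {
        gpu::HOGDescriptor hog;                      // default parameters
        hog.setSVMDetector(gpu::HOGDescriptor::getDefaultPeopleDetector());
        gpu::GpuMat d_img(gray);                     // CV_8UC1 or CV_8UC4
        hog.detectMultiScale(d_img, found);
    }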
...@@ -235,7 +235,7 @@ gpu::CascadeClassifier_GPU::CascadeClassifier_GPU ...@@ -235,7 +235,7 @@ gpu::CascadeClassifier_GPU::CascadeClassifier_GPU
----------------------------------------------------- -----------------------------------------------------
Loads the classifier from a file. Loads the classifier from a file.
.. ocv:function:: gpu::CascadeClassifier_GPU(const string& filename) .. ocv:function:: gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const string& filename)
:param filename: Name of the file from which the classifier is loaded. Only the old ``haar`` classifier (trained by the ``haar`` training application) and NVIDIA's ``nvbin`` are supported. :param filename: Name of the file from which the classifier is loaded. Only the old ``haar`` classifier (trained by the ``haar`` training application) and NVIDIA's ``nvbin`` are supported.
......
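A face-detection sketch (the cascade file name is illustrative; any old-style ``haar`` cascade or NVIDIA ``nvbin`` file can be used): ::

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    void detectFaces(const Mat& gray, std::vector<Rect>& faces)
    {
        gpu::CascadeClassifier_GPU cascade("haarcascade_frontalface_alt.xml");
        gpu::GpuMat d_gray(gray), d_objects;
        int n = cascade.detectMultiScale(d_gray, d_objects);
        if (n > 0)
        {
            Mat objects;
            d_objects.colRange(0, n).download(objects);   // one Rect per detection
            Rect* rects = objects.ptr<Rect>();
            faces.assign(rects, rects + n);
        }
    }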
...@@ -9,7 +9,7 @@ gpu::gemm ...@@ -9,7 +9,7 @@ gpu::gemm
------------------ ------------------
Performs generalized matrix multiplication. Performs generalized matrix multiplication.
.. ocv:function:: void gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())
:param src1: First multiplied input matrix that should have ``CV_32FC1`` , ``CV_64FC1`` , ``CV_32FC2`` , or ``CV_64FC2`` type. :param src1: First multiplied input matrix that should have ``CV_32FC1`` , ``CV_64FC1`` , ``CV_32FC2`` , or ``CV_64FC2`` type.
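A small sketch of ``dst = src1*src2 + src3`` on ``CV_32FC1`` data (this function requires OpenCV to be built with CUBLAS; the matrix sizes are illustrative): ::

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    int main()
    {
        Mat ones = Mat::ones(64, 64, CV_32FC1), zeros = Mat::zeros(64, 64, CV_32FC1);
        gpu::GpuMat a(ones), b(ones), c(zeros), dst;
        gpu::gemm(a, b, 1.0, c, 1.0, dst);           // dst = 1*a*b + 1*c
        return 0;
    }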
...@@ -47,9 +47,9 @@ gpu::transpose ...@@ -47,9 +47,9 @@ gpu::transpose
------------------ ------------------
Transposes a matrix. Transposes a matrix.
.. ocv:function:: void gpu::transpose(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::transpose( const GpuMat& src1, GpuMat& dst, Stream& stream=Stream::Null() )
:param src: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc). :param src1: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).
:param dst: Destination matrix. :param dst: Destination matrix.
...@@ -63,11 +63,11 @@ gpu::flip ...@@ -63,11 +63,11 @@ gpu::flip
------------- -------------
Flips a 2D matrix around vertical, horizontal, or both axes. Flips a 2D matrix around vertical, horizontal, or both axes.
.. ocv:function:: void gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::flip( const GpuMat& a, GpuMat& b, int flipCode, Stream& stream=Stream::Null() )
:param src: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth. :param a: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.
:param dst: Destination matrix. :param b: Destination matrix.
:param flipCode: Flip mode for the source: :param flipCode: Flip mode for the source:
...@@ -143,7 +143,7 @@ gpu::magnitude ...@@ -143,7 +143,7 @@ gpu::magnitude
------------------ ------------------
Computes magnitudes of complex matrix elements. Computes magnitudes of complex matrix elements.
.. ocv:function:: void gpu::magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::magnitude( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())
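A sketch of the two-argument (x, y) overload on single-precision data (the values are illustrative): ::

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    int main()
    {
        Mat h_x = Mat::ones(1, 4, CV_32FC1), h_y = Mat::ones(1, 4, CV_32FC1);
        gpu::GpuMat x(h_x), y(h_y), mag;
        gpu::magnitude(x, y, mag);                   // each element: sqrt(1*1 + 1*1)
        return 0;
    }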
...@@ -165,7 +165,7 @@ gpu::magnitudeSqr ...@@ -165,7 +165,7 @@ gpu::magnitudeSqr
--------------------- ---------------------
Computes squared magnitudes of complex matrix elements. Computes squared magnitudes of complex matrix elements.
.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::magnitudeSqr( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null()) .. ocv:function:: void gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())
......
...@@ -431,11 +431,11 @@ CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = S ...@@ -431,11 +431,11 @@ CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = S
//! computes magnitude of complex (x(i).re, x(i).im) vector //! computes magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type //! supports only CV_32FC2 type
CV_EXPORTS void magnitude(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null()); CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
//! computes squared magnitude of complex (x(i).re, x(i).im) vector //! computes squared magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type //! supports only CV_32FC2 type
CV_EXPORTS void magnitudeSqr(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null()); CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
//! computes magnitude of each (x(i), y(i)) vector //! computes magnitude of each (x(i), y(i)) vector
//! supports only floating-point source //! supports only floating-point source
...@@ -480,7 +480,7 @@ CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale ...@@ -480,7 +480,7 @@ CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale
//! computes element-wise weighted quotient of matrix and scalar (c = a / s) //! computes element-wise weighted quotient of matrix and scalar (c = a / s)
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted reciprocal of an array (dst = scale/src2) //! computes element-wise weighted reciprocal of an array (dst = scale/src2)
CV_EXPORTS void divide(double scale, const GpuMat& src2, GpuMat& dst, int dtype = -1, Stream& stream = Stream::Null()); CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null());
//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) //! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
...@@ -1697,15 +1697,15 @@ public: ...@@ -1697,15 +1697,15 @@ public:
class CV_EXPORTS GoodFeaturesToTrackDetector_GPU class CV_EXPORTS GoodFeaturesToTrackDetector_GPU
{ {
public: public:
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners_ = 1000, double qualityLevel_ = 0.01, double minDistance_ = 0.0, explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
int blockSize_ = 3, bool useHarrisDetector_ = false, double harrisK_ = 0.04) int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04)
{ {
maxCorners = maxCorners_; this->maxCorners = maxCorners;
qualityLevel = qualityLevel_; this->qualityLevel = qualityLevel;
minDistance = minDistance_; this->minDistance = minDistance;
blockSize = blockSize_; this->blockSize = blockSize;
useHarrisDetector = useHarrisDetector_; this->useHarrisDetector = useHarrisDetector;
harrisK = harrisK_; this->harrisK = harrisK;
} }
//! return 1 rows matrix with CV_32FC2 type //! return 1 rows matrix with CV_32FC2 type
......
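A usage sketch for the functor declared above (the corner count, quality level and minimum distance are example values; the result is a 1 x N matrix of ``CV_32FC2`` corner coordinates): ::

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv;

    void findCorners(const Mat& gray, std::vector<Point2f>& corners)
    {
        gpu::GoodFeaturesToTrackDetector_GPU detector(500, 0.01, 10.0);
        gpu::GpuMat d_gray(gray), d_corners;
        detector(d_gray, d_corners);
        if (!d_corners.empty())
        {
            Mat h_corners;
            d_corners.download(h_corners);           // 1 x N, CV_32FC2
            corners.assign(h_corners.ptr<Point2f>(),
                           h_corners.ptr<Point2f>() + h_corners.cols);
        }
    }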
...@@ -521,7 +521,7 @@ void CV_SpecificVideoTest::run(int) ...@@ -521,7 +521,7 @@ void CV_SpecificVideoTest::run(int)
TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); } TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
#endif #endif
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT #if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && !defined(__APPLE__)
TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); } TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); }
TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); } TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
#endif #endif
......
...@@ -7,7 +7,7 @@ matchTemplate ...@@ -7,7 +7,7 @@ matchTemplate
----------------- -----------------
Compares a template against overlapped image regions. Compares a template against overlapped image regions.
.. ocv:function:: void matchTemplate( InputArray image, InputArray temp, OutputArray result, int method ) .. ocv:function:: void matchTemplate( InputArray image, InputArray templ, OutputArray result, int method )
.. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result .. ocv:pyfunction:: cv2.matchTemplate(image, templ, method[, result]) -> result
......
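A C++ sketch of locating the best match with the normalized correlation coefficient method (the file names are illustrative): ::

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        Mat image = imread("scene.png", 0), templ = imread("patch.png", 0);
        Mat result;
        matchTemplate(image, templ, result, CV_TM_CCOEFF_NORMED);
        double maxVal; Point maxLoc;
        minMaxLoc(result, 0, &maxVal, 0, &maxLoc);   // maxLoc = top-left of best match
        return 0;
    }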
...@@ -351,8 +351,8 @@ CVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader ); ...@@ -351,8 +351,8 @@ CVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader );
a tree of polygonal curves (contours) */ a tree of polygonal curves (contours) */
CVAPI(CvSeq*) cvApproxPoly( const void* src_seq, CVAPI(CvSeq*) cvApproxPoly( const void* src_seq,
int header_size, CvMemStorage* storage, int header_size, CvMemStorage* storage,
int method, double parameter, int method, double eps,
int parameter2 CV_DEFAULT(0)); int recursive CV_DEFAULT(0));
/* Calculates perimeter of a contour or length of a part of contour */ /* Calculates perimeter of a contour or length of a part of contour */
CVAPI(double) cvArcLength( const void* curve, CVAPI(double) cvArcLength( const void* curve,
......
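A legacy C-API sketch of polygonal approximation (the file name and the 2% accuracy factor are illustrative; ``recursive=0`` approximates only the given contour): ::

    #include <opencv2/imgproc/imgproc_c.h>
    #include <opencv2/highgui/highgui_c.h>

    int main()
    {
        IplImage* img = cvLoadImage("shapes.png", CV_LOAD_IMAGE_GRAYSCALE);
        cvThreshold(img, img, 128, 255, CV_THRESH_BINARY);

        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* contours = 0;
        cvFindContours(img, storage, &contours, sizeof(CvContour),
                       CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
        if (contours)
            cvApproxPoly(contours, sizeof(CvContour), storage,
                         CV_POLY_APPROX_DP,
                         cvContourPerimeter(contours) * 0.02, 0 /* recursive */);
        cvReleaseMemStorage(&storage);
        cvReleaseImage(&img);
        return 0;
    }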
...@@ -57,8 +57,6 @@ int icvIntersectLines( double x1, double dx1, double y1, double dy1, ...@@ -57,8 +57,6 @@ int icvIntersectLines( double x1, double dx1, double y1, double dy1,
double* t2 ); double* t2 );
void icvCreateCenterNormalLine( CvSubdiv2DEdge edge, double* a, double* b, double* c );
void icvIntersectLines3( double* a0, double* b0, double* c0, void icvIntersectLines3( double* a0, double* b0, double* c0,
double* a1, double* b1, double* c1, double* a1, double* b1, double* c1,
CvPoint2D32f* point ); CvPoint2D32f* point );
......
...@@ -589,9 +589,8 @@ class ClassInfo(object): ...@@ -589,9 +589,8 @@ class ClassInfo(object):
self.jname = m[1:] self.jname = m[1:]
self.base = '' self.base = ''
if decl[1]: if decl[1]:
self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip() #self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
self.base = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "")
class ArgInfo(object): class ArgInfo(object):
def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ] def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ]
......