Commit 9638448c authored by Vadim Pisarevsky's avatar Vadim Pisarevsky

reference manuals merge is complete!

parent 7f38aa60
@@ -89,8 +89,7 @@ if(WIN32)
     set(CPACK_NSIS_MENU_LINKS
         "http://opencv.willowgarage.com" "Start Page"
-        "doc\\\\opencv2refman_cpp.pdf" "The OpenCV C++ Reference Manual"
-        "doc\\\\opencv2refman_py.pdf" "The OpenCV Python Reference Manual"
+        "doc\\\\opencv2refman.pdf" "The OpenCV Reference Manual"
         "doc\\\\opencv_tutorials.pdf" "The OpenCV Tutorials for Beginners"
         "CMakeLists.txt" "The Build Script (open with CMake)"
         "samples\\\\c" "C Samples"
...
@@ -11,17 +11,14 @@ if(BUILD_DOCS AND PDFLATEX_COMPILER AND HAVE_SPHINX)
 project(opencv_docs)
 
-file(GLOB_RECURSE OPENCV2_FILES_PICT ../modules/*.png ../modules/*.jpg)
-file(GLOB_RECURSE OPENCV2_FILES_RST ../modules/*.rst)
-file(GLOB_RECURSE OPENCV2_PY_FILES_RST opencv2/*.rst)
-file(GLOB_RECURSE OPENCV1_FILES_PICT pics/*.png pics/*.jpg)
-file(GLOB_RECURSE OPENCV1_FILES_RST opencv1/*.rst)
+file(GLOB_RECURSE OPENCV_FILES_REF ../modules/*.rst)
+file(GLOB_RECURSE OPENCV_FILES_REF_PICT ../modules/*.png ../modules/*.jpg)
 file(GLOB_RECURSE OPENCV_FILES_UG user_guide/*.rst)
 file(GLOB_RECURSE OPENCV_FILES_TUT tutorials/*.rst)
+file(GLOB_RECURSE OPENCV_FILES_TUT_PICT tutorials/*.png tutorials/*.jpg)
 
-set(OPENCV_DOC_DEPS conf.py ${OPENCV2_FILES_RST} ${OPENCV2_FILES_PICT} ${OPENCV2_PY_FILES_RST}
-    ${OPENCV1_FILES_RST} ${OPENCV1_FILES_PICT}
-    ${OPENCV_FILES_UG} ${OPENCV_FILES_TUT})
+set(OPENCV_DOC_DEPS conf.py ${OPENCV_FILES_REF} ${OPENCV_FILES_REF_PICT}
+    ${OPENCV_FILES_UG} ${OPENCV_FILES_TUT} ${OPENCV_FILES_TUT_PICT})
 
 add_custom_target(docs
     ${SPHINX_BUILD}
@@ -31,14 +28,8 @@ add_custom_target(docs
     ${CMAKE_CURRENT_SOURCE_DIR}/pics ${CMAKE_CURRENT_BINARY_DIR}/doc/opencv1/pics
     COMMAND ${CMAKE_COMMAND} -E copy
     ${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty ${CMAKE_CURRENT_BINARY_DIR}
-    COMMAND ${PDFLATEX_COMPILER} opencv2refman_cpp
-    COMMAND ${PDFLATEX_COMPILER} opencv2refman_cpp
-    COMMAND ${PDFLATEX_COMPILER} opencv2refman_py
-    COMMAND ${PDFLATEX_COMPILER} opencv2refman_py
-    COMMAND ${PDFLATEX_COMPILER} opencv1refman_c
-    COMMAND ${PDFLATEX_COMPILER} opencv1refman_c
-    COMMAND ${PDFLATEX_COMPILER} opencv1refman_py
-    COMMAND ${PDFLATEX_COMPILER} opencv1refman_py
+    COMMAND ${PDFLATEX_COMPILER} opencv2refman
+    COMMAND ${PDFLATEX_COMPILER} opencv2refman
     COMMAND ${PDFLATEX_COMPILER} opencv_user
     COMMAND ${PDFLATEX_COMPILER} opencv_user
     COMMAND ${PDFLATEX_COMPILER} opencv_tutorials
@@ -51,8 +42,6 @@ add_custom_target(html_docs
     ${SPHINX_BUILD}
     -b html -c ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_CURRENT_SOURCE_DIR}/.. ./_html
-    COMMAND ${CMAKE_COMMAND} -E copy_directory
-    ${CMAKE_CURRENT_SOURCE_DIR}/pics ${CMAKE_CURRENT_BINARY_DIR}/doc/opencv1/pics
     COMMAND ${CMAKE_COMMAND} -E copy
     ${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty ${CMAKE_CURRENT_BINARY_DIR}
     DEPENDS ${OPENCV_DOC_DEPS}
...
@@ -223,13 +223,7 @@ pngmath_latex_preamble = r"""
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('modules/refman', 'opencv2refman_cpp.tex', u'The OpenCV 2.x C++ Reference Manual',
-   u'', 'manual'),
-  ('doc/opencv2/py/py_index', 'opencv2refman_py.tex', u'The OpenCV 2.x Python Reference Manual',
-   u'', 'manual'),
-  ('doc/opencv1/c/c_index', 'opencv1refman_c.tex', u'The OpenCV 1.x C Reference Manual',
-   u'', 'manual'),
-  ('doc/opencv1/py/py_index', 'opencv1refman_py.tex', u'The OpenCV 1.x Python Reference Manual',
+  ('modules/refman', 'opencv2refman.tex', u'The OpenCV Reference Manual',
    u'', 'manual'),
   ('doc/user_guide/user_guide', 'opencv_user.tex', u'The OpenCV User Guide',
    u'', 'manual'),
...
@@ -1081,6 +1081,7 @@ class OCVObject(ObjectDescription):
     """Description of a C++ language object."""
 
     langname = "C++"
+    ismember = False
 
     doc_field_types = [
         TypedField('parameter', label=l_('Parameters'),
...
@@ -1111,9 +1112,11 @@ class OCVObject(ObjectDescription):
         node += pnode
 
     def attach_modifiers(self, node, obj):
-        lname = self.__class__.langname
-        node += nodes.strong(lname + ":", lname + ":")
-        node += addnodes.desc_name(" ", " ")
+        if not self.__class__.ismember:
+            lname = self.__class__.langname
+            node += nodes.strong(lname + ":", lname + ":")
+            node += addnodes.desc_name(" ", " ")
         if obj.visibility != 'public':
             node += addnodes.desc_annotation(obj.visibility,
                                              obj.visibility)
...
@@ -1189,6 +1192,20 @@ class OCVClassObject(OCVObject):
         #self.attach_name(signode, cls.name)
         pass
 
+class OCVStructObject(OCVObject):
+    def get_index_text(self, name):
+        return _('%s (C structure)') % name
+
+    def parse_definition(self, parser):
+        return parser.parse_class()
+
+    def describe_signature(self, signode, cls):
+        #self.attach_modifiers(signode, cls)
+        #signode += addnodes.desc_annotation('class ', 'class ')
+        #self.attach_name(signode, cls.name)
+        pass
+
 class OCVTypeObject(OCVObject):
...
@@ -1211,6 +1228,8 @@ class OCVTypeObject(OCVObject):
 class OCVMemberObject(OCVObject):
+    ismember = True
+
     def get_index_text(self, name):
         if self.objtype == 'member':
             return _('%s (C++ member)') % name
...
@@ -1268,6 +1287,8 @@ class OCVFunctionObject(OCVObject):
     def get_index_text(self, name):
         lname = self.__class__.langname
+        if lname == "C" and name.startswith("cv"):
+            name = name[2:]
         return _('%s (%s function)') % (name, lname)
 
     def parse_definition(self, parser):
...
@@ -1344,6 +1365,7 @@ class OCVDomain(Domain):
     label = 'C++'
     object_types = {
         'class': ObjType(l_('class'), 'class'),
+        'struct': ObjType(l_('struct'), 'struct'),
         'function': ObjType(l_('function'), 'func', 'funcx'),
         'cfunction': ObjType(l_('cfunction'), 'cfunc', 'cfuncx'),
         'jfunction': ObjType(l_('jfunction'), 'jfunc', 'jfuncx'),
...
@@ -1355,6 +1377,7 @@ class OCVDomain(Domain):
     directives = {
         'class': OCVClassObject,
+        'struct': OCVStructObject,
         'function': OCVFunctionObject,
         'cfunction': OCVCFunctionObject,
         'jfunction': OCVJavaFunctionObject,
...
@@ -1366,6 +1389,7 @@ class OCVDomain(Domain):
     }
     roles = {
         'class': OCVXRefRole(),
+        'struct': OCVXRefRole(),
         'func' : OCVXRefRole(fix_parens=True),
         'funcx' : OCVXRefRole(),
         'cfunc' : OCVXRefRole(fix_parens=True),
...
############
Bibliography
############
.. [Agrawal08] Agrawal, M., Konolige, K., and Blas, M.R. "CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching", ECCV08, 2008
.. [BT96] Tomasi, C. and Birchfield, S.T. "Depth Discontinuities by Pixel-to-Pixel Stereo", STAN-CS, 1996
.. [Bay06] Bay, H., Tuytelaars, T., and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
.. [Borgefors86] Borgefors, Gunilla. "Distance Transformations in Digital Images", Comput. Vision Graph. Image Process. 34(3), pp. 344-371, 1986
.. [Bradski00] Davis, J.W. and Bradski, G.R. "Motion Segmentation and Pose Recognition with Motion History Gradients", WACV00, 2000
.. [Bradski98] Bradski, G.R. "Computer Vision Face Tracking for Use in a Perceptual User Interface", Intel, 1998
.. [Davis97] Davis, J.W. and Bobick, A.F. "The Representation and Recognition of Action Using Temporal Templates", CVPR97, 1997
.. [Felzenszwalb04] Felzenszwalb, Pedro F. and Huttenlocher, Daniel P. "Distance Transforms of Sampled Functions", TR2004-1963, 2004
.. [Hartley99] Hartley, R.I. "Theory and Practice of Projective Rectification", IJCV 35(2), pp. 115-127, 1999
##########################
OpenCV 1.x C API Reference
##########################
.. highlight:: c
.. toctree::
    :maxdepth: 2

    core
    imgproc
    features2d
    objdetect
    video
    highgui
    calib3d
*******************************************************
calib3d. Camera Calibration, Pose Estimation and Stereo
*******************************************************
.. toctree::
    :maxdepth: 2

    calib3d_camera_calibration_and_3d_reconstruction
****************************
core. The Core Functionality
****************************
.. toctree::
    :maxdepth: 2

    core_basic_structures
    core_operations_on_arrays
    core_dynamic_structures
    core_drawing_functions
    core_xml_yaml_persistence
    core_clustering
    core_utility_and_system_functions_and_macros
Clustering
==========
.. highlight:: c
.. index:: KMeans2
.. _KMeans2:
KMeans2
-------
.. cfunction:: int cvKMeans2(const CvArr* samples, int nclusters, CvArr* labels, CvTermCriteria termcrit, int attempts=1, CvRNG* rng=0, int flags=0, CvArr* centers=0, double* compactness=0)
Splits a set of vectors into a given number of clusters.
:param samples: Floating-point matrix of input samples, one row per sample
:param nclusters: Number of clusters to split the set by
:param labels: Output integer vector storing cluster indices for every sample
:param termcrit: Specifies maximum number of iterations and/or accuracy (distance the centers can move by between subsequent iterations)
:param attempts: How many times the algorithm is executed using different initial labelings. The algorithm returns labels that yield the best compactness (see the last function parameter)
:param rng: Optional external random number generator; can be used to fully control the function behaviour
:param flags: Can be 0 or ``CV_KMEANS_USE_INITIAL_LABELS`` . The latter
value means that during the first (and possibly the only) attempt, the
function uses the user-supplied labels as the initial approximation
instead of generating random labels. For the second and further attempts,
the function will use randomly generated labels in any case
:param centers: The optional output array of the cluster centers
:param compactness: The optional output parameter, which is computed as :math:`\sum_i ||\texttt{samples}_i - \texttt{centers}_{\texttt{labels}_i}||^2`
after every attempt; the best (minimum) value is chosen and the
corresponding labels are returned by the function. Basically, the
user can use only the core of the function, set the number of
attempts to 1, initialize labels each time using a custom algorithm
( ``flags=CV_KMEANS_USE_INITIAL_LABELS`` ) and, based on the output compactness
or any other criteria, choose the best clustering.
The function ``cvKMeans2`` implements a k-means algorithm that finds the centers of ``nclusters`` clusters and groups the input samples around the clusters. On output, :math:`\texttt{labels}_i` contains a cluster index for the sample stored in the i-th row of the ``samples`` matrix.
::
#include "cxcore.h"
#include "highgui.h"
int main( int argc, char** argv )
{
#define MAX_CLUSTERS 5
CvScalar color_tab[MAX_CLUSTERS];
IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
CvRNG rng = cvRNG(0xffffffff);
color_tab[0] = CV_RGB(255,0,0);
color_tab[1] = CV_RGB(0,255,0);
color_tab[2] = CV_RGB(100,100,255);
color_tab[3] = CV_RGB(255,0,255);
color_tab[4] = CV_RGB(255,255,0);
cvNamedWindow( "clusters", 1 );
for(;;)
{
int k, cluster_count = cvRandInt(&rng)%MAX_CLUSTERS + 1;
int i, sample_count = cvRandInt(&rng)%1000 + 1;
CvMat* points = cvCreateMat( sample_count, 1, CV_32FC2 );
CvMat* clusters = cvCreateMat( sample_count, 1, CV_32SC1 );
/* generate random sample from multigaussian distribution */
for( k = 0; k < cluster_count; k++ )
{
CvPoint center;
CvMat point_chunk;
center.x = cvRandInt(&rng)%img->width;
center.y = cvRandInt(&rng)%img->height;
cvGetRows( points,
&point_chunk,
k*sample_count/cluster_count,
(k == (cluster_count - 1)) ?
sample_count :
(k+1)*sample_count/cluster_count );
cvRandArr( &rng, &point_chunk, CV_RAND_NORMAL,
cvScalar(center.x,center.y,0,0),
cvScalar(img->width/6, img->height/6,0,0) );
}
/* shuffle samples */
for( i = 0; i < sample_count/2; i++ )
{
CvPoint2D32f* pt1 =
(CvPoint2D32f*)points->data.fl + cvRandInt(&rng)%sample_count;
CvPoint2D32f* pt2 =
(CvPoint2D32f*)points->data.fl + cvRandInt(&rng)%sample_count;
CvPoint2D32f temp;
CV_SWAP( *pt1, *pt2, temp );
}
cvKMeans2( points, cluster_count, clusters,
cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ));
cvZero( img );
for( i = 0; i < sample_count; i++ )
{
CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
int cluster_idx = clusters->data.i[i];
cvCircle( img,
cvPointFrom32f(pt),
2,
color_tab[cluster_idx],
CV_FILLED );
}
cvReleaseMat( &points );
cvReleaseMat( &clusters );
cvShowImage( "clusters", img );
int key = cvWaitKey(0);
if( key == 27 )
break;
}
return 0;
}
..
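The custom-initialization pattern described for the ``flags`` and ``compactness`` parameters can be sketched as follows. This is a hypothetical helper, not part of OpenCV: it assumes the caller pre-fills ``labels`` with an initial labeling and keeps track of the best result across its own attempts: ::

/* Hedged sketch: run cvKMeans2 once with user-supplied initial labels
   (CV_KMEANS_USE_INITIAL_LABELS, attempts=1) and keep the labeling with
   the best (smallest) compactness across caller-managed attempts. */
double cluster_with_seed( const CvMat* samples, int nclusters,
                          CvMat* labels, CvMat* best_labels,
                          double best_compactness )
{
    double compactness = 0;
    /* labels already holds a caller-supplied initial labeling */
    cvKMeans2( samples, nclusters, labels,
               cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ),
               1, 0, CV_KMEANS_USE_INITIAL_LABELS, 0, &compactness );
    if( compactness < best_compactness )
    {
        cvCopy( labels, best_labels, 0 );
        best_compactness = compactness;
    }
    return best_compactness;
}

..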
.. index:: SeqPartition
.. _SeqPartition:
SeqPartition
------------
.. cfunction:: int cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, CvSeq** labels, CvCmpFunc is_equal, void* userdata )
Splits a sequence into equivalency classes.
:param seq: The sequence to partition
:param storage: The storage block to store the sequence of equivalency classes. If it is NULL, the function uses ``seq->storage`` for output labels
:param labels: Output parameter. Double pointer to the sequence of 0-based labels of input sequence elements
:param is_equal: The relation function that should return non-zero if the two particular sequence elements are from the same class, and zero otherwise. The partitioning algorithm uses the transitive closure of the relation function as the equivalency criterion
:param userdata: Pointer that is transparently passed to the ``is_equal`` function
::
typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata);
..
The function ``cvSeqPartition`` implements a quadratic algorithm for splitting a set into one or more equivalency classes. The function returns the number of equivalency classes.
::
#include "cxcore.h"
#include "highgui.h"
#include <stdio.h>
CvSeq* point_seq = 0;
IplImage* canvas = 0;
CvScalar* colors = 0;
int pos = 10;
int is_equal( const void* _a, const void* _b, void* userdata )
{
CvPoint a = *(const CvPoint*)_a;
CvPoint b = *(const CvPoint*)_b;
double threshold = *(double*)userdata;
return (double)((a.x - b.x)*(a.x - b.x) + (a.y - b.y)*(a.y - b.y)) <=
threshold;
}
void on_track( int pos )
{
CvSeq* labels = 0;
double threshold = pos*pos;
int i, class_count = cvSeqPartition( point_seq,
0,
&labels,
is_equal,
&threshold );
printf("
cvZero( canvas );
for( i = 0; i < labels->total; i++ )
{
CvPoint pt = *(CvPoint*)cvGetSeqElem( point_seq, i );
CvScalar color = colors[*(int*)cvGetSeqElem( labels, i )];
cvCircle( canvas, pt, 1, color, -1 );
}
cvShowImage( "points", canvas );
}
int main( int argc, char** argv )
{
CvMemStorage* storage = cvCreateMemStorage(0);
point_seq = cvCreateSeq( CV_32SC2,
sizeof(CvSeq),
sizeof(CvPoint),
storage );
CvRNG rng = cvRNG(0xffffffff);
int width = 500, height = 500;
int i, count = 1000;
canvas = cvCreateImage( cvSize(width,height), 8, 3 );
colors = (CvScalar*)cvAlloc( count*sizeof(colors[0]) );
for( i = 0; i < count; i++ )
{
CvPoint pt;
int icolor;
pt.x = cvRandInt( &rng ) % width;
pt.y = cvRandInt( &rng ) % height;
cvSeqPush( point_seq, &pt );
icolor = cvRandInt( &rng ) | 0x00404040;
colors[i] = CV_RGB(icolor & 255,
(icolor >> 8)&255,
(icolor >> 16)&255);
}
cvNamedWindow( "points", 1 );
cvCreateTrackbar( "threshold", "points", &pos, 50, on_track );
on_track(pos);
cvWaitKey(0);
return 0;
}
..
*******************************************************
features2d. Feature Detection and Descriptor Extraction
*******************************************************
.. toctree::
    :maxdepth: 2

    features2d_feature_detection_and_description
Feature detection and description
=================================
.. highlight:: c
* **image** The image on which keypoints (corners) will be detected.
* **keypoints** Keypoints detected on the image.
* **threshold** Threshold on the difference between the intensity of the center pixel and pixels on a circle around this pixel. See the description of the algorithm.
* **nonmaxSupression** If it is true, non-maximum suppression is applied to the detected corners (keypoints).
.. index:: ExtractSURF
.. _ExtractSURF:
ExtractSURF
-----------
.. cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
Extracts Speeded Up Robust Features from an image.
:param image: The input 8-bit grayscale image
:param mask: The optional input 8-bit mask. The features are only found in the areas that contain more than 50 % of non-zero mask pixels
:param keypoints: The output parameter; double pointer to the sequence of keypoints. The sequence of CvSURFPoint structures is as follows:
::
typedef struct CvSURFPoint
{
CvPoint2D32f pt; // position of the feature within the image
int laplacian; // -1, 0 or +1. sign of the laplacian at the point.
// can be used to speedup feature comparison
// (normally features with laplacians of different
// signs can not match)
int size; // size of the feature
float dir; // orientation of the feature: 0..360 degrees
float hessian; // value of the hessian (can be used to
// approximately estimate the feature strengths;
// see also params.hessianThreshold)
}
CvSURFPoint;
..
:param descriptors: The optional output parameter; double pointer to the sequence of descriptors. Depending on the params.extended value, each element of the sequence will be either a 64-element or a 128-element floating-point ( ``CV_32F`` ) vector. If the parameter is NULL, the descriptors are not computed
:param storage: Memory storage where keypoints and descriptors will be stored
:param params: Various algorithm parameters put to the structure CvSURFParams:
::
typedef struct CvSURFParams
{
int extended; // 0 means basic descriptors (64 elements each),
// 1 means extended descriptors (128 elements each)
double hessianThreshold; // only features with keypoint.hessian
// larger than that are extracted.
// good default value is ~300-500 (can depend on the
// average local contrast and sharpness of the image).
// user can further filter out some features based on
// their hessian values and other characteristics.
int nOctaves; // the number of octaves to be used for extraction.
// With each next octave the feature size is doubled
// (3 by default)
int nOctaveLayers; // The number of layers within each octave
// (4 by default)
}
CvSURFParams;
CvSURFParams cvSURFParams(double hessianThreshold, int extended=0);
// returns default parameters
..
The function ``cvExtractSURF`` finds robust features in the image, as described in [Bay06]_. For each feature it returns its location, size, orientation and optionally the descriptor, basic or extended. The function can be used for object tracking and localization, image stitching, etc. See the ``find_obj.cpp`` demo in the OpenCV samples directory.
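A minimal usage sketch (not from the original manual; the image path and the parameter values are illustrative): ::

#include "cv.h"
#include "highgui.h"
#include <stdio.h>

int main( int argc, char** argv )
{
    /* load the image as 8-bit grayscale */
    IplImage* img = cvLoadImage( argc > 1 ? argv[1] : "lena.jpg", 0 );
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq *keypoints = 0, *descriptors = 0;
    if( !img )
        return -1;
    /* hessianThreshold=500 and extended=0 (64-element descriptors)
       are illustrative values; see CvSURFParams above */
    cvExtractSURF( img, 0, &keypoints, &descriptors, storage,
                   cvSURFParams( 500, 0 ) );
    printf( "found %d SURF keypoints\n", keypoints ? keypoints->total : 0 );
    return 0;
}

..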
.. index:: GetStarKeypoints
.. _GetStarKeypoints:
GetStarKeypoints
----------------
.. cfunction:: CvSeq* cvGetStarKeypoints( const CvArr* image, CvMemStorage* storage, CvStarDetectorParams params=cvStarDetectorParams() )
Retrieves keypoints using the StarDetector algorithm.
:param image: The input 8-bit grayscale image
:param storage: Memory storage where the keypoints will be stored
:param params: Various algorithm parameters given to the structure CvStarDetectorParams:
::
typedef struct CvStarDetectorParams
{
int maxSize; // maximal size of the features detected. The following
// values of the parameter are supported:
// 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128
int responseThreshold; // threshold for the approximated laplacian,
// used to eliminate weak features
int lineThresholdProjected; // another threshold for laplacian to
// eliminate edges
int lineThresholdBinarized; // another threshold for the feature
// scale to eliminate edges
int suppressNonmaxSize; // linear size of a pixel neighborhood
// for non-maxima suppression
}
CvStarDetectorParams;
..
The function ``cvGetStarKeypoints`` extracts keypoints that are local scale-space extrema. The scale-space is constructed by computing approximate values of Laplacians with different sigmas at each pixel. Instead of using pyramids, a popular approach to save computing time, all of the Laplacians are computed at each pixel of the original high-resolution image. But each approximate Laplacian value is computed in O(1) time regardless of the sigma, thanks to the use of integral images. The algorithm is based on the paper [Agrawal08]_, but instead of a square, hexagon, or octagon it uses an 8-end star shape, hence the name, consisting of overlapping upright and tilted squares.
Each computed feature is represented by the following structure:
::
typedef struct CvStarKeypoint
{
CvPoint pt; // coordinates of the feature
int size; // feature size, see CvStarDetectorParams::maxSize
float response; // the approximated laplacian value at that point.
}
CvStarKeypoint;
inline CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response);
..
Below is a small usage sample:
::
#include "cv.h"
#include "highgui.h"
int main(int argc, char** argv)
{
const char* filename = argc > 1 ? argv[1] : "lena.jpg";
IplImage* img = cvLoadImage( filename, 0 ), *cimg;
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* keypoints = 0;
int i;
if( !img )
return 0;
cvNamedWindow( "image", 1 );
cvShowImage( "image", img );
cvNamedWindow( "features", 1 );
cimg = cvCreateImage( cvGetSize(img), 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
keypoints = cvGetStarKeypoints( img, storage, cvStarDetectorParams(45) );
for( i = 0; i < (keypoints ? keypoints->total : 0); i++ )
{
CvStarKeypoint kpt = *(CvStarKeypoint*)cvGetSeqElem(keypoints, i);
int r = kpt.size/2;
cvCircle( cimg, kpt.pt, r, CV_RGB(0,255,0));
cvLine( cimg, cvPoint(kpt.pt.x + r, kpt.pt.y + r),
cvPoint(kpt.pt.x - r, kpt.pt.y - r), CV_RGB(0,255,0));
cvLine( cimg, cvPoint(kpt.pt.x - r, kpt.pt.y + r),
cvPoint(kpt.pt.x + r, kpt.pt.y - r), CV_RGB(0,255,0));
}
cvShowImage( "features", cimg );
cvWaitKey(0);
return 0;
}
..
*************************************
highgui. High-level GUI and Media I/O
*************************************
While OpenCV was designed for use in full-scale applications and can be used within functionally rich UI frameworks (such as Qt, WinForms, or Cocoa) or without any UI at all, sometimes there is a need to try some functionality quickly and visualize the results. This is what the HighGUI module has been designed for.

It provides an easy interface to:

* create and manipulate windows that can display images and "remember" their content (no need to handle repaint events from the OS)
* add trackbars to the windows, handle simple mouse events as well as keyboard commands
* read and write images to/from disk or memory
* read video from a camera or file and write video to a file
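For example, displaying an image and waiting for a key press takes only a few calls (a minimal sketch; the window name and file name are illustrative): ::

#include "highgui.h"

int main( int argc, char** argv )
{
    IplImage* img = cvLoadImage( argc > 1 ? argv[1] : "lena.jpg", 1 );
    if( !img )
        return -1;
    cvNamedWindow( "demo", 1 );   /* the window "remembers" its content */
    cvShowImage( "demo", img );
    cvWaitKey(0);                 /* wait for any key */
    cvReleaseImage( &img );
    return 0;
}

..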
.. toctree::
    :maxdepth: 2

    highgui_user_interface
    highgui_reading_and_writing_images_and_video
    highgui_qt_new_functions
*************************
imgproc. Image Processing
*************************
.. toctree::
    :maxdepth: 2

    imgproc_histograms
    imgproc_image_filtering
    imgproc_geometric_image_transformations
    imgproc_miscellaneous_image_transformations
    imgproc_structural_analysis_and_shape_descriptors
    imgproc_planar_subdivisions
    imgproc_motion_analysis_and_object_tracking
    imgproc_feature_detection
    imgproc_object_detection
Object Detection
================
.. highlight:: c
.. index:: MatchTemplate
.. _MatchTemplate:
MatchTemplate
-------------
.. cfunction:: void cvMatchTemplate( const CvArr* image, const CvArr* templ, CvArr* result, int method )
Compares a template against overlapped image regions.
:param image: Image where the search is running; should be 8-bit or 32-bit floating-point
:param templ: Searched template; must not be greater than the source image, and must have the same data type as the image
:param result: A map of comparison results; single-channel 32-bit floating-point.
If ``image`` is :math:`W \times H` and ``templ`` is :math:`w \times h` then ``result`` must be :math:`(W-w+1) \times (H-h+1)`
:param method: Specifies the way the template must be compared with the image regions (see below)
The function is similar to :ref:`CalcBackProjectPatch`. It slides through ``image``, compares the overlapped patches of size :math:`w \times h` against ``templ`` using the specified method, and stores the comparison results to ``result``. Here are the formulas for the different comparison methods one may use (:math:`I` denotes ``image``, :math:`T` ``templ``, :math:`R` ``result``); the summation is done over the template and/or the image patch: :math:`x' = 0...w-1, y' = 0...h-1`
* method=CV\_TM\_SQDIFF
.. math::
R(x,y)= \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2
* method=CV\_TM\_SQDIFF\_NORMED
.. math::
R(x,y)= \frac{\sum_{x',y'} (T(x',y')-I(x+x',y+y'))^2}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}
* method=CV\_TM\_CCORR
.. math::
R(x,y)= \sum _{x',y'} (T(x',y') \cdot I(x+x',y+y'))
* method=CV\_TM\_CCORR\_NORMED
.. math::
R(x,y)= \frac{\sum_{x',y'} (T(x',y') \cdot I(x+x',y+y'))}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}
* method=CV\_TM\_CCOEFF
.. math::
R(x,y)= \sum _{x',y'} (T'(x',y') \cdot I'(x+x',y+y'))
where
.. math::
\begin{array}{l} T'(x',y')=T(x',y') - 1/(w \cdot h) \cdot \sum _{x'',y''} T(x'',y'') \\ I'(x+x',y+y')=I(x+x',y+y') - 1/(w \cdot h) \cdot \sum _{x'',y''} I(x+x'',y+y'') \end{array}
* method=CV\_TM\_CCOEFF\_NORMED
.. math::
R(x,y)= \frac{ \sum_{x',y'} (T'(x',y') \cdot I'(x+x',y+y')) }{ \sqrt{\sum_{x',y'}T'(x',y')^2 \cdot \sum_{x',y'} I'(x+x',y+y')^2} }
After the function finishes the comparison, the best matches can be found as global minimums (``CV_TM_SQDIFF``) or maximums (``CV_TM_CCORR`` and ``CV_TM_CCOEFF``) using the :ref:`MinMaxLoc` function. In the case of a color image, template summation in the numerator and each sum in the denominator is done over all of the channels (and separate mean values are used for each channel).
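As an illustration, here is a minimal sketch (not from the original manual; the file names are illustrative) that locates the best match with ``CV_TM_SQDIFF_NORMED``, where the global minimum wins: ::

#include "cv.h"
#include "highgui.h"

int main( int argc, char** argv )
{
    IplImage* image = cvLoadImage( "scene.jpg", 0 );
    IplImage* templ = cvLoadImage( "patch.jpg", 0 );
    IplImage* result;
    CvPoint minloc, maxloc;
    double minval, maxval;
    if( !image || !templ )
        return -1;
    /* result is (W-w+1) x (H-h+1), single-channel 32-bit floating-point */
    result = cvCreateImage( cvSize( image->width - templ->width + 1,
                                    image->height - templ->height + 1 ),
                            IPL_DEPTH_32F, 1 );
    cvMatchTemplate( image, templ, result, CV_TM_SQDIFF_NORMED );
    /* for the squared-difference methods the best match is the minimum */
    cvMinMaxLoc( result, &minval, &maxval, &minloc, &maxloc, 0 );
    /* minloc is the top-left corner of the best-matching region */
    cvRectangle( image, minloc,
                 cvPoint( minloc.x + templ->width, minloc.y + templ->height ),
                 cvScalarAll(255), 2, 8, 0 );
    return 0;
}

..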
***************************
objdetect. Object Detection
***************************
.. toctree::
    :maxdepth: 2

    objdetect_cascade_classification
*********************
video. Video Analysis
*********************
.. toctree::
    :maxdepth: 2

    video_motion_analysis_and_object_tracking