Commit 1523fdcc authored by Maksim Shabunin

Doxygen documentation: more fixes and cleanups

parent f9a83c28
......@@ -146,43 +146,59 @@ if(BUILD_DOCS AND HAVE_SPHINX)
endif()
# ========= Doxygen docs =========
macro(make_reference result modules_list black_list)
set(_res)
foreach(m ${${modules_list}})
list(FIND ${black_list} ${m} _pos)
if(${_pos} EQUAL -1)
set(_res "${_res} @ref ${m} | ${m} \n")
endif()
endforeach()
set(${result} ${_res})
endmacro()
if(BUILD_DOCS AND HAVE_DOXYGEN)
# documented modules list
set(candidates)
list(APPEND candidates ${BASE_MODULES} ${EXTRA_MODULES})
# blacklisted modules
ocv_list_filterout(candidates "^ts$")
# not documented modules list
list(APPEND blacklist "ts" "java" "python2" "python3" "world")
# gathering headers
set(all_headers) # files and dirs to process
set(all_images) # image search paths
set(reflist) # modules reference
foreach(m ${candidates})
set(reflist "${reflist} \n- @subpage ${m}")
set(paths_include)
set(paths_doc)
set(paths_bib)
foreach(m ${BASE_MODULES} ${EXTRA_MODULES})
list(FIND blacklist ${m} _pos)
if(${_pos} EQUAL -1)
set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
if(EXISTS ${header_dir})
set(all_headers ${all_headers} ${header_dir})
if(EXISTS "${header_dir}")
list(APPEND paths_include "${header_dir}")
endif()
set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
if(EXISTS ${docs_dir})
set(all_images ${all_images} ${docs_dir})
set(all_headers ${all_headers} ${docs_dir})
if(EXISTS "${docs_dir}")
list(APPEND paths_doc "${docs_dir}")
file(GLOB bib_file "${docs_dir}/*.bib")
if(EXISTS "${bib_file}")
list(APPEND paths_bib "${bib_file}")
endif()
endif()
endif()
endforeach()
# additional config
set(doxyfile "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
set(rootfile "${CMAKE_CURRENT_BINARY_DIR}/root.markdown")
set(all_headers ${all_headers} ${rootfile})
string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_INPUT_LIST "${all_headers}")
string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_IMAGE_PATH "${all_images}")
set(bibfile "${CMAKE_CURRENT_SOURCE_DIR}/opencv.bib")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${paths_include} ; ${paths_doc}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp ; ${paths_doc}")
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml")
set(CMAKE_DOXYGEN_OUTPUT_PATH "doxygen")
set(CMAKE_DOXYGEN_MODULES_REFERENCE "${reflist}")
set(CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp")
set(CMAKE_EXTRA_BIB_FILES "${bibfile} ${paths_bib}")
# generate references
make_reference(CMAKE_DOXYGEN_MAIN_REFERENCE BASE_MODULES blacklist)
make_reference(CMAKE_DOXYGEN_EXTRA_REFERENCE EXTRA_MODULES blacklist)
# writing file
configure_file(Doxyfile.in ${doxyfile} @ONLY)
......
......@@ -85,7 +85,7 @@ SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE = @CMAKE_DOXYGEN_LAYOUT@
CITE_BIB_FILES = @CMAKE_CURRENT_SOURCE_DIR@/opencv.bib
CITE_BIB_FILES = @CMAKE_EXTRA_BIB_FILES@
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
......@@ -222,6 +222,7 @@ INCLUDE_FILE_PATTERNS =
PREDEFINED = __cplusplus=1 \
HAVE_IPP_A=1 \
CVAPI(x)=x \
CV_DOXYGEN= \
CV_EXPORTS= \
CV_EXPORTS_W= \
CV_EXPORTS_W_SIMPLE= \
......
......@@ -3,9 +3,14 @@ OpenCV modules {#mainpage}
@subpage intro
Module name | Folder
------------- | -------------
@ref core | core
@ref imgproc | imgproc
### Main modules
<!-- @CMAKE_DOXYGEN_MODULES_REFERENCE@ -->
Module name | Folder
-------------- | -------------
@CMAKE_DOXYGEN_MAIN_REFERENCE@
### Extra modules
Module name | Folder
-------------- | -------------
@CMAKE_DOXYGEN_EXTRA_REFERENCE@
......@@ -3,6 +3,12 @@
#include <camera_properties.h>
/** @defgroup androidcamera Android Camera Support
*/
//! @addtogroup androidcamera
//! @{
class CameraActivity
{
public:
......@@ -44,4 +50,6 @@ private:
int frameHeight;
};
//! @}
#endif
#ifndef CAMERA_PROPERTIES_H
#define CAMERA_PROPERTIES_H
//! @addtogroup androidcamera
//! @{
enum {
ANDROID_CAMERA_PROPERTY_FRAMEWIDTH = 0,
ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT = 1,
......@@ -67,4 +70,6 @@ enum {
ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX
};
//! @}
#endif // CAMERA_PROPERTIES_H
......@@ -326,11 +326,11 @@ CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCou
Its interface is also Mat-like but with additional memory type parameters.
- **PAGE\_LOCKED** sets a page locked memory type used commonly for fast and asynchronous
- **PAGE_LOCKED** sets a page locked memory type used commonly for fast and asynchronous
uploading/downloading data from/to GPU.
- **SHARED** specifies a zero copy memory allocation that enables mapping the host memory to GPU
address space, if supported.
- **WRITE\_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are
- **WRITE_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are
used to supply GPU with data when GPU only reads it. The advantage is a better CPU cache
utilization.
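Example (editor's sketch, not part of this commit; assumes the CudaMem class this
comment documents):
@code
// Allocate page-locked host memory so uploads can run asynchronously.
cv::cuda::CudaMem page_locked(480, 640, CV_8UC3, cv::cuda::CudaMem::PAGE_LOCKED);
cv::Mat h_frame = page_locked.createMatHeader(); // CPU-side view, fill as usual
cv::cuda::GpuMat d_frame;
cv::cuda::Stream stream;
d_frame.upload(h_frame, stream); // asynchronous copy requires a page-locked source
@endcode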
......@@ -581,7 +581,7 @@ class CV_EXPORTS TargetArchs
public:
/** @brief The following method checks whether the module was built with the support of the given feature:
@param feature\_set Features to be checked. See :ocvcuda::FeatureSet.
@param feature_set Features to be checked. See cuda::FeatureSet.
*/
static bool builtWith(FeatureSet feature_set);
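/* Editor's sketch (not in the original header): a guard for CUDA-specific code paths,
   assuming the FeatureSet enum from this module:

       if (cv::cuda::TargetArchs::builtWith(cv::cuda::FEATURE_SET_COMPUTE_30))
       {
           // safe to run kernels compiled for compute capability 3.0
       }
*/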
......@@ -611,9 +611,9 @@ public:
/** @brief The constructors.
@param device\_id System index of the CUDA device starting with 0.
@param device_id System index of the CUDA device starting with 0.
Constructs the DeviceInfo object for the specified device. If device\_id parameter is missed, it
Constructs the DeviceInfo object for the specified device. If device_id parameter is missed, it
constructs an object for the current device.
*/
DeviceInfo(int device_id);
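/* Editor's sketch (not in the original header): querying a device, assuming the
   DeviceInfo accessors of this era:

       cv::cuda::DeviceInfo info(0);            // first CUDA device
       std::cout << info.name() << ", "
                 << info.totalMemory() / (1 << 20) << " MB" << std::endl;
*/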
......@@ -793,7 +793,7 @@ public:
/** @brief Provides information on CUDA feature support.
@param feature\_set Features to be checked. See cuda::FeatureSet.
@param feature_set Features to be checked. See cuda::FeatureSet.
This function returns true if the device has the specified CUDA feature. Otherwise, it returns false.
*/
......
......@@ -66,7 +66,7 @@ namespace cv
class Stream;
class Event;
/** @brief Class that enables getting cudaStream\_t from cuda::Stream
/** @brief Class that enables getting cudaStream_t from cuda::Stream
because it is the only public header that depends on the CUDA Runtime API. Including it
brings a dependency to your code.
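Example (editor's sketch, not part of this commit):
@code
#include <opencv2/core/cuda_stream_accessor.hpp>

cv::cuda::Stream stream;
cudaStream_t raw = cv::cuda::StreamAccessor::getStream(stream);
// raw can now be passed to custom CUDA kernels launched on the same stream
@endcode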
......
......@@ -83,11 +83,11 @@ as possible.
@note
- An example applying the HOG descriptor for people detection can be found at
opencv\_source\_code/samples/cpp/peopledetect.cpp
opencv_source_code/samples/cpp/peopledetect.cpp
- A CUDA example applying the HOG descriptor for people detection can be found at
opencv\_source\_code/samples/gpu/hog.cpp
opencv_source_code/samples/gpu/hog.cpp
- (Python) An example applying the HOG descriptor for people detection can be found at
opencv\_source\_code/samples/python2/peopledetect.py
opencv_source_code/samples/python2/peopledetect.py
*/
struct CV_EXPORTS HOGDescriptor
{
......@@ -97,14 +97,14 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Creates the HOG descriptor and detector.
@param win\_size Detection window size. Align to block size and block stride.
@param block\_size Block size in pixels. Align to cell size. Only (16,16) is supported for now.
@param block\_stride Block stride. It must be a multiple of cell size.
@param cell\_size Cell size. Only (8, 8) is supported for now.
@param win_size Detection window size. Align to block size and block stride.
@param block_size Block size in pixels. Align to cell size. Only (16,16) is supported for now.
@param block_stride Block stride. It must be a multiple of cell size.
@param cell_size Cell size. Only (8, 8) is supported for now.
@param nbins Number of bins. Only 9 bins per cell are supported for now.
@param win\_sigma Gaussian smoothing window parameter.
@param threshold\_L2hys L2-Hys normalization method shrinkage.
@param gamma\_correction Flag to specify whether the gamma correction preprocessing is required or
@param win_sigma Gaussian smoothing window parameter.
@param threshold_L2hys L2-Hys normalization method shrinkage.
@param gamma_correction Flag to specify whether the gamma correction preprocessing is required or
not.
@param nlevels Maximum number of detection window increases.
*/
......@@ -137,13 +137,13 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Performs object detection without a multi-scale window.
@param img Source image. CV\_8UC1 and CV\_8UC4 types are supported for now.
@param found\_locations Left-top corner points of detected objects boundaries.
@param hit\_threshold Threshold for the distance between features and SVM classifying plane.
@param img Source image. CV_8UC1 and CV_8UC4 types are supported for now.
@param found_locations Left-top corner points of detected objects boundaries.
@param hit_threshold Threshold for the distance between features and SVM classifying plane.
Usually it is 0 and should be specified in the detector coefficients (as the last free
coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
manually here.
@param win\_stride Window stride. It must be a multiple of block stride.
@param win_stride Window stride. It must be a multiple of block stride.
@param padding Mock parameter to keep the CPU interface compatibility. It must be (0,0).
*/
void detect(const GpuMat& img, std::vector<Point>& found_locations,
......@@ -153,13 +153,13 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Performs object detection with a multi-scale window.
@param img Source image. See cuda::HOGDescriptor::detect for type limitations.
@param found\_locations Detected objects boundaries.
@param hit\_threshold Threshold for the distance between features and SVM classifying plane. See
@param found_locations Detected objects boundaries.
@param hit_threshold Threshold for the distance between features and SVM classifying plane. See
cuda::HOGDescriptor::detect for details.
@param win\_stride Window stride. It must be a multiple of block stride.
@param win_stride Window stride. It must be a multiple of block stride.
@param padding Mock parameter to keep the CPU interface compatibility. It must be (0,0).
@param scale0 Coefficient of the detection window increase.
@param group\_threshold Coefficient to regulate the similarity threshold. When detected, some
@param group_threshold Coefficient to regulate the similarity threshold. When detected, some
objects can be covered by many rectangles. 0 means not to perform grouping. See groupRectangles .
*/
void detectMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
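/* Editor's sketch (not in the original header): a typical people-detection pipeline,
   assuming the pre-3.0 struct-style cuda::HOGDescriptor shown above:

       cv::cuda::HOGDescriptor hog; // default 64x128 detection window
       hog.setSVMDetector(cv::cuda::HOGDescriptor::getPeopleDetector64x128());
       cv::cuda::GpuMat d_img(host_gray); // CV_8UC1
       std::vector<cv::Rect> found;
       hog.detectMultiScale(d_img, found);
*/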
......@@ -177,11 +177,11 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Returns block descriptors computed for the whole image.
@param img Source image. See cuda::HOGDescriptor::detect for type limitations.
@param win\_stride Window stride. It must be a multiple of block stride.
@param win_stride Window stride. It must be a multiple of block stride.
@param descriptors 2D array of descriptors.
@param descr\_format Descriptor storage format:
- **DESCR\_FORMAT\_ROW\_BY\_ROW** - Row-major order.
- **DESCR\_FORMAT\_COL\_BY\_COL** - Column-major order.
@param descr_format Descriptor storage format:
- **DESCR_FORMAT_ROW_BY_ROW** - Row-major order.
- **DESCR_FORMAT_COL_BY_COL** - Column-major order.
The function is mainly used to learn the classifier.
*/
......@@ -236,9 +236,9 @@ protected:
@note
- A cascade classifier example can be found at
opencv\_source\_code/samples/gpu/cascadeclassifier.cpp
opencv_source_code/samples/gpu/cascadeclassifier.cpp
- An NVIDIA API-specific cascade classifier example can be found at
opencv\_source\_code/samples/gpu/cascadeclassifier\_nvidia\_api.cpp
opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
*/
class CV_EXPORTS CascadeClassifier_CUDA
{
......@@ -271,7 +271,7 @@ public:
int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size());
/** @brief Detects objects of different sizes in the input image.
@param image Matrix of type CV\_8U containing an image where objects should be detected.
@param image Matrix of type CV_8U containing an image where objects should be detected.
@param objectsBuf Buffer to store detected objects (rectangles). If it is empty, it is allocated
with the default size. If not empty, the function searches not more than N objects, where
N = sizeof(objectsBuf's data)/sizeof(cv::Rect).
......@@ -364,15 +364,15 @@ CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tve
@param object Single-row matrix of object points.
@param image Single-row matrix of image points.
@param camera\_mat 3x3 matrix of intrinsic camera parameters.
@param dist\_coef Distortion coefficients. See undistortPoints for details.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param rvec Output 3D rotation vector.
@param tvec Output 3D translation vector.
@param use\_extrinsic\_guess Flag to indicate that the function must use rvec and tvec as an
@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
initial transformation guess. It is not supported for now.
@param num\_iters Maximum number of RANSAC iterations.
@param max\_dist Euclidean distance threshold to detect whether point is inlier or not.
@param min\_inlier\_count Flag to indicate that the function must stop if greater or equal number
@param num_iters Maximum number of RANSAC iterations.
@param max_dist Euclidean distance threshold to detect whether point is inlier or not.
@param min_inlier_count Flag to indicate that the function must stop if greater or equal number
of inliers is achieved. It is not supported for now.
@param inliers Output vector of inlier indices.
*/
......
......@@ -75,7 +75,7 @@ class implements algorithm described in @cite MOG2001.
@note
- An example on Gaussian mixture based background/foreground segmentation can be found at
opencv\_source\_code/samples/gpu/bgfg\_segm.cpp
opencv_source_code/samples/gpu/bgfg_segm.cpp
*/
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
......@@ -216,7 +216,7 @@ class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground\_regions Output array (CPU memory).
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
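/* Editor's sketch (not in the original header): retrieving FGD foreground regions,
   assuming a createBackgroundSubtractorFGD factory as in the CUDA bgsegm module:

       cv::Ptr<cv::cuda::BackgroundSubtractorFGD> fgd =
           cv::cuda::createBackgroundSubtractorFGD();
       fgd->apply(d_frame, d_fgmask);
       std::vector<std::vector<cv::Point> > regions;
       fgd->getForegroundRegions(regions);
*/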
......
......@@ -170,7 +170,7 @@ The implementation uses H264 video codec.
@note
- An example on how to use the videoWriter class can be found at
opencv\_source\_code/samples/gpu/video\_writer.cpp
opencv_source_code/samples/gpu/video_writer.cpp
*/
class CV_EXPORTS VideoWriter
{
......@@ -195,8 +195,8 @@ public:
@param fileName Name of the output video file. Only AVI file format is supported.
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
The constructors initialize video writer. FFMPEG is used to write videos. User can implement own
......@@ -208,8 +208,8 @@ CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frame
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param params Encoder parameters. See cudacodec::EncoderParams .
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
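/* Editor's sketch (not in the original header): writing GPU frames to an AVI file,
   assuming the cudacodec API declared above:

       cv::Ptr<cv::cudacodec::VideoWriter> writer =
           cv::cudacodec::createVideoWriter("out.avi", cv::Size(1920, 1080), 25.0);
       writer->write(d_frame); // d_frame is a GpuMat in a supported surface format
*/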
......@@ -219,8 +219,8 @@ CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frame
want to work with raw video stream.
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
......@@ -230,8 +230,8 @@ want to work with raw video stream.
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param params Encoder parameters. See cudacodec::EncoderParams .
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
......@@ -282,7 +282,7 @@ struct FormatInfo
@note
- An example on how to use the videoReader class can be found at
opencv\_source\_code/samples/gpu/video\_reader.cpp
opencv_source_code/samples/gpu/video_reader.cpp
*/
class CV_EXPORTS VideoReader
{
......
......@@ -68,7 +68,7 @@ For each descriptor in the first set, this matcher finds the closest descriptor
by trying each one. This descriptor matcher supports masking permissible matches between descriptor
sets.
The class BFMatcher\_CUDA has an interface similar to the class DescriptorMatcher. It has two groups
The class BFMatcher_CUDA has an interface similar to the class DescriptorMatcher. It has two groups
of match methods: for matching descriptors of one image with another image or with an image set.
Also, all functions have an alternative to save results either to the GPU memory or to the CPU
memory.
......@@ -250,7 +250,7 @@ public:
@param nonmaxSuppression If it is true, non-maximum suppression is applied to detected corners
(keypoints).
@param keypointsRatio Inner buffer size for keypoints store is determined as (keypointsRatio \*
image\_width \* image\_height).
image_width \* image_height).
*/
explicit FAST_CUDA(int threshold, bool nonmaxSuppression = true, double keypointsRatio = 0.05);
......@@ -261,8 +261,8 @@ public:
@param mask Optional input mask that marks the regions where we should detect features.
@param keypoints The output vector of keypoints. Can be stored both in CPU and GPU memory. For GPU
memory:
- keypoints.ptr\<Vec2s\>(LOCATION\_ROW)[i] will contain location of i'th point
- keypoints.ptr\<float\>(RESPONSE\_ROW)[i] will contain response of i'th point (if non-maximum
- keypoints.ptr\<Vec2s\>(LOCATION_ROW)[i] will contain location of i'th point
- keypoints.ptr\<float\>(RESPONSE_ROW)[i] will contain response of i'th point (if non-maximum
suppression is applied)
*/
void operator ()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints);
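/* Editor's sketch (not in the original header): detecting and downloading FAST
   keypoints, assuming the FAST_CUDA interface shown above:

       cv::cuda::FAST_CUDA fast(20);            // threshold = 20
       cv::cuda::GpuMat d_keypoints;
       fast(d_img, cv::cuda::GpuMat(), d_keypoints);
       std::vector<cv::KeyPoint> keypoints;
       fast.downloadKeypoints(d_keypoints, keypoints);
*/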
......@@ -363,12 +363,12 @@ public:
@param mask Optional input mask that marks the regions where we should detect features.
@param keypoints The input/output vector of keypoints. Can be stored both in CPU and GPU memory.
For GPU memory:
- keypoints.ptr\<float\>(X\_ROW)[i] contains x coordinate of the i'th feature.
- keypoints.ptr\<float\>(Y\_ROW)[i] contains y coordinate of the i'th feature.
- keypoints.ptr\<float\>(RESPONSE\_ROW)[i] contains the response of the i'th feature.
- keypoints.ptr\<float\>(ANGLE\_ROW)[i] contains orientation of the i'th feature.
- keypoints.ptr\<float\>(OCTAVE\_ROW)[i] contains the octave of the i'th feature.
- keypoints.ptr\<float\>(SIZE\_ROW)[i] contains the size of the i'th feature.
- keypoints.ptr\<float\>(X_ROW)[i] contains x coordinate of the i'th feature.
- keypoints.ptr\<float\>(Y_ROW)[i] contains y coordinate of the i'th feature.
- keypoints.ptr\<float\>(RESPONSE_ROW)[i] contains the response of the i'th feature.
- keypoints.ptr\<float\>(ANGLE_ROW)[i] contains orientation of the i'th feature.
- keypoints.ptr\<float\>(OCTAVE_ROW)[i] contains the octave of the i'th feature.
- keypoints.ptr\<float\>(SIZE_ROW)[i] contains the size of the i'th feature.
@param descriptors Computed descriptors. if blurForDescriptor is true, image will be blurred
before descriptors calculation.
*/
......
......@@ -60,7 +60,7 @@ filtering operations on 2D images.
@note
- An example containing all basic morphology operators like erode and dilate can be found at
opencv\_source\_code/samples/gpu/morphology.cpp
opencv_source_code/samples/gpu/morphology.cpp
@}
*/
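/* Editor's sketch (not in the original header): the filter-object pattern used by
   this module, shown with a Gaussian filter (assumes the factory declared below):

       cv::Ptr<cv::cuda::Filter> gauss =
           cv::cuda::createGaussianFilter(CV_8UC1, CV_8UC1, cv::Size(5, 5), 1.5);
       gauss->apply(d_src, d_dst); // reusable: apply to many images of the same type
*/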
......@@ -89,7 +89,7 @@ public:
/** @brief Creates a normalized 2D box filter.
@param srcType Input image type. Only CV\_8UC1 and CV\_8UC4 are supported for now.
@param srcType Input image type. Only CV_8UC1 and CV_8UC4 are supported for now.
@param dstType Output image type. Only the same type as src is supported for now.
@param ksize Kernel size.
@param anchor Anchor point. The default value Point(-1, -1) means that the anchor is at the kernel
......@@ -107,7 +107,7 @@ CV_EXPORTS Ptr<Filter> createBoxFilter(int srcType, int dstType, Size ksize, Poi
/** @brief Creates a non-separable linear 2D filter.
@param srcType Input image type. Supports CV\_8U , CV\_16U and CV\_32F one and four channel image.
@param srcType Input image type. Supports CV_8U , CV_16U and CV_32F one and four channel image.
@param dstType Output image type. Only the same type as src is supported for now.
@param kernel 2D array of filter coefficients.
@param anchor Anchor point. The default value Point(-1, -1) means that the anchor is at the kernel
......@@ -125,7 +125,7 @@ CV_EXPORTS Ptr<Filter> createLinearFilter(int srcType, int dstType, InputArray k
/** @brief Creates a Laplacian operator.
@param srcType Input image type. Supports CV\_8U , CV\_16U and CV\_32F one and four channel image.
@param srcType Input image type. Supports CV_8U , CV_16U and CV_32F one and four channel image.
@param dstType Output image type. Only the same type as src is supported for now.
@param ksize Aperture size used to compute the second-derivative filters (see getDerivKernels). It
must be positive and odd. Only ksize = 1 and ksize = 3 are supported.
......@@ -243,14 +243,14 @@ CV_EXPORTS Ptr<Filter> createGaussianFilter(int srcType, int dstType, Size ksize
/** @brief Creates a 2D morphological filter.
@param op Type of morphological operation. The following types are possible:
- **MORPH\_ERODE** erode
- **MORPH\_DILATE** dilate
- **MORPH\_OPEN** opening
- **MORPH\_CLOSE** closing
- **MORPH\_GRADIENT** morphological gradient
- **MORPH\_TOPHAT** "top hat"
- **MORPH\_BLACKHAT** "black hat"
@param srcType Input/output image type. Only CV\_8UC1 and CV\_8UC4 are supported.
- **MORPH_ERODE** erode
- **MORPH_DILATE** dilate
- **MORPH_OPEN** opening
- **MORPH_CLOSE** closing
- **MORPH_GRADIENT** morphological gradient
- **MORPH_TOPHAT** "top hat"
- **MORPH_BLACKHAT** "black hat"
@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
@param kernel 2D 8-bit structuring element for the morphological operation.
@param anchor Anchor position within the structuring element. Negative values mean that the anchor
is at the center.
......@@ -265,7 +265,7 @@ CV_EXPORTS Ptr<Filter> createMorphologyFilter(int op, int srcType, InputArray ke
/** @brief Creates the maximum filter.
@param srcType Input/output image type. Only CV\_8UC1 and CV\_8UC4 are supported.
@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
......@@ -277,7 +277,7 @@ CV_EXPORTS Ptr<Filter> createBoxMaxFilter(int srcType, Size ksize,
/** @brief Creates the minimum filter.
@param srcType Input/output image type. Only CV\_8UC1 and CV\_8UC4 are supported.
@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
......@@ -292,8 +292,8 @@ CV_EXPORTS Ptr<Filter> createBoxMinFilter(int srcType, Size ksize,
/** @brief Creates a horizontal 1D box filter.
@param srcType Input image type. Only CV\_8UC1 type is supported for now.
@param dstType Output image type. Only CV\_32FC1 type is supported for now.
@param srcType Input image type. Only CV_8UC1 type is supported for now.
@param dstType Output image type. Only CV_32FC1 type is supported for now.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
......@@ -303,8 +303,8 @@ CV_EXPORTS Ptr<Filter> createRowSumFilter(int srcType, int dstType, int ksize, i
/** @brief Creates a vertical 1D box filter.
@param srcType Input image type. Only CV\_8UC1 type is supported for now.
@param dstType Output image type. Only CV\_32FC1 type is supported for now.
@param srcType Input image type. Only CV_8UC1 type is supported for now.
@param dstType Output image type. Only CV_32FC1 type is supported for now.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
......
......@@ -110,7 +110,7 @@ iterative Lucas-Kanade method with pyramids.
@note
- An example of the Lucas Kanade optical flow algorithm can be found at
opencv\_source\_code/samples/gpu/pyrlk\_optical\_flow.cpp
opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
*/
class CV_EXPORTS PyrLKOpticalFlow
{
......@@ -122,13 +122,13 @@ public:
@param prevImg First 8-bit input image (supports both grayscale and color images).
@param nextImg Second input image of the same size and the same type as prevImg .
@param prevPts Vector of 2D points for which the flow needs to be found. It must be one row matrix
with CV\_32FC2 type.
with CV_32FC2 type.
@param nextPts Output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image. When useInitialFlow
is true, the vector must have the same size as in the input.
@param status Output status vector (CV\_8UC1 type). Each element of the vector is set to 1 if the
@param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
flow for the corresponding features has been found. Otherwise, it is set to 0.
@param err Output vector (CV\_32FC1 type) that contains the difference between patches around the
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not
needed.
......@@ -145,7 +145,7 @@ public:
floating-point, single-channel
@param v Vertical component of the optical flow of the same size as input images, 32-bit
floating-point, single-channel
@param err Output vector (CV\_32FC1 type) that contains the difference between patches around the
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not
needed.
*/
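/* Editor's sketch (not in the original header): sparse and dense PyrLK calls,
   assuming the PyrLKOpticalFlow interface documented above:

       cv::cuda::PyrLKOpticalFlow pyrlk;
       pyrlk.sparse(d_prev, d_next, d_prevPts, d_nextPts, d_status); // per-point flow
       pyrlk.dense(d_prev, d_next, d_u, d_v);                        // per-pixel flow
*/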
......@@ -374,7 +374,7 @@ private:
@param bv Backward vertical displacement.
@param pos New frame position.
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV\_32FC1 type and contain 6
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.
......
......@@ -106,7 +106,7 @@ The class implements algorithm described in @cite Felzenszwalb2006 . It can comp
\f[width\_step \cdot height \cdot ndisp \cdot (1 + 0.25 + 0.0625 + \dotsm + \frac{1}{4^{levels}})\f]
width\_step is the number of bytes in a line including padding.
width_step is the number of bytes in a line including padding.
StereoBeliefPropagation uses a truncated linear model for the data cost and discontinuity terms:
......@@ -116,8 +116,8 @@ StereoBeliefPropagation uses a truncated linear model for the data cost and disc
For more details, see @cite Felzenszwalb2006.
By default, StereoBeliefPropagation uses floating-point arithmetics and the CV\_32FC1 type for
messages. But it can also use fixed-point arithmetics and the CV\_16SC1 message type for better
By default, StereoBeliefPropagation uses floating-point arithmetic and the CV_32FC1 type for
messages. But it can also use fixed-point arithmetic and the CV_16SC1 message type for better
performance. To avoid an overflow in this case, the parameters must satisfy the following
requirement:
......@@ -135,9 +135,9 @@ public:
/** @brief Enables the stereo correspondence operator that finds the disparity for the specified data cost.
@param data User-specified data cost, a matrix of msg\_type type and
@param data User-specified data cost, a matrix of msg_type type and
Size(\<image columns\>\*ndisp, \<image rows\>) size.
@param disparity Output disparity map. If disparity is empty, the output type is CV\_16SC1 .
@param disparity Output disparity map. If disparity is empty, the output type is CV_16SC1 .
Otherwise, the type is retained.
@param stream Stream for the asynchronous version.
*/
......@@ -182,7 +182,7 @@ public:
@param ndisp Number of disparities.
@param iters Number of BP iterations on each level.
@param levels Number of levels.
@param msg\_type Type for messages. CV\_16SC1 and CV\_32FC1 types are supported.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
createStereoBeliefPropagation(int ndisp = 64, int iters = 5, int levels = 5, int msg_type = CV_32F);
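/* Editor's sketch (not in the original header): running belief-propagation stereo,
   assuming the factory declared above:

       cv::Ptr<cv::cuda::StereoBeliefPropagation> bp =
           cv::cuda::createStereoBeliefPropagation(64);
       bp->compute(d_left, d_right, d_disparity);
*/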
......@@ -195,7 +195,7 @@ CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
The class implements algorithm described in @cite Yang2010. StereoConstantSpaceBP supports both local
minimum and global minimum data cost initialization algorithms. For more details, see the paper
mentioned above. By default, a local algorithm is used. To enable a global algorithm, set
use\_local\_init\_data\_cost to false .
use_local_init_data_cost to false .
StereoConstantSpaceBP uses a truncated linear model for the data cost and discontinuity terms:
......@@ -205,8 +205,8 @@ StereoConstantSpaceBP uses a truncated linear model for the data cost and discon
For more details, see @cite Yang2010.
By default, StereoConstantSpaceBP uses floating-point arithmetics and the CV\_32FC1 type for
messages. But it can also use fixed-point arithmetics and the CV\_16SC1 message type for better
By default, StereoConstantSpaceBP uses floating-point arithmetic and the CV_32FC1 type for
messages. But it can also use fixed-point arithmetic and the CV_16SC1 message type for better
performance. To avoid an overflow in this case, the parameters must satisfy the following
requirement:
......@@ -234,8 +234,8 @@ public:
@param ndisp Number of disparities.
@param iters Number of BP iterations on each level.
@param levels Number of levels.
@param nr\_plane Number of disparity levels on the first level.
@param msg\_type Type for messages. CV\_16SC1 and CV\_32FC1 types are supported.
@param nr_plane Number of disparity levels on the first level.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
createStereoConstantSpaceBP(int ndisp = 128, int iters = 8, int levels = 4, int nr_plane = 4, int msg_type = CV_32F);
......@@ -252,8 +252,8 @@ class CV_EXPORTS DisparityBilateralFilter : public cv::Algorithm
public:
/** @brief Refines a disparity map using joint bilateral filtering.
@param disparity Input disparity map. CV\_8UC1 and CV\_16SC1 types are supported.
@param image Input image. CV\_8UC1 and CV\_8UC3 types are supported.
@param disparity Input disparity map. CV_8UC1 and CV_16SC1 types are supported.
@param image Input image. CV_8UC1 and CV_8UC3 types are supported.
@param dst Destination disparity map. It has the same size and type as disparity .
@param stream Stream for the asynchronous version.
*/
......@@ -295,12 +295,12 @@ CV_EXPORTS Ptr<cuda::DisparityBilateralFilter>
/** @brief Reprojects a disparity image to 3D space.
@param disp Input disparity image. CV\_8U and CV\_16S types are supported.
@param disp Input disparity image. CV_8U and CV_16S types are supported.
@param xyzw Output 3- or 4-channel floating-point image of the same size as disp . Each element of
xyzw(x,y) contains 3D coordinates (x,y,z) or (x,y,z,1) of the point (x,y) , computed from the
disparity map.
@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained via stereoRectify .
@param dst\_cn The number of channels for output image. Can be 3 or 4.
@param dst_cn The number of channels for output image. Can be 3 or 4.
@param stream Stream for the asynchronous version.
@sa reprojectImageTo3D
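Example (editor's sketch, not part of this commit):
@code
cv::cuda::reprojectImageTo3D(d_disp, d_xyz, Q, 4); // 4-channel floating-point output
@endcode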
......@@ -309,8 +309,8 @@ CV_EXPORTS void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray
/** @brief Colors a disparity image.
@param src\_disp Source disparity image. CV\_8UC1 and CV\_16SC1 types are supported.
@param dst\_disp Output disparity image. It has the same size as src\_disp . The type is CV\_8UC4
@param src_disp Source disparity image. CV_8UC1 and CV_16SC1 types are supported.
@param dst_disp Output disparity image. It has the same size as src_disp . The type is CV_8UC4
in BGRA format (alpha = 255).
@param ndisp Number of disparities.
@param stream Stream for the asynchronous version.
......
......@@ -66,12 +66,12 @@ namespace cv { namespace cuda {
@param src Source image.
@param dst Destination image with the size the same as xmap and the type the same as src .
@param xmap X values. Only CV\_32FC1 type is supported.
@param ymap Y values. Only CV\_32FC1 type is supported.
@param interpolation Interpolation method (see resize ). INTER\_NEAREST , INTER\_LINEAR and
INTER\_CUBIC are supported for now.
@param borderMode Pixel extrapolation method (see borderInterpolate ). BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param xmap X values. Only CV_32FC1 type is supported.
@param ymap Y values. Only CV_32FC1 type is supported.
@param interpolation Interpolation method (see resize ). INTER_NEAREST , INTER_LINEAR and
INTER_CUBIC are supported for now.
@param borderMode Pixel extrapolation method (see borderInterpolate ). BORDER_REFLECT101 ,
BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
@param borderValue Value used in case of a constant border. By default, it is 0.
@param stream Stream for the asynchronous version.
......@@ -99,7 +99,7 @@ Either dsize or both fx and fy must be non-zero.
\f[\texttt{(double)dsize.width/src.cols}\f]
@param fy Scale factor along the vertical axis. If it is zero, it is computed as:
\f[\texttt{(double)dsize.height/src.rows}\f]
@param interpolation Interpolation method. INTER\_NEAREST , INTER\_LINEAR and INTER\_CUBIC are
@param interpolation Interpolation method. INTER_NEAREST , INTER_LINEAR and INTER_CUBIC are
supported for now.
@param stream Stream for the asynchronous version.
......@@ -109,14 +109,14 @@ CV_EXPORTS void resize(InputArray src, OutputArray dst, Size dsize, double fx=0,
/** @brief Applies an affine transformation to an image.
@param src Source image. CV\_8U , CV\_16U , CV\_32S , or CV\_32F depth and 1, 3, or 4 channels are
@param src Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
supported.
@param dst Destination image with the same type as src . The size is dsize .
@param M *2x3* transformation matrix.
@param dsize Size of the destination image.
@param flags Combination of interpolation methods (see resize) and the optional flag
WARP\_INVERSE\_MAP specifying that M is an inverse transformation ( dst=\>src ). Only
INTER\_NEAREST , INTER\_LINEAR , and INTER\_CUBIC interpolation methods are supported.
WARP_INVERSE_MAP specifying that M is an inverse transformation ( dst=\>src ). Only
INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are supported.
@param borderMode
@param borderValue
@param stream Stream for the asynchronous version.
......@@ -131,8 +131,8 @@ CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size d
@param M *2x3* transformation matrix.
@param inverse Flag specifying that M is an inverse transformation ( dst=\>src ).
@param dsize Size of the destination image.
@param xmap X values with CV\_32FC1 type.
@param ymap Y values with CV\_32FC1 type.
@param xmap X values with CV_32FC1 type.
@param ymap Y values with CV_32FC1 type.
@param stream Stream for the asynchronous version.
@sa cuda::warpAffine , cuda::remap
......@@ -141,14 +141,14 @@ CV_EXPORTS void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, Outp
/** @brief Applies a perspective transformation to an image.
@param src Source image. CV\_8U , CV\_16U , CV\_32S , or CV\_32F depth and 1, 3, or 4 channels are
@param src Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
supported.
@param dst Destination image with the same type as src . The size is dsize .
@param M *3x3* transformation matrix.
@param dsize Size of the destination image.
@param flags Combination of interpolation methods (see resize ) and the optional flag
WARP\_INVERSE\_MAP specifying that M is the inverse transformation ( dst =\> src ). Only
INTER\_NEAREST , INTER\_LINEAR , and INTER\_CUBIC interpolation methods are supported.
WARP_INVERSE_MAP specifying that M is the inverse transformation ( dst =\> src ). Only
INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are supported.
@param borderMode
@param borderValue
@param stream Stream for the asynchronous version.
......@@ -163,8 +163,8 @@ CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, S
@param M *3x3* transformation matrix.
@param inverse Flag specifying that M is an inverse transformation ( dst=\>src ).
@param dsize Size of the destination image.
@param xmap X values with CV\_32FC1 type.
@param ymap Y values with CV\_32FC1 type.
@param xmap X values with CV_32FC1 type.
@param ymap Y values with CV_32FC1 type.
@param stream Stream for the asynchronous version.
@sa cuda::warpPerspective , cuda::remap
......@@ -188,14 +188,14 @@ CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, InputArray K
/** @brief Rotates an image around the origin (0,0) and then shifts it.
@param src Source image. Supports 1, 3 or 4 channels images with CV\_8U , CV\_16U or CV\_32F
@param src Source image. Supports 1, 3 or 4 channels images with CV_8U , CV_16U or CV_32F
depth.
@param dst Destination image with the same type as src . The size is dsize .
@param dsize Size of the destination image.
@param angle Angle of rotation in degrees.
@param xShift Shift along the horizontal axis.
@param yShift Shift along the vertical axis.
@param interpolation Interpolation method. Only INTER\_NEAREST , INTER\_LINEAR , and INTER\_CUBIC
@param interpolation Interpolation method. Only INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC
are supported.
@param stream Stream for the asynchronous version.
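Example (editor's sketch, not part of this commit):
@code
cv::cuda::rotate(d_src, d_dst, d_src.size(), 30.0); // rotate 30 degrees about (0,0)
@endcode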
......
......@@ -59,11 +59,11 @@ implement vector descriptor matchers inherit the DescriptorMatcher interface.
@note
- An example explaining keypoint matching can be found at
opencv\_source\_code/samples/cpp/descriptor\_extractor\_matcher.cpp
opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
- An example on descriptor matching evaluation can be found at
opencv\_source\_code/samples/cpp/detector\_descriptor\_matcher\_evaluation.cpp
opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
- An example on one to many image matching can be found at
opencv\_source\_code/samples/cpp/matching\_to\_many\_images.cpp
opencv_source_code/samples/cpp/matching_to_many_images.cpp
@defgroup features2d_draw Drawing Function of Keypoints and Matches
@defgroup features2d_category Object Categorization
......@@ -72,9 +72,9 @@ This section describes approaches based on local 2D features and used to categor
@note
- A complete Bag-Of-Words sample can be found at
opencv\_source\_code/samples/cpp/bagofwords\_classification.cpp
opencv_source_code/samples/cpp/bagofwords_classification.cpp
- (Python) An example using the features2D framework to perform object categorization can be
found at opencv\_source\_code/samples/python2/find\_obj.py
found at opencv_source_code/samples/python2/find_obj.py
@}
*/
......@@ -265,22 +265,22 @@ public:
will mean that to cover certain scale range you will need more pyramid levels and so the speed
will suffer.
@param nlevels The number of pyramid levels. The smallest level will have linear size equal to
input\_image\_linear\_size/pow(scaleFactor, nlevels).
input_image_linear_size/pow(scaleFactor, nlevels).
@param edgeThreshold This is size of the border where the features are not detected. It should
roughly match the patchSize parameter.
@param firstLevel It should be 0 in the current implementation.
@param WTA\_K The number of points that produce each element of the oriented BRIEF descriptor. The
@param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The
default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,
so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3
random points (of course, those point coordinates are random, but they are generated from the
pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel
rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such
output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,
denoted as NORM\_HAMMING2 (2 bits per bin). When WTA\_K=4, we take 4 random points to compute each
denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each
bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
@param scoreType The default HARRIS\_SCORE means that Harris algorithm is used to rank features
@param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features
(the score is written to KeyPoint::score and is used to retain best nfeatures features);
FAST\_SCORE is alternative value of the parameter that produces slightly less stable keypoints,
FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints,
but it is a little faster to compute.
@param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller
pyramid layers the perceived image area covered by a feature will be larger.
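Example (editor's sketch, not part of this commit; assumes the constructor-style ORB
API this comment documents):
@code
cv::ORB orb(500, 1.2f, 8);                 // nfeatures, scaleFactor, nlevels
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
orb(image, cv::noArray(), keypoints, descriptors);
@endcode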
......@@ -325,7 +325,7 @@ The class encapsulates all the parameters of the MSER extraction algorithm (see
@note
- (Python) A complete example showing the use of the MSER detector can be found at
opencv\_source\_code/samples/python2/mser.py
opencv_source_code/samples/python2/mser.py
*/
class CV_EXPORTS_W MSER : public Feature2D
{
......@@ -366,13 +366,13 @@ circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the three neighborhoods as defined in the paper:
FastFeatureDetector::TYPE\_9\_16, FastFeatureDetector::TYPE\_7\_12,
FastFeatureDetector::TYPE\_5\_8
FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
FastFeatureDetector::TYPE_5_8
Detects corners using the FAST algorithm by @cite Rosten06.
@note In Python API, types are given as cv2.FAST\_FEATURE\_DETECTOR\_TYPE\_5\_8,
cv2.FAST\_FEATURE\_DETECTOR\_TYPE\_7\_12 and cv2.FAST\_FEATURE\_DETECTOR\_TYPE\_9\_16. For corner
@note In Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
detection, use cv2.FAST.detect() method.
*/
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
......@@ -529,8 +529,8 @@ public:
@param threshold Detector response threshold to accept point
@param nOctaves Maximum octave evolution of the image
@param nOctaveLayers Default number of sublevels per scale level
@param diffusivity Diffusivity type. DIFF\_PM\_G1, DIFF\_PM\_G2, DIFF\_WEICKERT or
DIFF\_CHARBONNIER
@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
DIFF_CHARBONNIER
*/
CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
float threshold = 0.001f,
......@@ -577,15 +577,15 @@ public:
/** @brief The AKAZE constructor
@param descriptor\_type Type of the extracted descriptor: DESCRIPTOR\_KAZE,
DESCRIPTOR\_KAZE\_UPRIGHT, DESCRIPTOR\_MLDB or DESCRIPTOR\_MLDB\_UPRIGHT.
@param descriptor\_size Size of the descriptor in bits. 0 -\> Full size
@param descriptor\_channels Number of channels in the descriptor (1, 2, 3)
@param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
@param descriptor_size Size of the descriptor in bits. 0 -\> Full size
@param descriptor_channels Number of channels in the descriptor (1, 2, 3)
@param threshold Detector response threshold to accept point
@param nOctaves Maximum octave evolution of the image
@param nOctaveLayers Default number of sublevels per scale level
@param diffusivity Diffusivity type. DIFF\_PM\_G1, DIFF\_PM\_G2, DIFF\_WEICKERT or
DIFF\_CHARBONNIER
@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
DIFF_CHARBONNIER
*/
CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
int descriptor_size = 0, int descriptor_channels = 3,
......@@ -947,9 +947,9 @@ class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
public:
/** @brief Brute-force matcher constructor.
@param normType One of NORM\_L1, NORM\_L2, NORM\_HAMMING, NORM\_HAMMING2. L1 and L2 norms are
preferable choices for SIFT and SURF descriptors, NORM\_HAMMING should be used with ORB, BRISK and
BRIEF, NORM\_HAMMING2 should be used with ORB when WTA\_K==3 or 4 (see ORB::ORB constructor
@param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
description).
@param crossCheck If it is false, this will be the default BFMatcher behaviour when it finds the k
nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
......@@ -977,7 +977,7 @@ protected:
/** @brief Flann-based descriptor matcher.
This matcher trains flann::Index\_ on a train descriptor collection and calls its nearest search
This matcher trains flann::Index_ on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher. FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this. :
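Example (editor's sketch, not part of this commit):
@code
cv::FlannBasedMatcher matcher;               // descriptors must be CV_32F
std::vector<cv::DMatch> matches;
matcher.match(queryDescriptors, trainDescriptors, matches);
@endcode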
......@@ -1053,9 +1053,9 @@ output image. See possible flags bit values below.
DrawMatchesFlags. See details above in drawMatches .
@note
For Python API, flags are modified as cv2.DRAW\_MATCHES\_FLAGS\_DEFAULT,
cv2.DRAW\_MATCHES\_FLAGS\_DRAW\_RICH\_KEYPOINTS, cv2.DRAW\_MATCHES\_FLAGS\_DRAW\_OVER\_OUTIMG,
cv2.DRAW\_MATCHES\_FLAGS\_NOT\_DRAW\_SINGLE\_POINTS
For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
*/
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
......
......@@ -114,7 +114,7 @@ public:
/** @brief Constructs a nearest neighbor search index for a given dataset.
@param features Matrix containing the features (points) to index. The size of the matrix is
num\_features x feature\_dimensionality and the data type of the elements in the matrix must
num_features x feature_dimensionality and the data type of the elements in the matrix must
coincide with the type of the index.
@param params Structure containing the index parameters. The type of index that will be
constructed depends on the type of this parameter. See the description.
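Example (editor's sketch, not part of this commit):
@code
cv::Mat features(1000, 32, CV_32F);          // 1000 points, 32 dimensions
cv::flann::Index index(features, cv::flann::KDTreeIndexParams(4));
cv::Mat indices, dists;
index.knnSearch(query, indices, dists, 5);   // 5 nearest neighbours per query row
@endcode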
......
......@@ -92,10 +92,10 @@ enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0,
@param filename Name of file to be loaded.
@param flags Flags specifying the color type of a loaded image:
- CV\_LOAD\_IMAGE\_ANYDEPTH - If set, return 16-bit/32-bit image when the input has the
- CV_LOAD_IMAGE_ANYDEPTH - If set, return 16-bit/32-bit image when the input has the
corresponding depth, otherwise convert it to 8-bit.
- CV\_LOAD\_IMAGE\_COLOR - If set, always convert image to the color one
- CV\_LOAD\_IMAGE\_GRAYSCALE - If set, always convert image to the grayscale one
- CV_LOAD_IMAGE_COLOR - If set, always convert image to the color one
- CV_LOAD_IMAGE_GRAYSCALE - If set, always convert image to the grayscale one
- **\>0** Return a 3-channel color image.
@note In the current implementation the alpha channel, if any, is stripped from the output image.
......@@ -128,7 +128,7 @@ returns an empty matrix ( Mat::data==NULL ). Currently, the following file forma
- On Linux\*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for
codecs supplied with an OS image. Install the relevant packages (do not forget the development
files, for example, "libjpeg-dev", in Debian\* and Ubuntu\*) to get the codec support or turn
on the OPENCV\_BUILD\_3RDPARTY\_LIBS flag in CMake.
on the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake.
@note In the case of color images, the decoded images will have the channels stored in B G R order.
*/
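/* Editor's sketch (not in the original header): loading with the new-style flags,
   assuming the imread declared below:

       cv::Mat img = cv::imread("photo.jpg", cv::IMREAD_GRAYSCALE);
       if (img.empty())
           std::cerr << "could not read photo.jpg" << std::endl;
*/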
......@@ -139,20 +139,20 @@ CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
@param filename Name of the file.
@param img Image to be saved.
@param params Format-specific save parameters encoded as pairs
paramId\_1, paramValue\_1, paramId\_2, paramValue\_2, ... . The following parameters are currently
paramId_1, paramValue_1, paramId_2, paramValue_2, ... . The following parameters are currently
supported:
- For JPEG, it can be a quality ( CV\_IMWRITE\_JPEG\_QUALITY ) from 0 to 100 (the higher is
- For JPEG, it can be a quality ( CV_IMWRITE_JPEG_QUALITY ) from 0 to 100 (the higher is
the better). Default value is 95.
- For WEBP, it can be a quality ( CV\_IMWRITE\_WEBP\_QUALITY ) from 1 to 100 (the higher is
- For WEBP, it can be a quality ( CV_IMWRITE_WEBP_QUALITY ) from 1 to 100 (the higher is
the better). By default (without any parameter) and for quality above 100 the lossless
compression is used.
- For PNG, it can be the compression level ( CV\_IMWRITE\_PNG\_COMPRESSION ) from 0 to 9. A
- For PNG, it can be the compression level ( CV_IMWRITE_PNG_COMPRESSION ) from 0 to 9. A
higher value means a smaller size and longer compression time. Default value is 3.
- For PPM, PGM, or PBM, it can be a binary format flag ( CV\_IMWRITE\_PXM\_BINARY ), 0 or 1.
- For PPM, PGM, or PBM, it can be a binary format flag ( CV_IMWRITE_PXM_BINARY ), 0 or 1.
Default value is 1.
The function imwrite saves the image to the specified file. The image format is chosen based on the
filename extension (see imread for the list of extensions). Only 8-bit (or 16-bit unsigned (CV\_16U)
filename extension (see imread for the list of extensions). Only 8-bit (or 16-bit unsigned (CV_16U)
in case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel (with 'BGR' channel order) images
can be saved using this function. If the format, depth or channel order is different, use
Mat::convertTo , and cvtColor to convert it before saving. Or, use the universal FileStorage I/O
......@@ -239,7 +239,7 @@ CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);
The function compresses the image and stores it in the memory buffer that is resized to fit the
result. See imwrite for the list of supported formats and flags description.
@note cvEncodeImage returns single-row matrix of type CV\_8UC1 that contains encoded image as array
@note cvEncodeImage returns single-row matrix of type CV_8UC1 that contains encoded image as array
of bytes.
*/
CV_EXPORTS_W bool imencode( const String& ext, InputArray img,
......
......@@ -1361,7 +1361,7 @@ call
is equivalent to
\f[\texttt{Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, borderType)} .\f]
\f[\texttt{Sobel(src, dst, ddepth, dx, dy, CV\_SCHARR, scale, delta, borderType)} .\f]
@param src input image.
@param dst output image of the same size and the same number of channels as src.
......
......@@ -94,7 +94,7 @@ To see the object detector at work, have a look at the facedetect demo:
<https://github.com/Itseez/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>
The following reference is for the detection part only. There is a separate application called
opencv\_traincascade that can train a cascade of boosted classifiers from a set of samples.
opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in
addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection
......@@ -246,7 +246,7 @@ public:
/** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
of rectangles.
@param image Matrix of the type CV\_8U containing an image where objects are detected.
@param image Matrix of the type CV_8U containing an image where objects are detected.
@param objects Vector of rectangles where each rectangle contains the detected object, the
rectangles may be partially outside the original image.
@param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
......@@ -261,7 +261,7 @@ public:
@note
- (Python) A face detection example using cascade classifiers can be found at
opencv\_source\_code/samples/python2/facedetect.py
opencv_source_code/samples/python2/facedetect.py
*/
CV_WRAP void detectMultiScale( InputArray image,
CV_OUT std::vector<Rect>& objects,
......@@ -271,7 +271,7 @@ public:
Size maxSize = Size() );
/** @overload
@param image Matrix of the type CV\_8U containing an image where objects are detected.
@param image Matrix of the type CV_8U containing an image where objects are detected.
@param objects Vector of rectangles where each rectangle contains the detected object, the
rectangles may be partially outside the original image.
@param numDetections Vector of detection numbers for the corresponding objects. An object's number
......
......@@ -96,8 +96,8 @@ needs to be inpainted.
@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
by the algorithm.
@param flags Inpainting method that could be one of the following:
- **INPAINT\_NS** Navier-Stokes based method [Navier01]
- **INPAINT\_TELEA** Method by Alexandru Telea @cite Telea04.
- **INPAINT_NS** Navier-Stokes based method [Navier01]
- **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04.
The function reconstructs the selected image area from the pixels near the area boundary. The
function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
......@@ -105,9 +105,9 @@ objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting
@note
- An example using the inpainting technique can be found at
opencv\_source\_code/samples/cpp/inpaint.cpp
opencv_source_code/samples/cpp/inpaint.cpp
- (Python) An example using the inpainting technique can be found at
opencv\_source\_code/samples/python2/inpaint.py
opencv_source_code/samples/python2/inpaint.py
*/
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
OutputArray dst, double inpaintRadius, int flags );
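A short sketch of the call (file names are placeholders):
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat damaged = cv::imread("damaged.jpg");
cv::Mat mask = cv::imread("mask.png", 0); // 8-bit 1-channel; non-zero pixels mark the area to fill
cv::Mat restored;
cv::inpaint(damaged, mask, restored, 3.0 /* inpaintRadius */, cv::INPAINT_TELEA);
@endcode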
......@@ -347,8 +347,8 @@ public:
@param contrast resulting contrast on logarithmic scale, i.e. log(max / min), where max and min
are maximum and minimum luminance values of the resulting image.
@param saturation saturation enhancement value. See createTonemapDrago
@param sigma\_space bilateral filter sigma in color space
@param sigma\_color bilateral filter sigma in coordinate space
@param sigma_space bilateral filter sigma in coordinate space
@param sigma_color bilateral filter sigma in color space
*/
CV_EXPORTS_W Ptr<TonemapDurand>
createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f);
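Usage might look like the following; the .hdr radiance map is an assumed input, e.g. produced by one of the Merge* classes below:
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat hdr = cv::imread("memorial.hdr", -1); // CV_32FC3 radiance map
cv::Ptr<cv::TonemapDurand> tonemap = cv::createTonemapDurand(2.2f /* gamma */);
cv::Mat ldr;
tonemap->process(hdr, ldr);         // output is 32-bit float in [0, 1]
ldr.convertTo(ldr, CV_8UC3, 255.0); // rescale before saving with imwrite
@endcode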
......@@ -377,9 +377,9 @@ public:
@param gamma gamma value for gamma correction. See createTonemap
@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
@param light\_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel
@param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel
value, if 0 it's global, otherwise it's a weighted mean of these two cases.
@param color\_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently,
@param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently,
if 0 adaptation level is the same for each channel.
*/
CV_EXPORTS_W Ptr<TonemapReinhard>
......@@ -484,9 +484,9 @@ public:
/** @brief Creates AlignMTB object
@param max\_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
@param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
usually good enough (31 and 63 pixels shift respectively).
@param exclude\_range range for exclusion bitmap that is constructed to suppress noise around the
@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
median value.
@param cut if true cuts images, otherwise fills the new regions with zeros.
*/
......@@ -554,7 +554,7 @@ public:
/** @brief Creates CalibrateRobertson object
@param max\_iter maximal number of Gauss-Seidel solver iterations.
@param max_iter maximal number of Gauss-Seidel solver iterations.
@param threshold target difference between results of two successive steps of the minimization.
*/
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);
......@@ -628,9 +628,9 @@ public:
/** @brief Creates MergeMertens object
@param contrast\_weight contrast measure weight. See MergeMertens.
@param saturation\_weight saturation measure weight
@param exposure\_weight well-exposedness measure weight
@param contrast_weight contrast measure weight. See MergeMertens.
@param saturation_weight saturation measure weight
@param exposure_weight well-exposedness measure weight
*/
CV_EXPORTS_W Ptr<MergeMertens>
createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);
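A sketch that also exercises the AlignMTB object described above (file names are placeholders); Mertens fusion needs no exposure times:
@code{.cpp}
#include <vector>
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

std::vector<cv::Mat> images;
images.push_back(cv::imread("under.jpg"));
images.push_back(cv::imread("normal.jpg"));
images.push_back(cv::imread("over.jpg"));

cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB(); // median-threshold bitmap alignment
align->process(images, images);

cv::Ptr<cv::MergeMertens> merge = cv::createMergeMertens();
cv::Mat fusion;
merge->process(images, fusion);           // result is 32-bit float in [0, 1]
fusion.convertTo(fusion, CV_8UC3, 255.0); // rescale for display or saving
@endcode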
......@@ -660,7 +660,7 @@ black-and-white photograph rendering, and in many single channel image processin
@param src Input 8-bit 3-channel image.
@param grayscale Output 8-bit 1-channel image.
@param color\_boost Output 8-bit 3-channel image.
@param color_boost Output 8-bit 3-channel image.
This function is to be applied on color images.
*/
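For example:
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat src = cv::imread("color.jpg"); // 8-bit 3-channel input
cv::Mat grayscale, color_boost;
cv::decolor(src, grayscale, color_boost);
@endcode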
......@@ -681,13 +681,13 @@ content @cite PM03.
@param p Point in dst image where object is placed.
@param blend Output image with the same size and type as dst.
@param flags Cloning method that could be one of the following:
- **NORMAL\_CLONE** The power of the method is fully expressed when inserting objects with
- **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with
complex outlines into a new background
- **MIXED\_CLONE** The classic method, color-based selection and alpha masking might be time
- **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time
consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the
original image, is not effective. Mixed seamless cloning based on a loose selection proves
effective.
- **FEATURE\_EXCHANGE** Feature exchange allows the user to easily replace certain features of
- **FEATURE_EXCHANGE** Feature exchange allows the user to easily replace certain features of
one object by alternative features.
*/
CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
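A usage sketch (images, mask and placement point are placeholders):
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat src = cv::imread("airplane.jpg"); // object to insert
cv::Mat dst = cv::imread("sky.jpg");      // destination background
cv::Mat mask(src.size(), CV_8UC1, cv::Scalar(255)); // rough selection of the object
cv::Point p(dst.cols / 2, dst.rows / 2);  // where the object's center is placed
cv::Mat blend;
cv::seamlessClone(src, dst, mask, p, blend, cv::NORMAL_CLONE);
@endcode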
......@@ -699,9 +699,9 @@ seamlessly.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src .
@param red\_mul R-channel multiply factor.
@param green\_mul G-channel multiply factor.
@param blue\_mul B-channel multiply factor.
@param red_mul R-channel multiply factor.
@param green_mul G-channel multiply factor.
@param blue_mul B-channel multiply factor.
Multiplication factors are between 0.5 and 2.5.
*/
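For instance, to give the selected region a reddish cast:
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat src = cv::imread("source.jpg");
cv::Mat mask = cv::imread("selection.png", 0); // non-zero pixels select the region
cv::Mat dst;
cv::colorChange(src, mask, dst, 2.0f /* red_mul */, 0.7f /* green_mul */, 0.7f /* blue_mul */);
@endcode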
......@@ -729,9 +729,9 @@ Detector is used.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param low\_threshold Range from 0 to 100.
@param high\_threshold Value \> 100.
@param kernel\_size The size of the Sobel kernel to be used.
@param low_threshold Range from 0 to 100.
@param high_threshold Value \> 100.
@param kernel_size The size of the Sobel kernel to be used.
**NOTE:**
......@@ -754,10 +754,10 @@ filters are used in many different applications @cite EM11.
@param src Input 8-bit 3-channel image.
@param dst Output 8-bit 3-channel image.
@param flags Edge preserving filters:
- **RECURS\_FILTER** = 1
- **NORMCONV\_FILTER** = 2
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
- **RECURS_FILTER** = 1
- **NORMCONV_FILTER** = 2
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
*/
CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
float sigma_s = 60, float sigma_r = 0.4f);
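A minimal call using the flag values listed above:
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat src = cv::imread("input.jpg");
cv::Mat smoothed;
cv::edgePreservingFilter(src, smoothed, cv::RECURS_FILTER /* = 1 */, 60.0f, 0.4f);
@endcode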
......@@ -766,8 +766,8 @@ CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flag
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
*/
CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
float sigma_r = 0.15f);
......@@ -777,9 +777,9 @@ CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s =
@param src Input 8-bit 3-channel image.
@param dst1 Output 8-bit 1-channel image.
@param dst2 Output image with the same size and type as src.
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
@param shade\_factor Range between 0 to 0.1.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
@param shade_factor Range between 0 and 0.1.
*/
CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
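For example:
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat src = cv::imread("input.jpg");
cv::Mat sketch_gray, sketch_color;
cv::pencilSketch(src, sketch_gray, sketch_color, 60.0f, 0.07f, 0.02f);
@endcode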
......@@ -790,8 +790,8 @@ contrast while preserving, or enhancing, high-contrast features.
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
*/
CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
float sigma_r = 0.45f);
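For example:
@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/highgui.hpp>

cv::Mat src = cv::imread("input.jpg");
cv::Mat cartoon;
cv::stylization(src, cartoon, 60.0f, 0.45f);
@endcode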
......
......@@ -52,13 +52,13 @@ namespace cv { namespace cuda {
/** @brief Performs pure non-local means denoising without any simplification, and thus it is not fast.
@param src Source image. Supports only CV\_8UC1, CV\_8UC2 and CV\_8UC3.
@param src Source image. Supports only CV_8UC1, CV_8UC2 and CV_8UC3.
@param dst Destination image.
@param h Filter sigma regulating filter strength for color.
@param search\_window Size of search window.
@param block\_size Size of block used for computing weights.
@param borderMode Border type. See borderInterpolate for details. BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param search_window Size of search window.
@param block_size Size of block used for computing weights.
@param borderMode Border type. See borderInterpolate for details. BORDER_REFLECT101 ,
BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
@param s Stream for the asynchronous version.
@sa
......@@ -79,10 +79,10 @@ public:
@param dst Output image with the same size and type as src .
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise
@param search\_window Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affect performance linearly: greater search\_window - greater
@param search_window Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater search_window - greater
denoising time. Recommended value 21 pixels
@param block\_size Size in pixels of the template patch that is used to compute weights. Should be
@param block_size Size in pixels of the template patch that is used to compute weights. Should be
odd. Recommended value 7 pixels
@param s Stream for the asynchronous invocations.
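A host-side sketch of the plain nonLocalMeans function documented above (the header path is an assumption for this source layout):
@code{.cpp}
#include <opencv2/core/cuda.hpp>
#include <opencv2/photo/cuda.hpp> // assumed location of the CUDA denoising declarations
#include <opencv2/highgui.hpp>

cv::Mat host = cv::imread("noisy.jpg"); // CV_8UC3, one of the supported types
cv::cuda::GpuMat src(host), dst;        // constructor uploads to the device
cv::cuda::nonLocalMeans(src, dst, 10.0f /* h */, 21 /* search_window */, 7 /* block_size */);
cv::Mat denoised;
dst.download(denoised);
@endcode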
......@@ -98,14 +98,14 @@ public:
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src .
@param h\_luminance Parameter regulating filter strength. Big h value perfectly removes noise but
@param h_luminance Parameter regulating filter strength. Big h value perfectly removes noise but
also removes image details; a smaller h value preserves details but also preserves some noise
@param photo_render The same as h but for color components. For most images a value of 10 will be
enough to remove colored noise and not distort colors
@param search\_window Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affect performance linearly: greater search\_window - greater
@param search_window Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affect performance linearly: greater search_window - greater
denoising time. Recommended value 21 pixels
@param block\_size Size in pixels of the template patch that is used to compute weights. Should be
@param block_size Size in pixels of the template patch that is used to compute weights. Should be
odd. Recommended value 7 pixels
@param s Stream for the asynchronous invocations.
......
......@@ -200,7 +200,7 @@ public:
/** @brief Set the norm used to compute the Hausdorff value between two shapes. It can be L1 or L2 norm.
@param distanceFlag Flag indicating which norm is used to compute the Hausdorff distance
(NORM\_L1, NORM\_L2).
(NORM_L1, NORM_L2).
*/
CV_WRAP virtual void setDistanceFlag(int distanceFlag) = 0;
CV_WRAP virtual int getDistanceFlag() const = 0;
......
......@@ -89,9 +89,9 @@ familiar with the theory is recommended.
@note
- A basic example on image stitching can be found at
opencv\_source\_code/samples/cpp/stitching.cpp
opencv_source_code/samples/cpp/stitching.cpp
- A detailed example on image stitching can be found at
opencv\_source\_code/samples/cpp/stitching\_detailed.cpp
opencv_source_code/samples/cpp/stitching_detailed.cpp
*/
class CV_EXPORTS_W Stitcher
{
......@@ -108,7 +108,7 @@ public:
// Stitcher() {}
/** @brief Creates a stitcher with the default parameters.
@param try\_use\_gpu Flag indicating whether GPU should be used whenever it's possible.
@param try_use_gpu Flag indicating whether GPU should be used whenever it's possible.
@return Stitcher class instance.
*/
static Stitcher createDefault(bool try_use_gpu = false);
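A typical use of the default stitcher (input file names are placeholders):
@code{.cpp}
#include <vector>
#include <opencv2/stitching.hpp>
#include <opencv2/highgui.hpp>

std::vector<cv::Mat> imgs;
imgs.push_back(cv::imread("left.jpg"));
imgs.push_back(cv::imread("right.jpg"));

cv::Stitcher stitcher = cv::Stitcher::createDefault(false /* try_use_gpu */);
cv::Mat pano;
cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
if (status == cv::Stitcher::OK)
    cv::imwrite("pano.jpg", pano);
@endcode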
......
......@@ -58,8 +58,8 @@ undergoes rotations around its centre only.
@param H Homography.
@param f0 Estimated focal length along X axis.
@param f1 Estimated focal length along Y axis.
@param f0\_ok True, if f0 was estimated successfully, false otherwise.
@param f1\_ok True, if f1 was estimated successfully, false otherwise.
@param f0_ok True, if f0 was estimated successfully, false otherwise.
@param f1_ok True, if f1 was estimated successfully, false otherwise.
See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
......@@ -69,7 +69,7 @@ void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool
/** @brief Estimates focal lengths for each given camera.
@param features Features of images.
@param pairwise\_matches Matches between all image pairs.
@param pairwise_matches Matches between all image pairs.
@param focals Estimated focal lengths for each camera.
*/
void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
......
......@@ -81,7 +81,7 @@ public:
/** @brief Blends and returns the final pano.
@param dst Final pano
@param dst\_mask Final pano mask
@param dst_mask Final pano mask
*/
virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);
......
......@@ -80,7 +80,7 @@ public:
@param features Found features
@param rois Regions of interest
@sa detail::ImageFeatures, Rect\_
@sa detail::ImageFeatures, Rect_
*/
void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
/** @brief Frees unused memory allocated before if there is any. */
......@@ -88,7 +88,7 @@ public:
protected:
/** @brief This method must implement features finding logic in order to make the wrappers
detail::FeaturesFinder::operator()\_ work.
detail::FeaturesFinder::operator()_ work.
@param image Source image
@param features Found features
......@@ -181,7 +181,7 @@ public:
/** @overload
@param features1 First image features
@param features2 Second image features
@param matches\_info Found matches
@param matches_info Found matches
*/
void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) { match(features1, features2, matches_info); }
......@@ -189,7 +189,7 @@ public:
/** @brief Performs images matching.
@param features Features of the source images
@param pairwise\_matches Found pairwise matches
@param pairwise_matches Found pairwise matches
@param mask Mask indicating which image pairs must be matched
The function is parallelized with the TBB library.
......@@ -211,11 +211,11 @@ protected:
FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
/** @brief This method must implement matching logic in order to make the wrappers
detail::FeaturesMatcher::operator()\_ work.
detail::FeaturesMatcher::operator()_ work.
@param features1 first image features
@param features2 second image features
@param matches\_info found matches
@param matches_info found matches
*/
virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) = 0;
......@@ -224,7 +224,7 @@ protected:
};
/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match\_conf
ratio between descriptor distances is greater than the threshold match_conf
@sa detail::FeaturesMatcher
*/
......@@ -233,11 +233,11 @@ class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
public:
/** @brief Constructs a "best of 2 nearest" matcher.
@param try\_use\_gpu Should try to use GPU or not
@param match\_conf Match distances ration threshold
@param num\_matches\_thresh1 Minimum number of matches required for the 2D projective transform
@param try_use_gpu Should try to use GPU or not
@param match_conf Match distances ratio threshold
@param num_matches_thresh1 Minimum number of matches required for the 2D projective transform
estimation used in the inliers classification step
@param num\_matches\_thresh2 Minimum number of matches required for the 2D projective transform
@param num_matches_thresh2 Minimum number of matches required for the 2D projective transform
re-estimation on inliers
*/
BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
......
......@@ -70,7 +70,7 @@ public:
/** @brief Estimates camera parameters.
@param features Features of images
@param pairwise\_matches Pairwise matches of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
......@@ -81,10 +81,10 @@ public:
protected:
/** @brief This method must implement camera parameters estimation logic in order to make the wrapper
detail::Estimator::operator()\_ work.
detail::Estimator::operator()_ work.
@param features Features of images
@param pairwise\_matches Pairwise matches of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
......@@ -130,8 +130,8 @@ public:
protected:
/** @brief Construct a bundle adjuster base instance.
@param num\_params\_per\_cam Number of parameters per camera
@param num\_errs\_per\_measurement Number of error terms (components) per match
@param num_params_per_cam Number of parameters per camera
@param num_errs_per_measurement Number of error terms (components) per match
*/
BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
: num_params_per_cam_(num_params_per_cam),
......@@ -159,13 +159,13 @@ protected:
virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
/** @brief Calculates error vector.
@param err Error column-vector of length total\_num\_matches \* num\_errs\_per\_measurement
@param err Error column-vector of length total_num_matches \* num_errs_per_measurement
*/
virtual void calcError(Mat &err) = 0;
/** @brief Calculates the cost function jacobian.
@param jac Jacobian matrix of dimensions
(total\_num\_matches \* num\_errs\_per\_measurement) x (num\_images \* num\_params\_per\_cam)
(total_num_matches \* num_errs_per_measurement) x (num_images \* num_params_per_cam)
*/
virtual void calcJacobian(Mat &jac) = 0;
......
......@@ -72,7 +72,7 @@ public:
/** @brief Builds the projection maps according to the given camera data.
@param src\_size Source image size
@param src_size Source image size
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
......@@ -86,8 +86,8 @@ public:
@param src Source image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp\_mode Interpolation mode
@param border\_mode Border extrapolation mode
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Projected image top-left corner
*/
......@@ -99,16 +99,16 @@ public:
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp\_mode Interpolation mode
@param border\_mode Border extrapolation mode
@param dst\_size Backward-projected image size
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst_size Backward-projected image size
@param dst Backward-projected image
*/
virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst) = 0;
/**
@param src\_size Source image bounding box
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
......@@ -135,7 +135,7 @@ struct CV_EXPORTS ProjectorBase
float t[3];
};
/** @brief Base class for rotation-based warper using a detail::ProjectorBase\_ derived class.
/** @brief Base class for rotation-based warper using a detail::ProjectorBase_ derived class.
*/
template <class P>
class CV_EXPORTS RotationWarperBase : public RotationWarper
......
......@@ -390,15 +390,15 @@ class can be used: :
@note
- A basic sample on using the VideoCapture interface can be found at
opencv\_source\_code/samples/cpp/starter\_video.cpp
opencv_source_code/samples/cpp/starter_video.cpp
- Another basic video processing sample can be found at
opencv\_source\_code/samples/cpp/video\_dmtx.cpp
opencv_source_code/samples/cpp/video_dmtx.cpp
- (Python) A basic sample on using the VideoCapture interface can be found at
opencv\_source\_code/samples/python2/video.py
opencv_source_code/samples/python2/video.py
- (Python) Another basic video processing sample can be found at
opencv\_source\_code/samples/python2/video\_dmtx.py
opencv_source_code/samples/python2/video_dmtx.py
- (Python) A multi threaded video processing sample can be found at
opencv\_source\_code/samples/python2/video\_threaded.py
opencv_source_code/samples/python2/video_threaded.py
*/
class CV_EXPORTS_W VideoCapture
{
......@@ -412,7 +412,7 @@ public:
/** @overload
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img\_%02d.jpg, which will read samples like img\_00.jpg, img\_01.jpg, img\_02.jpg, ...)
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
*/
CV_WRAP VideoCapture(const String& filename);
......@@ -427,7 +427,7 @@ public:
/** @brief Open video file or a capturing device for video capturing
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img\_%02d.jpg, which will read samples like img\_00.jpg, img\_01.jpg, img\_02.jpg, ...)
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
The methods first call VideoCapture::release to close the already opened file or camera.
*/
......@@ -502,27 +502,27 @@ public:
/** @brief Sets a property in the VideoCapture.
@param propId Property identifier. It can be one of the following:
- **CV\_CAP\_PROP\_POS\_MSEC** Current position of the video file in milliseconds.
- **CV\_CAP\_PROP\_POS\_FRAMES** 0-based index of the frame to be decoded/captured next.
- **CV\_CAP\_PROP\_POS\_AVI\_RATIO** Relative position of the video file: 0 - start of the
- **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.
- **CV_CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
- **CV_CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
film, 1 - end of the film.
- **CV\_CAP\_PROP\_FRAME\_WIDTH** Width of the frames in the video stream.
- **CV\_CAP\_PROP\_FRAME\_HEIGHT** Height of the frames in the video stream.
- **CV\_CAP\_PROP\_FPS** Frame rate.
- **CV\_CAP\_PROP\_FOURCC** 4-character code of codec.
- **CV\_CAP\_PROP\_FRAME\_COUNT** Number of frames in the video file.
- **CV\_CAP\_PROP\_FORMAT** Format of the Mat objects returned by retrieve() .
- **CV\_CAP\_PROP\_MODE** Backend-specific value indicating the current capture mode.
- **CV\_CAP\_PROP\_BRIGHTNESS** Brightness of the image (only for cameras).
- **CV\_CAP\_PROP\_CONTRAST** Contrast of the image (only for cameras).
- **CV\_CAP\_PROP\_SATURATION** Saturation of the image (only for cameras).
- **CV\_CAP\_PROP\_HUE** Hue of the image (only for cameras).
- **CV\_CAP\_PROP\_GAIN** Gain of the image (only for cameras).
- **CV\_CAP\_PROP\_EXPOSURE** Exposure (only for cameras).
- **CV\_CAP\_PROP\_CONVERT\_RGB** Boolean flags indicating whether images should be converted
- **CV_CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
- **CV_CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
- **CV_CAP_PROP_FPS** Frame rate.
- **CV_CAP_PROP_FOURCC** 4-character code of codec.
- **CV_CAP_PROP_FRAME_COUNT** Number of frames in the video file.
- **CV_CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
- **CV_CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
- **CV_CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
- **CV_CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
- **CV_CAP_PROP_SATURATION** Saturation of the image (only for cameras).
- **CV_CAP_PROP_HUE** Hue of the image (only for cameras).
- **CV_CAP_PROP_GAIN** Gain of the image (only for cameras).
- **CV_CAP_PROP_EXPOSURE** Exposure (only for cameras).
- **CV_CAP_PROP_CONVERT_RGB** Boolean flag indicating whether images should be converted
to RGB.
- **CV\_CAP\_PROP\_WHITE\_BALANCE** Currently unsupported
- **CV\_CAP\_PROP\_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
- **CV_CAP_PROP_WHITE_BALANCE** Currently unsupported
- **CV_CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
by DC1394 v 2.x backend currently)
@param value Value of the property.
*/
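A short sketch; the CV_-prefixed identifiers above are the C-API names, and the C++ constants simply drop the CV_ prefix. Whether a property takes effect depends on the backend:
@code{.cpp}
#include <opencv2/highgui.hpp>

cv::VideoCapture cap(0);                // default camera
cap.set(cv::CAP_PROP_FRAME_WIDTH, 640); // CV_CAP_PROP_FRAME_WIDTH in the list above
cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
double fps = cap.get(cv::CAP_PROP_FPS); // 0 if the backend cannot report it
@endcode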
......@@ -531,28 +531,28 @@ public:
/** @brief Returns the specified VideoCapture property
@param propId Property identifier. It can be one of the following:
- **CV\_CAP\_PROP\_POS\_MSEC** Current position of the video file in milliseconds or video
- **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video
capture timestamp.
- **CV\_CAP\_PROP\_POS\_FRAMES** 0-based index of the frame to be decoded/captured next.
- **CV\_CAP\_PROP\_POS\_AVI\_RATIO** Relative position of the video file: 0 - start of the
- **CV_CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
- **CV_CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
film, 1 - end of the film.
- **CV\_CAP\_PROP\_FRAME\_WIDTH** Width of the frames in the video stream.
- **CV\_CAP\_PROP\_FRAME\_HEIGHT** Height of the frames in the video stream.
- **CV\_CAP\_PROP\_FPS** Frame rate.
- **CV\_CAP\_PROP\_FOURCC** 4-character code of codec.
- **CV\_CAP\_PROP\_FRAME\_COUNT** Number of frames in the video file.
- **CV\_CAP\_PROP\_FORMAT** Format of the Mat objects returned by retrieve() .
- **CV\_CAP\_PROP\_MODE** Backend-specific value indicating the current capture mode.
- **CV\_CAP\_PROP\_BRIGHTNESS** Brightness of the image (only for cameras).
- **CV\_CAP\_PROP\_CONTRAST** Contrast of the image (only for cameras).
- **CV\_CAP\_PROP\_SATURATION** Saturation of the image (only for cameras).
- **CV\_CAP\_PROP\_HUE** Hue of the image (only for cameras).
- **CV\_CAP\_PROP\_GAIN** Gain of the image (only for cameras).
- **CV\_CAP\_PROP\_EXPOSURE** Exposure (only for cameras).
- **CV\_CAP\_PROP\_CONVERT\_RGB** Boolean flags indicating whether images should be converted
- **CV_CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
- **CV_CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
- **CV_CAP_PROP_FPS** Frame rate.
- **CV_CAP_PROP_FOURCC** 4-character code of codec.
- **CV_CAP_PROP_FRAME_COUNT** Number of frames in the video file.
- **CV_CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
- **CV_CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
- **CV_CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
- **CV_CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
- **CV_CAP_PROP_SATURATION** Saturation of the image (only for cameras).
- **CV_CAP_PROP_HUE** Hue of the image (only for cameras).
- **CV_CAP_PROP_GAIN** Gain of the image (only for cameras).
- **CV_CAP_PROP_EXPOSURE** Exposure (only for cameras).
- **CV_CAP_PROP_CONVERT_RGB** Boolean flag indicating whether images should be converted
to RGB.
- **CV\_CAP\_PROP\_WHITE\_BALANCE** Currently not supported
- **CV\_CAP\_PROP\_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
- **CV_CAP_PROP_WHITE_BALANCE** Currently not supported
- **CV_CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
by DC1394 v 2.x backend currently)
**Note**: When querying a property that is not supported by the backend used by the VideoCapture
......
......@@ -70,7 +70,7 @@ namespace videostab
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param model Motion model (up to MM\_AFFINE).
@param model Motion model (up to MM_AFFINE).
@param rmse Final root-mean-square error.
@return 3x3 2D transformation matrix (32F).
*/
......
......@@ -155,19 +155,19 @@ namespace cv
Camera(double fx, double fy, double cx, double cy, const Size &window_size);
/** @overload
@param fov Field of view (horizontal, vertical)
@param window\_size Size of the window. Principal point is at the center of the window
@param window_size Size of the window. Principal point is at the center of the window
by default.
*/
explicit Camera(const Vec2d &fov, const Size &window_size);
/** @overload
@param K Intrinsic matrix of the camera.
@param window\_size Size of the window. This together with intrinsic matrix determines
@param window_size Size of the window. This together with intrinsic matrix determines
the field of view.
*/
explicit Camera(const Matx33d &K, const Size &window_size);
/** @overload
@param proj Projection matrix of the camera.
@param window\_size Size of the window. This together with projection matrix determines
@param window_size Size of the window. This together with projection matrix determines
the field of view.
*/
explicit Camera(const Matx44d &proj, const Size &window_size);
......@@ -192,7 +192,7 @@ namespace cv
/** @brief Creates a Kinect Camera.
@param window\_size Size of the window. This together with intrinsic matrix of a Kinect Camera
@param window_size Size of the window. This together with intrinsic matrix of a Kinect Camera
determines the field of view.
*/
static Camera KinectCamera(const Size &window_size);
......
......@@ -73,7 +73,7 @@ namespace cv
/** @brief The constructors.
@param window\_name Name of the window.
@param window_name Name of the window.
*/
Viz3d(const String& window_name = String());
Viz3d(const Viz3d&);
......@@ -165,13 +165,13 @@ namespace cv
/** @brief Transforms a point in world coordinate system to window coordinate system.
@param pt Point in world coordinate system.
@param window\_coord Output point in window coordinate system.
@param window_coord Output point in window coordinate system.
*/
void convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord);
/** @brief Transforms a point in window coordinate system to a 3D ray in world coordinate system.
@param window\_coord Point in window coordinate system. @param origin Output origin of the ray.
@param window_coord Point in window coordinate system. @param origin Output origin of the ray.
@param direction Output direction of the ray.
*/
void converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction);
......@@ -181,7 +181,7 @@ namespace cv
Size getWindowSize() const;
/** @brief Sets the size of the window.
@param window\_size New size of the window.
@param window_size New size of the window.
*/
void setWindowSize(const Size &window_size);
......@@ -251,24 +251,24 @@ namespace cv
@param value The new value of the property.
**Rendering property** can be one of the following:
- **POINT\_SIZE**
- **POINT_SIZE**
- **OPACITY**
- **LINE\_WIDTH**
- **FONT\_SIZE**
- **LINE_WIDTH**
- **FONT_SIZE**
-
**REPRESENTATION**: Expected values are
- **REPRESENTATION\_POINTS**
- **REPRESENTATION\_WIREFRAME**
- **REPRESENTATION\_SURFACE**
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
-
**IMMEDIATE\_RENDERING**:
**IMMEDIATE_RENDERING**:
- Turn on immediate rendering by setting the value to 1.
- Turn off immediate rendering by setting the value to 0.
-
**SHADING**: Expected values are
- **SHADING\_FLAT**
- **SHADING\_GOURAUD**
- **SHADING\_PHONG**
- **SHADING_FLAT**
- **SHADING_GOURAUD**
- **SHADING_PHONG**
*/
void setRenderingProperty(const String &id, int property, double value);
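For example, assuming a widget was previously registered under the id "cloud" via showWidget:
@code{.cpp}
#include <opencv2/viz.hpp>

cv::viz::Viz3d window("demo");
// ... window.showWidget("cloud", cloud_widget); ...
window.setRenderingProperty("cloud", cv::viz::POINT_SIZE, 4.0);
window.setRenderingProperty("cloud", cv::viz::OPACITY, 0.5);
@endcode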
/** @brief Returns rendering property of a widget.
......@@ -277,33 +277,33 @@ namespace cv
@param property Property.
**Rendering property** can be one of the following:
- **POINT\_SIZE**
- **POINT_SIZE**
- **OPACITY**
- **LINE\_WIDTH**
- **FONT\_SIZE**
- **LINE_WIDTH**
- **FONT_SIZE**
-
**REPRESENTATION**: Expected values are
- **REPRESENTATION\_POINTS**
- **REPRESENTATION\_WIREFRAME**
- **REPRESENTATION\_SURFACE**
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
-
**IMMEDIATE\_RENDERING**:
**IMMEDIATE_RENDERING**:
- Turn on immediate rendering by setting the value to 1.
- Turn off immediate rendering by setting the value to 0.
-
**SHADING**: Expected values are
- **SHADING\_FLAT**
- **SHADING\_GOURAUD**
- **SHADING\_PHONG**
- **SHADING_FLAT**
- **SHADING_GOURAUD**
- **SHADING_PHONG**
*/
double getRenderingProperty(const String &id, int property);
/** @brief Sets geometry representation of the widgets to surface, wireframe or points.
@param representation Geometry representation which can be one of the following:
- **REPRESENTATION\_POINTS**
- **REPRESENTATION\_WIREFRAME**
- **REPRESENTATION\_SURFACE**
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
*/
void setRepresentation(int representation);
......
......@@ -60,8 +60,8 @@ namespace cv
/** @brief Takes coordinate frame data and builds transform to global coordinate frame.
@param axis\_x X axis vector in global coordinate frame. @param axis\_y Y axis vector in global
coordinate frame. @param axis\_z Z axis vector in global coordinate frame. @param origin Origin of
@param axis_x X axis vector in global coordinate frame. @param axis_y Y axis vector in global
coordinate frame. @param axis_z Z axis vector in global coordinate frame. @param origin Origin of
the coordinate frame in global coordinate frame.
This function returns the affine transform that describes the transformation between the global coordinate frame
......@@ -69,11 +69,11 @@ namespace cv
*/
CV_EXPORTS Affine3d makeTransformToGlobal(const Vec3d& axis_x, const Vec3d& axis_y, const Vec3d& axis_z, const Vec3d& origin = Vec3d::all(0));
/** @brief Constructs camera pose from position, focal\_point and up\_vector (see gluLookAt() for more
/** @brief Constructs camera pose from position, focal_point and up_vector (see gluLookAt() for more
information).
@param position Position of the camera in global coordinate frame. @param focal\_point Focal point
of the camera in global coordinate frame. @param y\_dir Up vector of the camera in global
@param position Position of the camera in global coordinate frame. @param focal_point Focal point
of the camera in global coordinate frame. @param y_dir Up vector of the camera in global
coordinate frame.
This function returns the pose of the camera in the global coordinate frame.
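A minimal sketch (coordinates are illustrative):
@code{.cpp}
#include <opencv2/viz.hpp>

cv::Vec3d position(3.0, 3.0, 3.0);
cv::Vec3d focal_point(0.0, 0.0, 0.0); // look at the origin
cv::Vec3d y_dir(0.0, 1.0, 0.0);       // camera up vector
cv::Affine3d pose = cv::viz::makeCameraPose(position, focal_point, y_dir);

cv::viz::Viz3d window("demo");
window.setViewerPose(pose);
@endcode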
......@@ -82,7 +82,7 @@ namespace cv
/** @brief Retrieves a window by its name.
@param window\_name Name of the window that is to be retrieved.
@param window_name Name of the window that is to be retrieved.
This function returns a Viz3d object with the given name.
......