Commit cc9a1bb6 authored by Vadim Pisarevsky

propagated some more fixes from 2.3 branch to the trunk

parent f4894d57
# Calculating and displaying 2D Hue-Saturation histogram of a color image
import sys
import cv

def hs_histogram(src):
    # Convert to HSV
    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)

    # Extract the H and S planes
    h_plane = cv.CreateMat(src.rows, src.cols, cv.CV_8UC1)
    s_plane = cv.CreateMat(src.rows, src.cols, cv.CV_8UC1)
    cv.Split(hsv, h_plane, s_plane, None, None)
    planes = [h_plane, s_plane]

    h_bins = 30
    s_bins = 32
    hist_size = [h_bins, s_bins]
    # hue varies from 0 (~0 deg red) to 180 (~360 deg red again)
    h_ranges = [0, 180]
    # saturation varies from 0 (black-gray-white) to
    # 255 (pure spectrum color)
    s_ranges = [0, 255]
    ranges = [h_ranges, s_ranges]
    scale = 10
    hist = cv.CreateHist([h_bins, s_bins], cv.CV_HIST_ARRAY, ranges, 1)
    cv.CalcHist([cv.GetImage(i) for i in planes], hist)
    (_, max_value, _, _) = cv.GetMinMaxHistValue(hist)

    hist_img = cv.CreateImage((h_bins*scale, s_bins*scale), 8, 3)
    for h in range(h_bins):
        for s in range(s_bins):
            bin_val = cv.QueryHistValue_2D(hist, h, s)
            intensity = cv.Round(bin_val * 255 / max_value)
            cv.Rectangle(hist_img,
                         (h*scale, s*scale),
                         ((h+1)*scale - 1, (s+1)*scale - 1),
                         cv.RGB(intensity, intensity, intensity),
                         cv.CV_FILLED)
    return hist_img

if __name__ == '__main__':
    src = cv.LoadImageM(sys.argv[1])
    cv.NamedWindow("Source", 1)
    cv.ShowImage("Source", src)
    cv.NamedWindow("H-S Histogram", 1)
    cv.ShowImage("H-S Histogram", hs_histogram(src))
    cv.WaitKey(0)
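Editor's note, not part of the commit: a minimal hedged sketch of the same H-S histogram using the newer cv2/NumPy interface, assuming a BGR input image; the drawing layout mirrors the sample above.

import cv2
import numpy as np

def hs_histogram_cv2(src_bgr, h_bins=30, s_bins=32, scale=10):
    # hue range 0..180, saturation range 0..256 (exclusive upper bound)
    hsv = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1], None, [h_bins, s_bins], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    hist_img = np.zeros((s_bins * scale, h_bins * scale, 3), np.uint8)
    for h in range(h_bins):
        for s in range(s_bins):
            v = int(hist[h, s])
            cv2.rectangle(hist_img, (h * scale, s * scale),
                          ((h + 1) * scale - 1, (s + 1) * scale - 1),
                          (v, v, v), -1)  # thickness -1 = filled bin
    return hist_img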
import sys
import cv

def findstereocorrespondence(image_left, image_right):
    # image_left and image_right are the input 8-bit single-channel images
    # from the left and the right cameras, respectively
    (r, c) = (image_left.rows, image_left.cols)
    disparity_left = cv.CreateMat(r, c, cv.CV_16S)
    disparity_right = cv.CreateMat(r, c, cv.CV_16S)
    state = cv.CreateStereoGCState(16, 2)
    cv.FindStereoCorrespondenceGC(image_left, image_right, disparity_left, disparity_right, state, 0)
    return (disparity_left, disparity_right)

if __name__ == '__main__':
    (l, r) = [cv.LoadImageM(f, cv.CV_LOAD_IMAGE_GRAYSCALE) for f in sys.argv[1:]]
    (disparity_left, disparity_right) = findstereocorrespondence(l, r)
    disparity_left_visual = cv.CreateMat(l.rows, l.cols, cv.CV_8U)
    cv.ConvertScale(disparity_left, disparity_left_visual, -16)
    cv.SaveImage("disparity.pgm", disparity_left_visual)
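Editor's note, not part of the commit: a hedged alternative sketch that computes a disparity map with the block-matching StereoBM class (cv2 API, OpenCV 3+ naming) instead of the graph-cut matcher used above; the input file names are placeholders.

import cv2

left = cv2.imread("tsukuba_l.png", cv2.IMREAD_GRAYSCALE)    # placeholder paths
right = cv2.imread("tsukuba_r.png", cv2.IMREAD_GRAYSCALE)
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(left, right)                     # CV_16S, 4 fractional bits
vis = cv2.convertScaleAbs(disparity, alpha=255.0 / max(int(disparity.max()), 1))
cv2.imwrite("disparity.pgm", vis)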
import cv

def precornerdetect(image):
    # assume that the image is floating-point
    corners = cv.CloneMat(image)
    cv.PreCornerDetect(image, corners, 3)
    dilated_corners = cv.CloneMat(image)
    cv.Dilate(corners, dilated_corners, None, 1)
    corner_mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Sub(corners, dilated_corners, corners)
    cv.CmpS(corners, 0, corner_mask, cv.CV_CMP_GE)
    return (corners, corner_mask)
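Editor's note, not part of the commit: a hedged cv2/NumPy equivalent of the fragment above, assuming a single-channel floating-point input.

import cv2

def precornerdetect_cv2(image):
    corners = cv2.preCornerDetect(image, 3)     # same 3x3 aperture as above
    dilated = cv2.dilate(corners, None)         # default 3x3 dilation
    # keep the pixels that are local maxima of the corner response
    mask = cv2.compare(corners - dilated, 0, cv2.CMP_GE)
    return corners, mask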
This diff is collapsed.
......@@ -271,7 +271,7 @@ InitFont
--------
Initializes font structure (OpenCV 1.x API).
.. c:function:: void cvInitFont( CvFont* font, int fontFace, double hscale, double vscale, double shear=0, int thickness=1, int lineType=8 )
.. ocv:cfunction:: void cvInitFont( CvFont* font, int fontFace, double hscale, double vscale, double shear=0, int thickness=1, int lineType=8 )
:param font: Pointer to the font structure initialized by the function
......@@ -308,7 +308,7 @@ Initializes font structure (OpenCV 1.x API).
:param thickness: Thickness of the text strokes
:param lineType: Type of the strokes, see :ref:`Line` description
:param lineType: Type of the strokes, see :ocv:func:`line` description
The function initializes the font structure that can be passed to text rendering functions.
......
......@@ -92,7 +92,7 @@ you can use::
Ptr<T> ptr = new T(...);
That is, ``Ptr<T> ptr`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the
:ref:`Ptr`
:ocv:class:`Ptr`
description for details.
.. _AutomaticAllocation:
......@@ -176,7 +176,7 @@ Multi-channel (``n``-channel) types can be specified using the following options
.. note:: ``CV_32FC1 == CV_32F``, ``CV_32FC2 == CV_32FC(2) == CV_MAKETYPE(CV_32F, 2)``, and ``CV_MAKETYPE(depth, n) == ((x&7)<<3) + (n-1)``. This means that the constant type is formed from the ``depth``, taking the lowest 3 bits, and the number of channels minus 1, taking the next ``log2(CV_CN_MAX)`` bits.
Examples::
Examples: ::
Mat mtx(3, 3, CV_32F); // make a 3x3 floating-point matrix
Mat cmtx(10, 1, CV_64FC2); // make a 10x1 2-channel floating-point
......
......@@ -18,7 +18,7 @@ Computes an absolute value of each matrix element.
* ``C = abs(A)`` is equivalent to ``absdiff(A, Scalar::all(0), C)``
* ``C = Mat_<Vec<uchar,n> >(abs(A*alpha + beta))`` is equivalent to :ocv:funcx:`convertScaleAbs`(A, C, alpha, beta)`
* ``C = Mat_<Vec<uchar,n> >(abs(A*alpha + beta))`` is equivalent to :ocv:funcx:`convertScaleAbs` (A, C, alpha, beta)
The output matrix has the same size and the same type as the input one except for the last case, where ``C`` is ``depth=CV_8U`` .
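Editor's note, not part of the commit: a hedged numeric check of the equivalence stated above, via the Python bindings and random data (aside from ties in rounding, the two results agree).

import cv2
import numpy as np

A = np.random.randn(4, 4).astype(np.float32)
alpha, beta = 2.0, 10.0
C1 = cv2.convertScaleAbs(A, alpha=alpha, beta=beta)
C2 = np.clip(np.abs(A * alpha + beta).round(), 0, 255).astype(np.uint8)
print(int(np.abs(C1.astype(int) - C2.astype(int)).max()))   # expected 0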
......@@ -2172,7 +2172,7 @@ PCA constructors
:param maxComponents: Maximum number of components that PCA should retain. By default, all the components are retained.
The default constructor initializes an empty PCA structure. The second constructor initializes the structure and calls
:ocv:func:`PCA::operator ()` .
:ocv:func:`PCA::operator()` .
......@@ -3114,7 +3114,7 @@ The constructors.
* **SVD::FULL_UV** When the matrix is not square, by default the algorithm produces ``u`` and ``vt`` matrices of sufficiently large size for the further ``A`` reconstruction. If, however, ``FULL_UV`` flag is specified, ``u`` and ``vt`` will be full-size square orthogonal matrices.
The first constructor initializes an empty ``SVD`` structure. The second constructor initializes an empty ``SVD`` structure and then calls
:ocv:func:`SVD::operator ()` .
:ocv:func:`SVD::operator()` .
SVD::operator ()
......@@ -3163,7 +3163,7 @@ Performs SVD of a matrix
:param flags: Opertion flags - see :ocv:func:`SVD::SVD`.
The methods/functions perform SVD of matrix. Unlike ``SVD::SVD`` constructor and ``SVD::operator ()``, they store the results to the user-provided matrices. ::
The methods/functions perform SVD of matrix. Unlike ``SVD::SVD`` constructor and ``SVD::operator()``, they store the results to the user-provided matrices. ::
Mat A, w, u, vt;
SVD::compute(A, w, u, vt);
......@@ -3231,7 +3231,7 @@ Calculates the sum of array elements.
.. ocv:function:: Scalar sum(InputArray arr)
.. ocv:function:: cv2.sumElems(arr) -> retval
.. ocv:pyfunction:: cv2.sumElems(arr) -> retval
.. ocv:cfunction:: CvScalar cvSum(const CvArr* arr)
.. ocv:pyoldfunction:: cv.Sum(arr)-> CvScalar
......
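Editor's note, not part of the commit: a hedged usage sketch for the Python binding named in the corrected directive above (``cv2.sumElems``); it returns a 4-tuple of per-channel sums.

import cv2
import numpy as np

img = np.ones((4, 5, 3), np.uint8)
print(cv2.sumElems(img))   # (20.0, 20.0, 20.0, 0.0)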
......@@ -988,9 +988,14 @@ void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
c = src1.channels();
}
Size sz = getContinuousSize(src1, src2, dst, c);
func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0);
return;
Size sz = getContinuousSize(src1, src2, dst);
size_t len = sz.width*(size_t)c;
if( len == (size_t)(int)len )
{
sz.width = (int)len;
func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0);
return;
}
}
if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 ||
......@@ -1045,6 +1050,9 @@ void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
NAryMatIterator it(arrays, ptrs);
size_t total = it.size, blocksize = total;
if( blocksize*c > INT_MAX )
blocksize = INT_MAX/c;
if( haveMask )
{
blocksize = std::min(blocksize, blocksize0);
......
......@@ -171,11 +171,12 @@ void Mat::copyTo( OutputArray _dst ) const
// to handle the copying 1xn matrix => nx1 std vector.
Size sz = size() == dst.size() ?
getContinuousSize(*this, dst, (int)elemSize()) :
getContinuousSize(*this, (int)elemSize());
getContinuousSize(*this, dst) :
getContinuousSize(*this);
size_t len = sz.width*elemSize();
for( ; sz.height--; sptr += step, dptr += dst.step )
memcpy( dptr, sptr, sz.width );
memcpy( dptr, sptr, len );
}
return;
}
......
......@@ -195,6 +195,13 @@ struct AddWeightedOp : public BaseAddOp
struct MulOp : public BaseElemWiseOp
{
MulOp() : BaseElemWiseOp(2, FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {};
void getValueRange(int depth, double& minval, double& maxval)
{
minval = depth < CV_32S ? cvtest::getMinVal(depth) : depth == CV_32S ? -1000000 : -1000.;
maxval = depth < CV_32S ? cvtest::getMaxVal(depth) : depth == CV_32S ? 1000000 : 1000.;
minval = std::max(minval, -30000.);
maxval = std::min(maxval, 30000.);
}
void op(const vector<Mat>& src, Mat& dst, const Mat&)
{
cv::multiply(src[0], src[1], dst, alpha);
......
......@@ -528,7 +528,7 @@ That's all there is to it!
#if defined _MSC_VER && _MSC_VER >= 1200
#if !defined(_MT) || defined(_DLL)
#if !defined(_MT) || defined(_DLL) || defined(_MSC_VER)
extern "C" { __declspec(dllexport) unsigned int __lc_codepage = 0; }
#endif
#ifdef _M_X64
......
......@@ -487,7 +487,7 @@ gpu::reprojectImageTo3D
:param xyzw: Output 4-channel floating-point image of the same size as ``disp`` . Each element of ``xyzw(x,y)`` contains 3D coordinates ``(x,y,z,1)`` of the point ``(x,y)`` , computed from the disparity map.
:param Q: :math:`4 \times 4` perspective transformation matrix that can be obtained via :ref:`StereoRectify` .
:param Q: :math:`4 \times 4` perspective transformation matrix that can be obtained via :ocv:func:`stereoRectify` .
:param stream: Stream for the asynchronous version.
......
......@@ -115,6 +115,7 @@ By using ``FilterEngine_GPU`` instead of functions you can avoid unnecessary mem
.. note:: The GPU filters do not support the in-place mode.
.. seealso::
:ocv:class:`gpu::BaseRowFilter_GPU`,
:ocv:class:`gpu::BaseColumnFilter_GPU`,
:ocv:class:`gpu::BaseFilter_GPU`,
......@@ -454,11 +455,13 @@ gpu::getLinearRowFilter_GPU
:param borderType: Pixel extrapolation method. For details, see :ocv:func:`borderInterpolate`. For details on limitations, see below.
There are two versions of the algorithm: NPP and OpenCV.
* NPP version is called when ``srcType == CV_8UC1`` or ``srcType == CV_8UC4`` and ``bufType == srcType`` . Otherwise, the OpenCV version is called. NPP supports only ``BORDER_CONSTANT`` border type and does not check indices outside the image.
* OpenCV version supports only ``CV_32F`` buffer depth and ``BORDER_REFLECT101``,``BORDER_REPLICATE``, and ``BORDER_CONSTANT`` border types. It checks indices outside the image.
There are two versions of the algorithm: NPP and OpenCV.
* NPP version is called when ``srcType == CV_8UC1`` or ``srcType == CV_8UC4`` and ``bufType == srcType`` . Otherwise, the OpenCV version is called. NPP supports only ``BORDER_CONSTANT`` border type and does not check indices outside the image.
* OpenCV version supports only ``CV_32F`` buffer depth and ``BORDER_REFLECT101``,``BORDER_REPLICATE``, and ``BORDER_CONSTANT`` border types. It checks indices outside the image.
See Also:,:ocv:func:`createSeparableLinearFilter` .
.. seealso:: :ocv:func:`createSeparableLinearFilter` .
.. index:: gpu::getLinearColumnFilter_GPU
......@@ -496,12 +499,15 @@ gpu::createSeparableLinearFilter_GPU
:param dstType: Destination array type. ``CV_8UC1``, ``CV_8UC4``, ``CV_16SC1``, ``CV_16SC2``, ``CV_32SC1``, ``CV_32FC1`` destination types are supported.
:param rowKernel, columnKernel: Filter coefficients.
:param rowKernel: Horizontal filter coefficients.
:param columnKernel: Vertical filter coefficients.
:param anchor: Anchor position within the kernel. Negative values mean that anchor is positioned at the aperture center.
:param rowBorderType, columnBorderType: Pixel extrapolation method in the horizontal and vertical directions For details, see :ocv:func:`borderInterpolate`. For details on limitations, see :ocv:func:`gpu::getLinearRowFilter_GPU`, cpp:ocv:func:`gpu::getLinearColumnFilter_GPU`.
:param rowBorderType: Pixel extrapolation method in the vertical direction For details, see :ocv:func:`borderInterpolate`. For details on limitations, see :ocv:func:`gpu::getLinearRowFilter_GPU`, cpp:ocv:func:`gpu::getLinearColumnFilter_GPU`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::getLinearRowFilter_GPU`, :ocv:func:`gpu::getLinearColumnFilter_GPU`, :ocv:func:`createSeparableLinearFilter`
......@@ -519,11 +525,15 @@ gpu::sepFilter2D
:param ddepth: Destination image depth. ``CV_8U``, ``CV_16S``, ``CV_32S``, and ``CV_32F`` are supported.
:param kernelX, kernelY: Filter coefficients.
:param kernelX: Horizontal filter coefficients.
:param kernelY: Vertical filter coefficients.
:param anchor: Anchor position within the kernel. The default value ``(-1, 1)`` means that the anchor is at the kernel center.
:param rowBorderType, columnBorderType: Pixel extrapolation method. For details, see :ocv:func:`borderInterpolate`.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::createSeparableLinearFilter_GPU`, :ocv:func:`sepFilter2D`
......@@ -545,7 +555,10 @@ gpu::createDerivFilter_GPU
:param ksize: Aperture size. See :ocv:func:`getDerivKernels` for details.
:param rowBorderType, columnBorderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::createSeparableLinearFilter_GPU`, :ocv:func:`createDerivFilter`
......@@ -571,7 +584,9 @@ gpu::Sobel
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. For details, see :ocv:func:`getDerivKernels` .
:param rowBorderType, columnBorderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::createSeparableLinearFilter_GPU`, :ocv:func:`Sobel`
......@@ -595,7 +610,9 @@ gpu::Scharr
:param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :ocv:func:`getDerivKernels` for details.
:param rowBorderType, columnBorderType: Pixel extrapolation method. For details, see :ocv:func:`borderInterpolate` and :ocv:func:`Scharr` .
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::createSeparableLinearFilter_GPU`, :ocv:func:`Scharr`
......@@ -615,7 +632,9 @@ gpu::createGaussianFilter_GPU
:param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` .
:param rowBorderType, columnBorderType: Border type to use. See :ocv:func:`borderInterpolate` for details.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::createSeparableLinearFilter_GPU`, :ocv:func:`createGaussianFilter`
......@@ -633,9 +652,13 @@ gpu::GaussianBlur
:param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. If they are zeros, they are computed from ``sigmaX`` and ``sigmaY`` .
:param sigmaX, sigmaY: Gaussian kernel standard deviations in X and Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize``, ``sigmaX``, and ``sigmaY`` .
:param sigmaX: Gaussian kernel standard deviation in X direction.
:param sigmaY: Gaussian kernel standard deviation in Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :ocv:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize``, ``sigmaX``, and ``sigmaY`` .
:param rowBorderType, columnBorderType: Pixel extrapolation method. See :ocv:func:`borderInterpolate` for details.
:param rowBorderType: Pixel extrapolation method in the vertical direction. For details, see :ocv:func:`borderInterpolate`.
:param columnBorderType: Pixel extrapolation method in the horizontal direction.
.. seealso:: :ocv:func:`gpu::createGaussianFilter_GPU`, :ocv:func:`GaussianBlur`
......
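Editor's note, not part of the commit: the GPU parameter descriptions above mirror the CPU API; a hedged CPU sketch of the same separable Gaussian filtering, for reference (the path is a placeholder).

import cv2

img = cv2.imread("input.png")
blurred = cv2.GaussianBlur(img, (5, 5), 1.5, sigmaY=1.5,
                           borderType=cv2.BORDER_REFLECT101)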
......@@ -3,7 +3,7 @@ Image Processing
.. highlight:: cpp
.. index:: gpu::meanShiftFiltering
gpu::meanShiftFiltering
---------------------------
......@@ -21,7 +21,7 @@ gpu::meanShiftFiltering
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
.. index:: gpu::meanShiftProc
gpu::meanShiftProc
----------------------
......@@ -41,10 +41,8 @@ gpu::meanShiftProc
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
.. seealso::
:ocv:func:`gpu::meanShiftFiltering`
.. seealso:: :ocv:func:`gpu::meanShiftFiltering`
.. index:: gpu::meanShiftSegmentation
gpu::meanShiftSegmentation
------------------------------
......@@ -64,7 +62,7 @@ gpu::meanShiftSegmentation
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
.. index:: gpu::integral
gpu::integral
-----------------
......@@ -80,10 +78,9 @@ gpu::integral
:param sqsum: Squared integral image of the ``CV_32FC1`` type.
.. seealso::
:ocv:func:`integral`
.. seealso:: :ocv:func:`integral`
.. index:: gpu::sqrIntegral
gpu::sqrIntegral
--------------------
......@@ -95,7 +92,7 @@ gpu::sqrIntegral
:param sqsum: Squared integral image containing 64-bit unsigned integer values packed into ``CV_64FC1`` .
.. index:: gpu::columnSum
gpu::columnSum
------------------
......@@ -107,7 +104,7 @@ gpu::columnSum
:param sum: Destination image of the ``CV_32FC1`` type.
.. index:: gpu::cornerHarris
gpu::cornerHarris
---------------------
......@@ -127,10 +124,8 @@ gpu::cornerHarris
:param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now.
.. seealso::
:ocv:func:`cornerHarris`
.. seealso:: :ocv:func:`cornerHarris`
.. index:: gpu::cornerMinEigenVal
gpu::cornerMinEigenVal
--------------------------
......@@ -152,7 +147,7 @@ gpu::cornerMinEigenVal
.. seealso:: :ocv:func:`cornerMinEigenVal`
.. index:: gpu::mulSpectrums
gpu::mulSpectrums
---------------------
......@@ -172,10 +167,8 @@ gpu::mulSpectrums
Only full (not packed) ``CV_32FC2`` complex spectrums in the interleaved format are supported for now.
.. seealso::
:ocv:func:`mulSpectrums`
.. seealso:: :ocv:func:`mulSpectrums`
.. index:: gpu::mulAndScaleSpectrums
gpu::mulAndScaleSpectrums
-----------------------------
......@@ -197,10 +190,9 @@ gpu::mulAndScaleSpectrums
Only full (not packed) ``CV_32FC2`` complex spectrums in the interleaved format are supported for now.
.. seealso::
:ocv:func:`mulSpectrums`
.. seealso:: :ocv:func:`mulSpectrums`
.. index:: gpu::dft
gpu::dft
------------
......@@ -236,10 +228,9 @@ gpu::dft
*
If the source matrix is real (its type is ``CV_32FC1`` ), forward DFT is performed. The result of the DFT is packed into complex ( ``CV_32FC2`` ) matrix. So, the width of the destination matrix is ``dft_size.width / 2 + 1`` . But if the source is a single column, the height is reduced instead of the width.
.. seealso::
:ocv:func:`dft`
.. seealso:: :ocv:func:`dft`
.. index:: gpu::convolve
gpu::convolve
-----------------
......@@ -259,7 +250,7 @@ gpu::convolve
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. index:: gpu::ConvolveBuf
gpu::ConvolveBuf
----------------
......@@ -280,7 +271,7 @@ Class providing a memory buffer for the :ocv:func:`gpu::convolve` function.
};
.. index:: gpu::ConvolveBuf::ConvolveBuf
gpu::ConvolveBuf::ConvolveBuf
---------------------------------
......@@ -294,7 +285,7 @@ gpu::ConvolveBuf::ConvolveBuf
Constructs a buffer for the
:ocv:func:`convolve` function with respective arguments.
.. index:: gpu::matchTemplate
gpu::matchTemplate
----------------------
......@@ -324,10 +315,8 @@ gpu::matchTemplate
* ``CV_TM_SQDIFF``
* ``CV_TM_CCORR``
.. seealso::
:ocv:func:`matchTemplate`
.. seealso:: :ocv:func:`matchTemplate`
.. index:: gpu::remap
gpu::remap
--------------
......@@ -351,10 +340,8 @@ The function transforms the source image using the specified map:
Values of pixels with non-integer coordinates are computed using the bilinear interpolation.
.. seealso::
:ocv:func:`remap`
.. seealso:: :ocv:func:`remap`
.. index:: gpu::cvtColor
gpu::cvtColor
-----------------
......@@ -376,10 +363,9 @@ gpu::cvtColor
3-channel color spaces (like ``HSV``, ``XYZ``, and so on) can be stored in a 4-channel image for better performance.
.. seealso::
:ocv:func:`cvtColor`
.. seealso:: :ocv:func:`cvtColor`
.. index:: gpu::threshold
gpu::threshold
------------------
......@@ -401,10 +387,9 @@ gpu::threshold
:param stream: Stream for the asynchronous version.
.. seealso::
:ocv:func:`threshold`
.. seealso:: :ocv:func:`threshold`
.. index:: gpu::resize
gpu::resize
---------------
......@@ -439,7 +424,7 @@ gpu::resize
.. seealso:: :ocv:func:`resize`
.. index:: gpu::warpAffine
gpu::warpAffine
-------------------
......@@ -457,10 +442,9 @@ gpu::warpAffine
:param flags: Combination of interpolation methods (see :ocv:func:`resize`) and the optional flag ``WARP_INVERSE_MAP`` specifying that ``M`` is an inverse transformation (``dst=>src``). Only ``INTER_NEAREST``, ``INTER_LINEAR``, and ``INTER_CUBIC`` interpolation methods are supported.
.. seealso::
:ocv:func:`warpAffine`
.. seealso:: :ocv:func:`warpAffine`
.. index:: gpu::warpPerspective
gpu::warpPerspective
------------------------
......@@ -478,10 +462,9 @@ gpu::warpPerspective
:param flags: Combination of interpolation methods (see :ocv:func:`resize` ) and the optional flag ``WARP_INVERSE_MAP`` specifying that ``M`` is the inverse transformation (``dst => src``). Only ``INTER_NEAREST``, ``INTER_LINEAR``, and ``INTER_CUBIC`` interpolation methods are supported.
.. seealso::
:ocv:func:`warpPerspective`
.. seealso:: :ocv:func:`warpPerspective`
.. index:: gpu::rotate
gpu::rotate
---------------
......@@ -503,10 +486,9 @@ gpu::rotate
:param interpolation: Interpolation method. Only ``INTER_NEAREST``, ``INTER_LINEAR``, and ``INTER_CUBIC`` are supported.
.. seealso::
:ocv:func:`gpu::warpAffine`
.. seealso:: :ocv:func:`gpu::warpAffine`
.. index:: gpu::copyMakeBorder
gpu::copyMakeBorder
-----------------------
......@@ -518,14 +500,18 @@ gpu::copyMakeBorder
:param dst: Destination image with the same type as ``src``. The size is ``Size(src.cols+left+right, src.rows+top+bottom)`` .
:param top, bottom, left, right: Number of pixels in each direction from the source image rectangle to extrapolate. For example: ``top=1, bottom=1, left=1, right=1`` mean that 1 pixel-wide border needs to be built.
:param top:
:param bottom:
:param left:
:param right: Number of pixels in each direction from the source image rectangle to extrapolate. For example: ``top=1, bottom=1, left=1, right=1`` mean that 1 pixel-wide border needs to be built.
:param value: Border value.
.. seealso::
:ocv:func:`copyMakeBorder`
.. seealso:: :ocv:func:`copyMakeBorder`
.. index:: gpu::rectStdDev
gpu::rectStdDev
-------------------
......@@ -541,7 +527,7 @@ gpu::rectStdDev
:param rect: Rectangular window.
.. index:: gpu::evenLevels
gpu::evenLevels
-------------------
......@@ -557,13 +543,11 @@ gpu::evenLevels
:param upperLevel: Upper boundary value of the greatest level.
.. index:: gpu::histEven
gpu::histEven
-----------------
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel)
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4])
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat* hist, int* histSize, int* lowerLevel, int* upperLevel)
Calculates a histogram with evenly distributed bins.
......@@ -577,13 +561,11 @@ gpu::histEven
:param upperLevel: Upper boundary of highest-level bin.
.. index:: gpu::histRange
gpu::histRange
------------------
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels)
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4])
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat* hist, const GpuMat* levels)
Calculates a histogram with bins determined by the ``levels`` array.
......
......@@ -77,10 +77,11 @@ After that it finds eigenvectors and eigenvalues of
The output of the function can be used for robust edge or corner detection.
See Also:
:ocv:func:`cornerMinEigenVal`,
:ocv:func:`cornerHarris`,
:ocv:func:`preCornerDetect`
.. seealso::
:ocv:func:`cornerMinEigenVal`,
:ocv:func:`cornerHarris`,
:ocv:func:`preCornerDetect`
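Editor's note, not part of the commit: a hedged sketch of the corner measures referenced above, via the Python bindings; the image path is a placeholder.

import cv2
import numpy as np

gray = cv2.imread("building.jpg", cv2.IMREAD_GRAYSCALE).astype(np.float32)
harris = cv2.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)
min_eig = cv2.cornerMinEigenVal(gray, blockSize=2, ksize=3)
corner_mask = harris > 0.01 * harris.max()    # simple thresholding of the response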
......@@ -221,7 +222,8 @@ Determines strong corners on an image.
.. ocv:pyfunction:: cv2.goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) -> corners
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image CvArr* eigImage, CvArr* tempImage CvPoint2D32f* corners int* cornerCount double qualityLevel double minDistance const CvArr* mask=NULL int blockSize=3 int useHarris=0 double k=0.04 )
.. ocv:cfunction:: void cvGoodFeaturesToTrack( const CvArr* image, CvArr* eigImage, CvArr* tempImage CvPoint2D32f* corners, int* cornerCount, double qualityLevel, double minDistance, const CvArr* mask=NULL, int blockSize=3, int useHarris=0, double k=0.04 )
.. ocv:pyoldfunction:: cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=None, blockSize=3, useHarris=0, k=0.04)-> corners
:param image: Input 8-bit or floating-point 32-bit, single-channel image.
......@@ -266,12 +268,14 @@ The function can be used to initialize a point-based tracker of an object.
**Note**: If the function is called with different values ``A`` and ``B`` of the parameter ``qualityLevel`` , and ``A`` > ``B``, the vector of returned corners with ``qualityLevel=A`` will be the prefix of the output vector with ``qualityLevel=B`` .
See Also: :ocv:func:`cornerMinEigenVal`,
:ocv:func:`cornerHarris`,
:ocv:func:`calcOpticalFlowPyrLK`,
:ocv:func:`estimateRigidMotion`,
:ocv:func:`PlanarObjectDetector`,
:ocv:func:`OneWayDescriptor`
.. seealso::
:ocv:func:`cornerMinEigenVal`,
:ocv:func:`cornerHarris`,
:ocv:func:`calcOpticalFlowPyrLK`,
:ocv:func:`estimateRigidMotion`,
:ocv:func:`PlanarObjectDetector`,
:ocv:func:`OneWayDescriptor`
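Editor's note, not part of the commit: a hedged usage sketch for the function documented above; the image path is a placeholder.

import cv2

gray = cv2.imread("building.jpg", cv2.IMREAD_GRAYSCALE)
corners = cv2.goodFeaturesToTrack(gray, maxCorners=100, qualityLevel=0.01, minDistance=10)
# corners is an Nx1x2 float32 array of (x, y) positions, strongest first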
......@@ -336,10 +340,10 @@ The function finds circles in a grayscale image using a modification of the Houg
**Note**: Usually the function detects the centers of circles well. However, it may fail to find correct radii. You can assist the function by specifying the radius range ( ``minRadius`` and ``maxRadius`` ) if you know it. Or, you may ignore the returned radius, use only the center, and find the correct radius using an additional procedure.
See Also:
:ocv:func:`fitEllipse`,
:ocv:func:`minEnclosingCircle`
.. seealso::
:ocv:func:`fitEllipse`,
:ocv:func:`minEnclosingCircle`
HoughLines
......
This diff is collapsed.
......@@ -145,9 +145,7 @@ The functions ``calcBackProject`` calculate the back project of the histogram. T
This is an approximate algorithm of the
:ocv:func:`CAMShift` color object tracker.
See Also:
:ocv:func:`calcHist`
.. seealso:: :ocv:func:`calcHist`
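Editor's note, not part of the commit: a hedged hue back-projection sketch via the Python bindings, of the kind used to seed a CAMShift-style tracker; the image path and ROI are placeholders.

import cv2

frame = cv2.cvtColor(cv2.imread("frame.jpg"), cv2.COLOR_BGR2HSV)
roi = frame[100:200, 100:200]                      # patch covering the tracked object
hist = cv2.calcHist([roi], [0], None, [180], [0, 180])
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
backproj = cv2.calcBackProject([frame], [0], hist, [0, 180], 1)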
.. _compareHist:
compareHist
......@@ -312,7 +310,7 @@ Locates a template within an image by using a histogram comparison.
:param hist: Histogram
:param method: Comparison method, passed to :ref:`CompareHist` (see description of that function)
:param method: Comparison method, passed to :ocv:cfunc:`CompareHist` (see description of that function)
:param factor: Normalization factor for histograms, will affect the normalization scale of the destination image, pass 1 if unsure
......@@ -420,7 +418,11 @@ Returns a pointer to the histogram bin.
:param hist: Histogram
:param idx0, idx1, idx2, idx3: Indices of the bin
:param idx0: 0-th index
:param idx1: 1-st index
:param idx2: 2-nd index
:param idx: Array of indices
......@@ -475,9 +477,9 @@ Makes a histogram out of an array.
:param data: Array that will be used to store histogram bins
:param ranges: Histogram bin ranges, see :ref:`CreateHist`
:param ranges: Histogram bin ranges, see :ocv:cfunc:`CreateHist`
:param uniform: Uniformity flag, see :ref:`CreateHist`
:param uniform: Uniformity flag, see :ocv:cfunc:`CreateHist`
The function initializes the histogram, whose header and bins are allocated by the user. :ocv:cfunc:`ReleaseHist` does not need to be called afterwards. Only dense histograms can be initialized this way. The function returns ``hist``.
......@@ -511,7 +513,11 @@ Queries the value of the histogram bin.
:param hist: Histogram
:param idx0, idx1, idx2, idx3: Indices of the bin
:param idx0: 0-th index
:param idx1: 1-st index
:param idx2: 2-nd index
:param idx: Array of indices
......@@ -536,9 +542,9 @@ Sets the bounds of the histogram bins.
:param hist: Histogram
:param ranges: Array of bin ranges arrays, see :ref:`CreateHist`
:param ranges: Array of bin ranges arrays, see :ocv:cfunc:`CreateHist`
:param uniform: Uniformity flag, see :ref:`CreateHist`
:param uniform: Uniformity flag, see :ocv:cfunc:`CreateHist`
The function is a stand-alone function for setting bin ranges in the histogram. For a more detailed description of the parameters ``ranges`` and ``uniform`` see the :ocv:cfunc:`CalcHist` function, that can initialize the ranges as well. Ranges for the histogram bins must be set before the histogram is calculated or the backproject of the histogram is calculated.
......@@ -581,5 +587,4 @@ between the calculated minimum and maximum distances are incremented
.. [RubnerSept98] Y. Rubner. C. Tomasi, L.J. Guibas. The Earth Mover’s Distance as a Metric for Image Retrieval. Technical Report STAN-CS-TN-98-86, Department of Computer Science, Stanford University, September 1998.
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Srel, and Ari Visa. Comparison of Combined Shape Descriptors for Irregular Objects, 8th British Machine Vision Conference, BMVC'97.
http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html
\ No newline at end of file
.. [Iivarinen97] Jukka Iivarinen, Markus Peura, Jaakko Srel, and Ari Visa. Comparison of Combined Shape Descriptors for Irregular Objects, 8th British Machine Vision Conference, BMVC'97. http://www.cis.hut.fi/research/IA/paper/publications/bmvc97/bmvc97.html
\ No newline at end of file
......@@ -60,10 +60,11 @@ where
The function can process the image in-place.
See Also:
:ocv:func:`threshold`,
:ocv:func:`blur`,
:ocv:func:`GaussianBlur`
.. seealso::
:ocv:func:`threshold`,
:ocv:func:`blur`,
:ocv:func:`GaussianBlur`
......@@ -579,10 +580,7 @@ where
Use these functions to either mark a connected component with the specified color in-place, or build a mask and then extract the contour, or copy the region to another image, and so on. Various modes of the function are demonstrated in the ``floodfill.cpp`` sample.
See Also:
:ocv:func:`findContours`
.. seealso:: :ocv:func:`findContours`
......@@ -743,13 +741,13 @@ Currently, Otsu's method is implemented only for 8-bit images.
.. image:: pics/threshold.png
See Also:
:ocv:func:`adaptiveThreshold`,
:ocv:func:`findContours`,
:ocv:func:`compare`,
:ocv:func:`min`,
:ocv:func:`max`
.. seealso::
:ocv:func:`adaptiveThreshold`,
:ocv:func:`findContours`,
:ocv:func:`compare`,
:ocv:func:`min`,
:ocv:func:`max`
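Editor's note, not part of the commit: a hedged Otsu-threshold sketch with the Python bindings, matching the 8-bit restriction noted above; the path is a placeholder.

import cv2

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)
t, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print("Otsu threshold:", t)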
watershed
......@@ -787,9 +785,7 @@ example, when such tangent components exist in the initial
marker image. Visual demonstration and usage example of the function
can be found in the OpenCV samples directory (see the ``watershed.cpp`` demo).
See Also:
:ocv:func:`findContours`
.. seealso:: :ocv:func:`findContours`
grabCut
......@@ -814,7 +810,9 @@ Runs the GrabCut algorithm.
:param rect: ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when ``mode==GC_INIT_WITH_RECT`` .
:param bgdModel, fgdModel: Temporary arrays used for segmentation. Do not modify them while you are processing the same image.
:param bgdModel: Temporary array for the background model. Do not modify it while you are processing the same image.
:param fgdModel: Temporary array for the foreground model. Do not modify it while you are processing the same image.
:param iterCount: Number of iterations the algorithm should make before returning the result. Note that the result can be refined with further calls with ``mode==GC_INIT_WITH_MASK`` or ``mode==GC_EVAL`` .
......
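Editor's note, not part of the commit: a hedged sketch of the rectangle-initialized GrabCut call whose parameters are split out above; the path and the rectangle are placeholders.

import cv2
import numpy as np

img = cv2.imread("input.jpg")
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)     # temporary background model
fgdModel = np.zeros((1, 65), np.float64)     # temporary foreground model
rect = (50, 50, 200, 200)                    # (x, y, w, h) around the object
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
foreground = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 255, 0).astype(np.uint8)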
......@@ -137,7 +137,7 @@ ClearSubdivVoronoi2D
The function removes all of the virtual points. It
is called internally in
:ref:`CalcSubdivVoronoi2D`
:ocv:cfunc:`CalcSubdivVoronoi2D`
if the subdivision
was modified after previous call to the function.
......@@ -154,7 +154,7 @@ Creates an empty Delaunay triangulation.
The function creates an empty Delaunay
subdivision, where 2d points can be added using the function
:ref:`SubdivDelaunay2DInsert`
:ocv:cfunc:`SubdivDelaunay2DInsert`
. All of the points to be added must be within
the specified rectangle, otherwise a runtime error will be raised.
......@@ -177,7 +177,7 @@ The function is another function that
locates the input point within the subdivision. It finds the subdivision vertex that
is the closest to the input point. It is not necessarily one of vertices
of the facet containing the input point, though the facet (located using
:ref:`Subdiv2DLocate`
:ocv:cfunc:`Subdiv2DLocate`
) is used as a starting
point. The function returns a pointer to the found subdivision vertex.
......@@ -194,8 +194,7 @@ The function returns the edge destination. The
returned pointer may be NULL if the edge is from dual subdivision and
the virtual point coordinates are not calculated yet. The virtual points
can be calculated using the function
:ref:`CalcSubdivVoronoi2D`
.
:ocv:cfunc:`CalcSubdivVoronoi2D`.
Subdiv2DGetEdge
---------------
......@@ -319,7 +318,7 @@ Inserts a single point into a Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt)
.. ocv:pyoldfunction:: cv.SubdivDelaunay2DInsert(subdiv, pt)-> point
:param subdiv: Delaunay subdivision created by the function :ref:`CreateSubdivDelaunay2D`
:param subdiv: Delaunay subdivision created by the function :ocv:cfunc:`CreateSubdivDelaunay2D`
:param pt: Inserted point
......
......@@ -71,9 +71,10 @@ The moments of a contour are defined in the same way but computed using Green's
http://en.wikipedia.org/wiki/Green_theorem
). So, due to a limited raster resolution, the moments computed for a contour are slightly different from the moments computed for the same rasterized contour.
See Also:
:ocv:func:`contourArea`,
:ocv:func:`arcLength`
.. seealso::
:ocv:func:`contourArea`,
:ocv:func:`arcLength`
......@@ -81,7 +82,7 @@ HuMoments
-------------
Calculates the seven Hu invariants.
.. ocv:function:: void HuMoments( const Moments& moments, double h[7] )
.. ocv:function:: void HuMoments( const Moments& moments, double* hu )
.. ocv:pyfunction:: cv2.HuMoments(m) -> hu
......@@ -90,14 +91,14 @@ Calculates the seven Hu invariants.
.. ocv:pyoldfunction:: cv.GetHuMoments(moments) -> hu
:param moments: Input moments computed with :ocv:func:`moments` .
:param h: Output Hu invariants.
:param hu: Output Hu invariants.
The function calculates the seven Hu invariants (introduced in [Hu62]_; see also
http://en.wikipedia.org/wiki/Image_moment) defined as:
.. math::
\begin{array}{l} h[0]= \eta _{20}+ \eta _{02} \\ h[1]=( \eta _{20}- \eta _{02})^{2}+4 \eta _{11}^{2} \\ h[2]=( \eta _{30}-3 \eta _{12})^{2}+ (3 \eta _{21}- \eta _{03})^{2} \\ h[3]=( \eta _{30}+ \eta _{12})^{2}+ ( \eta _{21}+ \eta _{03})^{2} \\ h[4]=( \eta _{30}-3 \eta _{12})( \eta _{30}+ \eta _{12})[( \eta _{30}+ \eta _{12})^{2}-3( \eta _{21}+ \eta _{03})^{2}]+(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ h[5]=( \eta _{20}- \eta _{02})[( \eta _{30}+ \eta _{12})^{2}- ( \eta _{21}+ \eta _{03})^{2}]+4 \eta _{11}( \eta _{30}+ \eta _{12})( \eta _{21}+ \eta _{03}) \\ h[6]=(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}]-( \eta _{30}-3 \eta _{12})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ \end{array}
\begin{array}{l} hu[0]= \eta _{20}+ \eta _{02} \\ hu[1]=( \eta _{20}- \eta _{02})^{2}+4 \eta _{11}^{2} \\ hu[2]=( \eta _{30}-3 \eta _{12})^{2}+ (3 \eta _{21}- \eta _{03})^{2} \\ hu[3]=( \eta _{30}+ \eta _{12})^{2}+ ( \eta _{21}+ \eta _{03})^{2} \\ hu[4]=( \eta _{30}-3 \eta _{12})( \eta _{30}+ \eta _{12})[( \eta _{30}+ \eta _{12})^{2}-3( \eta _{21}+ \eta _{03})^{2}]+(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ hu[5]=( \eta _{20}- \eta _{02})[( \eta _{30}+ \eta _{12})^{2}- ( \eta _{21}+ \eta _{03})^{2}]+4 \eta _{11}( \eta _{30}+ \eta _{12})( \eta _{21}+ \eta _{03}) \\ hu[6]=(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}]-( \eta _{30}-3 \eta _{12})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ \end{array}
where
:math:`\eta_{ji}` stands for
......@@ -105,8 +106,7 @@ where
These values are proved to be invariants to the image scale, rotation, and reflection except the seventh one, whose sign is changed by reflection. This invariance is proved with the assumption of infinite image resolution. In case of raster images, the computed Hu invariants for the original and transformed images are a bit different.
See Also:
:ocv:func:`matchShapes`
.. seealso:: :ocv:func:`matchShapes`
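Editor's note, not part of the commit: a hedged sketch computing the Hu invariants of a contour and comparing two shapes with the Python bindings; the contours are toy placeholders.

import cv2
import numpy as np

a = np.array([[0, 0], [100, 0], [100, 50], [0, 50]], np.int32)     # placeholder contour
b = np.array([[0, 0], [200, 0], [200, 100], [0, 100]], np.int32)   # scaled copy of it
hu = cv2.HuMoments(cv2.moments(a))       # 7x1 array of invariants
d = cv2.matchShapes(a, b, 1, 0.0)        # method 1 = CV_CONTOURS_MATCH_I1; small d => similar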
findContours
......@@ -280,7 +280,7 @@ Approximates Freeman chain(s) with a polygonal curve.
:param storage: Storage location for the resulting polylines
:param method: Approximation method (see the description of the function :ref:`FindContours` )
:param method: Approximation method (see the description of the function :ocv:cfunc:`FindContours` )
:param parameter: Method parameter (not used now)
......@@ -463,7 +463,9 @@ Fits a line to a 2D or 3D point set.
:param param: Numerical parameter ( ``C`` ) for some types of distances. If it is 0, an optimal value is chosen.
:param reps, aeps: Sufficient accuracy for the radius (distance between the coordinate origin and the line) and angle, respectively. 0.01 would be a good default value for both.
:param reps: Sufficient accuracy for the radius (distance between the coordinate origin and the line).
:param aeps: Sufficient accuracy for the angle. 0.01 would be a good default value for ``reps`` and ``aeps``.
The function ``fitLine`` fits a line to a 2D or 3D point set by minimizing
:math:`\sum_i \rho(r_i)` where
......
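Editor's note, not part of the commit: a hedged fitLine sketch using the accuracy parameters documented above (``reps`` and ``aeps`` set to 0.01); the point set is a toy placeholder and ``DIST_L2`` follows OpenCV 3+ naming.

import cv2
import numpy as np

pts = np.float32([[0, 0], [1, 1.1], [2, 1.9], [3, 3.05]])
vx, vy, x0, y0 = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
# (vx, vy) is a unit direction vector, (x0, y0) a point on the fitted line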
......@@ -78,7 +78,7 @@ The structure represents a possible decision tree node split. It has public memb
Pointer to the next split in the node list of splits.
.. ocv:member:: int subset[2]
.. ocv:member:: int[] subset
Bit array indicating the value subset in case of split on a categorical variable. The rule is:
......@@ -90,13 +90,11 @@ The structure represents a possible decision tree node split. It has public memb
.. ocv:member:: float ord.c
The threshold value in case of split on an ordered variable. The rule is:
The threshold value in case of split on an ordered variable. The rule is: ::
::
if var_value < c
then next_node<-left
else next_node<-right
if var_value < c
then next_node<-left
else next_node<-right
.. ocv:member:: int ord.split_point
......@@ -125,7 +123,7 @@ The structure represents a node in a decision tree. It has public members:
Pointer to the parent node.
.. ocv:mebmer:: CvDTreeNode* left
.. ocv:member:: CvDTreeNode* left
Pointer to the left child node.
......@@ -137,7 +135,7 @@ The structure represents a node in a decision tree. It has public members:
Pointer to the first (primary) split in the node list of splits.
.. ocv:mebmer:: int sample_count
.. ocv:member:: int sample_count
The number of samples that fall into the node at the training stage. It is used to resolve the difficult cases - when the variable for the primary split is missing and all the variables for other surrogate splits are missing too. In this case the sample is directed to the left if ``left->sample_count > right->sample_count`` and to the right otherwise.
......
......@@ -62,7 +62,7 @@ Alternatively, the algorithm may start with the M-step when the initial values f
:math:`p_{i,k}` can be provided. Another alternative when
:math:`p_{i,k}` are unknown is to use a simpler clustering algorithm to pre-cluster the input samples and thus obtain initial
:math:`p_{i,k}` . Often (including machine learning) the
:ref:`kmeans` algorithm is used for that purpose.
:ocv:func:`kmeans` algorithm is used for that purpose.
One of the main problems of the EM algorithm is a large number
of parameters to estimate. The majority of the parameters reside in
......@@ -176,7 +176,7 @@ Unlike many of the ML models, EM is an unsupervised learning algorithm and it do
:math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).
The trained model can be used further for prediction, just like any other classifier. The trained model is similar to the
:ref:`Bayes classifier`.
:ocv:class:`CvBayesClassifier`.
For an example of clustering random samples of the multi-Gaussian distribution using EM, see ``em.cpp`` sample in the OpenCV distribution.
......
......@@ -13,6 +13,7 @@ differential loss function, some popular ones are implemented.
Decision trees (:ocv:class:`CvDTree`) usage as base learners allows to process ordered
and categorical variables.
.. _Training GBT:
Training the GBT model
----------------------
......@@ -67,7 +68,7 @@ The following loss functions are implemented for regression problems:
\delta\cdot\left(|y-f(x)|-\dfrac{\delta}{2}\right) & : |y-f(x)|>\delta\\
\dfrac{1}{2}\cdot(y-f(x))^2 & : |y-f(x)|\leq\delta \end{array} \right.`,
where :math:`\delta` is the :math:`\alpha`-quantile estimation of the
where :math:`\delta` is the :math:`\alpha`-quantile estimation of the
:math:`|y-f(x)|`. In the current implementation :math:`\alpha=0.2`.
......@@ -88,9 +89,10 @@ where :math:`f_0` is the initial guess (the best constant model) and :math:`\nu`
is a regularization parameter from the interval :math:`(0,1]`, further called
*shrinkage*.
.. _Predicting with GBT:
Predicting with the GBT Model
-------------------------
-----------------------------
To get the GBT model prediction, you need to compute the sum of responses of
all the trees in the ensemble. For regression problems, it is the answer.
......@@ -118,7 +120,7 @@ CvGBTreesParams::CvGBTreesParams
.. ocv:function:: CvGBTreesParams::CvGBTreesParams( int loss_function_type, int weak_count, float shrinkage, float subsample_portion, int max_depth, bool use_surrogates )
:param loss_function_type: Type of the loss function used for training
(see :ref:`Training the GBT model`). It must be one of the
(see :ref:`Training GBT`). It must be one of the
following types: ``CvGBTrees::SQUARED_LOSS``, ``CvGBTrees::ABSOLUTE_LOSS``,
``CvGBTrees::HUBER_LOSS``, ``CvGBTrees::DEVIANCE_LOSS``. The first three
types are used for regression problems, and the last one for
......@@ -128,7 +130,7 @@ CvGBTreesParams::CvGBTreesParams
count of trees in the GBT model, where ``K`` is the output classes count
(equal to one in case of a regression).
:param shrinkage: Regularization parameter (see :ref:`Training the GBT model`).
:param shrinkage: Regularization parameter (see :ref:`Training GBT`).
:param subsample_portion: Portion of the whole training set used for each algorithm iteration.
Subset is generated randomly. For more information see
......@@ -222,13 +224,13 @@ Predicts a response for an input sample.
only one model.
:param k: Number of tree ensembles built in case of the classification problem
(see :ref:`Training the GBT model`). Use this
(see :ref:`Training GBT`). Use this
parameter to change the output to the sum of the trees' predictions in the
``k``-th ensemble only. To get the total GBT model prediction, ``k`` value
must be -1. For regression problems, ``k`` is also equal to -1.
The method predicts the response corresponding to the given sample
(see :ref:`Predicting with the GBT model`).
(see :ref:`Predicting with GBT`).
The result is either the class label or the estimated function value. The
:ocv:func:`predict` method enables using the parallel version of the GBT model
prediction if the OpenCV is built with the TBB library. In this case, predictions
......
......@@ -71,7 +71,7 @@ so the error on the test set usually starts increasing after the network
size reaches a limit. Besides, the larger networks are trained much
longer than the smaller ones, so it is reasonable to pre-process the data,
using
:ocv:func:`PCA::operator ()` or similar technique, and train a smaller network
:ocv:func:`PCA::operator()` or similar technique, and train a smaller network
on only essential features.
Another MLP feature is an inability to handle categorical
......
......@@ -32,25 +32,13 @@ For the random trees usage example, please, see letter_recog.cpp sample in OpenC
**References:**
*
*Machine Learning*, Wald I, July 2002.
* *Machine Learning*, Wald I, July 2002. http://stat-www.berkeley.edu/users/breiman/wald2002-1.pdf
http://stat-www.berkeley.edu/users/breiman/wald2002-1.pdf
* *Looking Inside the Black Box*, Wald II, July 2002. http://stat-www.berkeley.edu/users/breiman/wald2002-2.pdf
*
*Looking Inside the Black Box*, Wald II, July 2002.
* *Software for the Masses*, Wald III, July 2002. http://stat-www.berkeley.edu/users/breiman/wald2002-3.pdf
http://stat-www.berkeley.edu/users/breiman/wald2002-2.pdf
*
*Software for the Masses*, Wald III, July 2002.
http://stat-www.berkeley.edu/users/breiman/wald2002-3.pdf
*
And other articles from the web site
http://www.stat.berkeley.edu/users/breiman/RandomForests/cc_home.htm
.
* And other articles from the web site http://www.stat.berkeley.edu/users/breiman/RandomForests/cc_home.htm
CvRTParams
----------
......
......@@ -659,7 +659,7 @@ class FunctionTests(OpenCVTests):
self.assert_(li[0] != None)
def test_InPaint(self):
src = self.get_sample("doc/pics/building.jpg")
src = self.get_sample("samples/cpp/building.jpg")
msk = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
damaged = cv.CloneMat(src)
repaired = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 3)
......@@ -866,7 +866,7 @@ class FunctionTests(OpenCVTests):
def yield_line_image(self):
""" Needed by HoughLines tests """
src = self.get_sample("doc/pics/building.jpg", 0)
src = self.get_sample("samples/cpp/building.jpg", 0)
dst = cv.CreateImage(cv.GetSize(src), 8, 1)
cv.Canny(src, dst, 50, 200, 3)
return dst
......@@ -2104,7 +2104,7 @@ class DocumentFragmentTests(OpenCVTests):
""" Test the fragments of code that are included in the documentation """
def setUp(self):
OpenCVTests.setUp(self)
sys.path.append("../doc/python_fragments")
sys.path.append(".")
def test_precornerdetect(self):
from precornerdetect import precornerdetect
......@@ -2118,7 +2118,7 @@ class DocumentFragmentTests(OpenCVTests):
def test_findstereocorrespondence(self):
from findstereocorrespondence import findstereocorrespondence
(l,r) = [self.get_sample("doc/pics/tsukuba_%s.png" % c, cv.CV_LOAD_IMAGE_GRAYSCALE) for c in "lr"]
(l,r) = [self.get_sample("samples/cpp/tsukuba_%s.png" % c, cv.CV_LOAD_IMAGE_GRAYSCALE) for c in "lr"]
(disparity_left, disparity_right) = findstereocorrespondence(l, r)
......@@ -2129,7 +2129,7 @@ class DocumentFragmentTests(OpenCVTests):
def test_calchist(self):
from calchist import hs_histogram
i1 = self.get_sample("samples/c/lena.jpg")
i2 = self.get_sample("doc/pics/building.jpg")
i2 = self.get_sample("samples/cpp/building.jpg")
i3 = cv.CloneMat(i1)
cv.Flip(i3, i3, 1)
h1 = hs_histogram(i1)
......
......@@ -205,10 +205,10 @@ In case of point sets, the problem is formulated as follows: you need to find a
when ``fullAffine=false`` .
.. seealso::
:ocv:func:`getAffineTransform`,
:ocv:func:`getPerspectiveTransform`,
:ocv:func:`findHomography`
:ocv:func:`getAffineTransform`,
:ocv:func:`getPerspectiveTransform`,
:ocv:func:`findHomography`
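Editor's note, not part of the commit: a hedged sketch of the point-set case described above with ``fullAffine=false``, via ``cv2.estimateRigidTransform`` (available in the 2.x/3.x Python bindings); the point sets are toy placeholders.

import cv2
import numpy as np

src = np.float32([[0, 0], [100, 0], [0, 100]]).reshape(-1, 1, 2)
dst = np.float32([[10, 5], [110, 5], [10, 105]]).reshape(-1, 1, 2)
M = cv2.estimateRigidTransform(src, dst, False)   # 2x3 matrix [A | b], or None on failure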
......@@ -242,14 +242,10 @@ That is, MHI pixels where the motion occurs are set to the current ``timestamp``
The function, together with
:ocv:func:`calcMotionGradient` and
:ocv:func:`calcGlobalOrientation` , implements a motion templates technique described in
[Davis97]_
and
[Bradski00]_
.
[Davis97]_ and [Bradski00]_.
See also the OpenCV sample ``motempl.c`` that demonstrates the use of all the motion template functions.
calcMotionGradient
----------------------
Calculates a gradient orientation of a motion history image.
......@@ -267,7 +263,9 @@ Calculates a gradient orientation of a motion history image.
:param orientation: Output motion gradient orientation image that has the same type and the same size as ``mhi`` . Each pixel of the image is a motion orientation, from 0 to 360 degrees.
:param delta1, delta2: Minimum and maximum allowed difference between ``mhi`` values within a pixel neighorhood. That is, the function finds the minimum ( :math:`m(x,y)` ) and maximum ( :math:`M(x,y)` ) ``mhi`` values over :math:`3 \times 3` neighborhood of each pixel and marks the motion orientation at :math:`(x, y)` as valid only if
:param delta1: Minimal (or maximal) allowed difference between ``mhi`` values within a pixel neighorhood.
:param delta2: Maximal (or minimal) allowed difference between ``mhi`` values within a pixel neighorhood. That is, the function finds the minimum ( :math:`m(x,y)` ) and maximum ( :math:`M(x,y)` ) ``mhi`` values over :math:`3 \times 3` neighborhood of each pixel and marks the motion orientation at :math:`(x, y)` as valid only if
.. math::
......@@ -354,6 +352,7 @@ Finds an object center, size, and orientation.
.. ocv:pyfunction:: cv2.CamShift(probImage, window, criteria) -> retval, window
.. ocv:cfunction:: int cvCamShift( const CvArr* probImage, CvRect window, CvTermCriteria criteria, CvConnectedComp* comp, CvBox2D* box=NULL )
.. ocv:pyoldfunction:: cv.CamShift(probImage, window, criteria)-> (int, comp, box)
:param probImage: Back projection of the object histogram. See :ocv:func:`calcBackProject` .
......@@ -501,7 +500,7 @@ BackgroundSubtractor::operator()
--------------------------------
Computes a foreground mask.
.. ocv:function:: virtual void BackgroundSubtractor::operator()(InputArray image, OutputArray fgmask, double learningRate=0)
.. ocv:function:: void BackgroundSubtractor::operator()(InputArray image, OutputArray fgmask, double learningRate=0)
.. ocv:pyfunction:: cv2.BackgroundSubtractor.apply(image[, fgmask[, learningRate]]) -> fgmask
......@@ -514,7 +513,7 @@ BackgroundSubtractor::getBackgroundImage
----------------------------------------
Computes a background image.
.. ocv:function:: virtual void BackgroundSubtractor::getBackgroundImage(OutputArray backgroundImage) const
.. ocv:function:: void BackgroundSubtractor::getBackgroundImage(OutputArray backgroundImage) const
:param backgroundImage: The output background image.
......@@ -559,7 +558,7 @@ BackgroundSubtractorMOG::operator()
-----------------------------------
Updates the background model and returns the foreground mask
.. ocv:function:: virtual void BackgroundSubtractorMOG::operator()(InputArray image, OutputArray fgmask, double learningRate=0)
.. ocv:function:: void BackgroundSubtractorMOG::operator()(InputArray image, OutputArray fgmask, double learningRate=0)
Parameters are the same as in ``BackgroundSubtractor::operator()``
......@@ -636,17 +635,16 @@ BackgroundSubtractorMOG2::operator()
------------------------------------
Updates the background model and computes the foreground mask
.. ocv:function:: virtual void BackgroundSubtractorMOG2::operator()(InputArray image, OutputArray fgmask, double learningRate=-1)
See ``BackgroundSubtractor::operator ()``.
.. ocv:function:: void BackgroundSubtractorMOG2::operator()(InputArray image, OutputArray fgmask, double learningRate=-1)
See :ocv:func:`BackgroundSubtractor::operator()`.
BackgroundSubtractorMOG2::getBackgroundImage
--------------------------------------------
Returns background image
.. ocv:function:: virtual void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage)
.. ocv:function:: void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage)
See :ocv:func:`BackgroundSubtractor::getBackgroundImage`.
......
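Editor's note, not part of the commit: a hedged sketch of the ``BackgroundSubtractor`` usage pattern documented above, using the MOG2 model through the OpenCV 3+ Python factory function; the video path is a placeholder.

import cv2

cap = cv2.VideoCapture("video.avi")
subtractor = cv2.createBackgroundSubtractorMOG2()
while True:
    ok, frame = cap.read()
    if not ok:
        break
    fgmask = subtractor.apply(frame, learningRate=-1)   # -1: automatically chosen rate
background = subtractor.getBackgroundImage()
cap.release()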