Commit f61db41d authored by Vadim Pisarevsky

Merge pull request #3453 from mshabunin:doxygen-others

parents 46858c43 03e213cc
@@ -146,46 +146,76 @@ if(BUILD_DOCS AND HAVE_SPHINX)
endif()
# ========= Doxygen docs =========
macro(make_reference result modules_list black_list)
set(_res)
foreach(m ${${modules_list}})
list(FIND ${black_list} ${m} _pos)
if(${_pos} EQUAL -1)
set(_res "${_res} @ref ${m} | ${m} \n")
endif()
endforeach()
set(${result} ${_res})
endmacro()
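To illustrate, a hypothetical invocation (the list names and module names below are made up): the macro takes the names of two list variables, skips every module found in the blacklist, and appends one markdown table row per remaining module.

    # minimal sketch, assuming these two lists exist in the current scope
    set(example_modules core imgproc ts)
    set(example_blacklist ts)
    make_reference(example_ref example_modules example_blacklist)
    # example_ref now holds: " @ref core | core \n @ref imgproc | imgproc \n"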
if(BUILD_DOCS AND HAVE_DOXYGEN)
# documented modules list
set(candidates)
list(APPEND candidates ${BASE_MODULES} ${EXTRA_MODULES})
# blacklisted modules
ocv_list_filterout(candidates "^ts$")
# not documented modules list
list(APPEND blacklist "ts" "java" "python2" "python3" "world")
# gathering headers
set(all_headers) # files and dirs to process
set(all_images) # image search paths
set(reflist) # modules reference
foreach(m ${candidates})
set(reflist "${reflist} \n- @subpage ${m}")
set(all_headers ${all_headers} "${OPENCV_MODULE_opencv_${m}_HEADERS}")
set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
if(EXISTS ${docs_dir})
set(all_images ${all_images} ${docs_dir})
set(all_headers ${all_headers} ${docs_dir})
set(paths_include)
set(paths_doc)
set(paths_bib)
set(deps)
foreach(m ${BASE_MODULES} ${EXTRA_MODULES})
list(FIND blacklist ${m} _pos)
if(${_pos} EQUAL -1)
# include folder
set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
if(EXISTS "${header_dir}")
list(APPEND paths_include "${header_dir}")
list(APPEND deps ${header_dir})
endif()
# doc folder
set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
if(EXISTS "${docs_dir}")
list(APPEND paths_doc "${docs_dir}")
list(APPEND deps ${docs_dir})
endif()
# BiBTeX file
set(bib_file "${docs_dir}/${m}.bib")
if(EXISTS "${bib_file}")
set(paths_bib "${paths_bib} ${bib_file}")
list(APPEND deps ${bib_file})
endif()
endif()
endforeach()
# additional config
set(doxyfile "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
set(rootfile "${CMAKE_CURRENT_BINARY_DIR}/root.markdown")
set(all_headers ${all_headers} ${rootfile})
string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_INPUT_LIST "${all_headers}")
string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_IMAGE_PATH "${all_images}")
set(bibfile "${CMAKE_CURRENT_SOURCE_DIR}/opencv.bib")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${paths_include} ; ${paths_doc}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp ; ${paths_doc}")
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml")
set(CMAKE_DOXYGEN_OUTPUT_PATH "doxygen")
set(CMAKE_DOXYGEN_MODULES_REFERENCE "${reflist}")
set(CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp")
set(CMAKE_EXTRA_BIB_FILES "${bibfile} ${paths_bib}")
# generate references
make_reference(CMAKE_DOXYGEN_MAIN_REFERENCE BASE_MODULES blacklist)
make_reference(CMAKE_DOXYGEN_EXTRA_REFERENCE EXTRA_MODULES blacklist)
# writing file
configure_file(Doxyfile.in ${doxyfile} @ONLY)
configure_file(root.markdown.in ${rootfile} @ONLY)
configure_file(mymath.sty "${CMAKE_DOXYGEN_OUTPUT_PATH}/html/mymath.sty" @ONLY)
configure_file(mymath.sty "${CMAKE_DOXYGEN_OUTPUT_PATH}/latex/mymath.sty" @ONLY)
add_custom_target(doxygen
COMMAND ${DOXYGEN_BUILD} ${doxyfile}
DEPENDS ${doxyfile} ${all_headers} ${all_images})
DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps})
endif()
if(HAVE_DOC_GENERATOR)
@@ -85,7 +85,7 @@ SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE = @CMAKE_DOXYGEN_LAYOUT@
CITE_BIB_FILES = @CMAKE_CURRENT_SOURCE_DIR@/opencv.bib
CITE_BIB_FILES = @CMAKE_EXTRA_BIB_FILES@
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
@@ -99,7 +99,7 @@ FILE_PATTERNS =
RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS =
EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
EXCLUDE_SYMBOLS = cv::DataType<*> int
EXAMPLE_PATH = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = *
@@ -119,7 +119,7 @@ REFERENCES_LINK_SOURCE = YES
SOURCE_TOOLTIPS = YES
USE_HTAGS = NO
VERBATIM_HEADERS = NO
ALPHABETICAL_INDEX = NO
ALPHABETICAL_INDEX = YES
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
GENERATE_HTML = YES
@@ -222,6 +222,7 @@ INCLUDE_FILE_PATTERNS =
PREDEFINED = __cplusplus=1 \
HAVE_IPP_A=1 \
CVAPI(x)=x \
CV_DOXYGEN= \
CV_EXPORTS= \
CV_EXPORTS_W= \
CV_EXPORTS_W_SIMPLE= \
@@ -241,7 +242,8 @@ PREDEFINED = __cplusplus=1 \
CV_INLINE= \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
CV_NEON=1
CV_NEON=1 \
FLANN_DEPRECATED=
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =
# doxygen citelist build workaround
citelist : .*Unexpected new line character.*
@@ -8,7 +8,8 @@ MathJax.Hub.Config(
forkthree: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ #5 & \\mbox{#6}\\\\ \\end{array} \\right.", 6],
vecthree: ["\\begin{bmatrix} #1\\\\ #2\\\\ #3 \\end{bmatrix}", 3],
vecthreethree: ["\\begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \\end{bmatrix}", 9],
hdotsfor: ["\\dots", 1]
hdotsfor: ["\\dots", 1],
mathbbm: ["\\mathbb{#1}", 1]
}
}
}
@@ -3,6 +3,7 @@
\usepackage{euler}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{bbm}
\newcommand{\matTT}[9]{
\[
@@ -3,9 +3,14 @@ OpenCV modules {#mainpage}
@subpage intro
Module name | Folder
------------- | -------------
@ref core | core
@ref imgproc | imgproc
### Main modules
<!-- @CMAKE_DOXYGEN_MODULES_REFERENCE@ -->
Module name | Folder
-------------- | -------------
@CMAKE_DOXYGEN_MAIN_REFERENCE@
### Extra modules
Module name | Folder
-------------- | -------------
@CMAKE_DOXYGEN_EXTRA_REFERENCE@
@@ -3,6 +3,12 @@
#include <camera_properties.h>
/** @defgroup androidcamera Android Camera Support
*/
//! @addtogroup androidcamera
//! @{
class CameraActivity
{
public:
@@ -44,4 +50,6 @@ private:
int frameHeight;
};
//! @}
#endif
#ifndef CAMERA_PROPERTIES_H
#define CAMERA_PROPERTIES_H
//! @addtogroup androidcamera
//! @{
enum {
ANDROID_CAMERA_PROPERTY_FRAMEWIDTH = 0,
ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT = 1,
@@ -67,4 +70,6 @@ enum {
ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX
};
//! @}
#endif // CAMERA_PROPERTIES_H
@@ -50,6 +50,10 @@
extern "C" {
#endif
/** @addtogroup calib3d_c
@{
*/
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
@@ -371,6 +375,8 @@ CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
/** @} calib3d_c */
#ifdef __cplusplus
} // extern "C"
@@ -75,6 +75,9 @@
@defgroup core_opengl OpenGL interoperability
@defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
@defgroup core_optim Optimization Algorithms
@defgroup core_directx DirectX interoperability
@defgroup core_eigen Eigen support
@defgroup core_opencl OpenCL support
@}
*/
@@ -66,6 +66,11 @@ namespace cv
class Stream;
class Event;
/** @brief Class that enables getting cudaStream_t from cuda::Stream.
This class is declared in its own header because that header is the only public one that depends on
the CUDA Runtime API, so including it brings that dependency into your code.
*/
struct StreamAccessor
{
CV_EXPORTS static cudaStream_t getStream(const Stream& stream);
......
@@ -89,6 +89,11 @@ namespace cv
size_t size;
};
/** @brief Structure similar to cuda::PtrStepSz but containing only a pointer and row step.
Width and height fields are excluded for performance reasons. The structure is intended
for internal use or for users who write device code.
*/
template <typename T> struct PtrStep : public DevPtr<T>
{
__CV_CUDA_HOST_DEVICE__ PtrStep() : step(0) {}
@@ -104,6 +109,12 @@ namespace cv
__CV_CUDA_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
/** @brief Lightweight class encapsulating pitched memory on a GPU and passed to nvcc-compiled code (CUDA
kernels).
Typically, it is used internally by OpenCV and by users who write device code. You can call
its members from both host and device code.
*/
template <typename T> struct PtrStepSz : public PtrStep<T>
{
__CV_CUDA_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
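To illustrate how these device-layer types are used, here is a minimal sketch of a user-written kernel (the kernel itself is hypothetical, not part of OpenCV); a cv::cuda::GpuMat converts implicitly to PtrStepSz<T> when passed as a kernel argument.

    #include <opencv2/core/cuda_types.hpp>

    // Hypothetical kernel: binarize an 8-bit single-channel image in place.
    __global__ void binarize(cv::cuda::PtrStepSz<unsigned char> img, unsigned char thresh)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        const int y = blockIdx.y * blockDim.y + threadIdx.y;
        if (x < img.cols && y < img.rows)             // PtrStepSz carries the image size
            img(y, x) = img(y, x) > thresh ? 255 : 0; // operator()(y, x) applies the row step
    }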
CUDA Module Introduction {#cuda_intro}
========================
General Information
-------------------
The OpenCV CUDA module is a set of classes and functions to utilize CUDA computational capabilities.
It is implemented using the NVIDIA\* CUDA\* Runtime API and supports only NVIDIA GPUs. The OpenCV CUDA
module includes utility functions, low-level vision primitives, and high-level algorithms. The
utility functions and low-level primitives provide a powerful infrastructure for developing fast
vision algorithms taking advantage of CUDA whereas the high-level functionality includes some
state-of-the-art algorithms (such as stereo correspondence, face and people detectors, and others)
ready to be used by the application developers.
The CUDA module is designed as a host-level API. This means that if you have pre-compiled OpenCV
CUDA binaries, you are not required to have the CUDA Toolkit installed or write any extra code to
make use of CUDA.
The OpenCV CUDA module is designed for ease of use and does not require any knowledge of CUDA.
However, such knowledge will certainly be useful to handle non-trivial cases or achieve the highest
performance. It is helpful to understand the cost of various operations, what the GPU does, what the
preferred data formats are, and so on. The CUDA module is an effective instrument for quick
implementation of CUDA-accelerated computer vision algorithms. However, if your algorithm involves
many simple operations, then, for the best possible performance, you may still need to write your
own kernels to avoid extra write and read operations on the intermediate results.
To enable CUDA support, configure OpenCV using CMake with WITH\_CUDA=ON. When the flag is set and
if CUDA is installed, the full-featured OpenCV CUDA module is built. Otherwise, the module is still
built but at runtime all functions from the module throw Exception with CV\_GpuNotSupported error
code, except for cuda::getCudaEnabledDeviceCount(). The latter function returns zero GPU count in
this case. Building OpenCV without CUDA support does not perform device code compilation, so it does
not require the CUDA Toolkit installed. Therefore, using the cuda::getCudaEnabledDeviceCount()
function, you can implement a high-level algorithm that will detect GPU presence at runtime and
choose an appropriate implementation (CPU or GPU) accordingly.
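A minimal dispatch sketch along those lines (processOnGpu and processOnCpu are hypothetical application functions):

    #include <opencv2/core/cuda.hpp>

    void processOnGpu(const cv::Mat& frame);   // hypothetical CUDA path
    void processOnCpu(const cv::Mat& frame);   // hypothetical CPU fallback

    void process(const cv::Mat& frame)
    {
        // Returns 0 both when no device is present and when OpenCV was built without CUDA.
        if (cv::cuda::getCudaEnabledDeviceCount() > 0)
            processOnGpu(frame);
        else
            processOnCpu(frame);
    }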
Compilation for Different NVIDIA\* Platforms
--------------------------------------------
The NVIDIA\* compiler can generate binary code (cubin and fatbin) and intermediate code (PTX).
Binary code often implies a specific GPU architecture and generation, so the compatibility with
other GPUs is not guaranteed. PTX is targeted for a virtual platform that is defined entirely by the
set of capabilities or features. Depending on the selected virtual platform, some of the
instructions are emulated or disabled, even if the real hardware supports all the features.
At the first call, the PTX code is compiled to binary code for the particular GPU using a JIT
compiler. When the target GPU has a compute capability (CC) lower than the one the PTX code targets, JIT compilation fails. By
default, the OpenCV CUDA module includes:
-   Binaries for compute capabilities 1.3 and 2.0 (controlled by CUDA\_ARCH\_BIN in CMake)
-   PTX code for compute capabilities 1.1 and 1.3 (controlled by CUDA\_ARCH\_PTX in CMake)
This means that for devices with CC 1.3 and 2.0 binary images are ready to run. For all newer
platforms, the PTX code for 1.3 is JIT'ed to a binary image. For devices with CC 1.1 and 1.2, the
PTX for 1.1 is JIT'ed. For devices with CC 1.0, no code is available and the functions throw
Exception. On platforms where JIT compilation is performed first, the first run is slow.
On a GPU with CC 1.0, you can still compile the CUDA module and most of the functions will run
flawlessly. To achieve this, add "1.0" to the list of binaries, for example,
CUDA\_ARCH\_BIN="1.0 1.3 2.0". The functions that cannot be run on CC 1.0 GPUs throw an exception.
You can always determine at runtime whether the OpenCV GPU-built binaries (or PTX code) are
compatible with your GPU. The function cuda::DeviceInfo::isCompatible returns the compatibility
status (true/false).
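For example, a runtime check over all devices might look like this (a sketch):

    #include <opencv2/core/cuda.hpp>

    // True if at least one device can run the compiled binary or PTX code.
    bool haveCompatibleGpu()
    {
        const int n = cv::cuda::getCudaEnabledDeviceCount();
        for (int id = 0; id < n; ++id)
            if (cv::cuda::DeviceInfo(id).isCompatible())
                return true;
        return false;
    }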
Utilizing Multiple GPUs
-----------------------
In the current version, each of the OpenCV CUDA algorithms can use only a single GPU. So, to utilize
multiple GPUs, you have to manually distribute the work between GPUs. Switching the active device can
be done using the cuda::setDevice() function. For more details, please read the CUDA C Programming Guide.
While developing algorithms for multiple GPUs, note the data passing overhead. For primitive functions
and small images, it can be significant, which may eliminate all the advantages of having multiple
GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo
Block Matching algorithm has been successfully parallelized using the following scheme:
1. Split each image of the stereo pair into two horizontal overlapping stripes.
2. Process each pair of stripes (from the left and right images) on a separate Fermi\* GPU.
3. Merge the results into a single disparity map.
With this algorithm, a dual GPU gave a 180% performance increase compared to the single Fermi GPU.
For a source code example, see <https://github.com/Itseez/opencv/tree/master/samples/gpu/>.
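A sketch of that scheme (matchStripe is a hypothetical worker; a real implementation must also choose the stripe overlap and merge the seam between the two partial results):

    #include <thread>
    #include <opencv2/core/cuda.hpp>

    // Hypothetical worker: runs stereo matching for one stripe on the given GPU.
    void matchStripe(int deviceId, cv::Mat left, cv::Mat right, cv::Mat* disparity)
    {
        cv::cuda::setDevice(deviceId); // bind this thread to its own GPU
        // ... upload the stripes, run the block matcher, download into *disparity ...
    }

    void stereoOnTwoGpus(const cv::Mat& left, const cv::Mat& right, cv::Mat& top, cv::Mat& bottom)
    {
        const int half = left.rows / 2; // stripes must overlap in practice
        std::thread t0(matchStripe, 0, left.rowRange(0, half), right.rowRange(0, half), &top);
        std::thread t1(matchStripe, 1, left.rowRange(half, left.rows), right.rowRange(half, left.rows), &bottom);
        t0.join();
        t1.join();
    }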
@@ -50,11 +50,33 @@
#include "opencv2/core/cuda.hpp"
#include "opencv2/video/background_segm.hpp"
/**
@addtogroup cuda
@{
@defgroup cudabgsegm Background Segmentation
@}
*/
namespace cv { namespace cuda {
//! @addtogroup cudabgsegm
//! @{
////////////////////////////////////////////////////
// MOG
/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements the algorithm described in @cite MOG2001 .
@sa BackgroundSubtractorMOG
@note
- An example on Gaussian mixture based background/foreground segmentation can be found at
opencv_source_code/samples/gpu/bgfg_segm.cpp
*/
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
public:
@@ -78,6 +100,14 @@ public:
virtual void setNoiseSigma(double noiseSigma) = 0;
};
/** @brief Creates mixture-of-gaussian background subtractor
@param history Length of the history.
@param nmixtures Number of Gaussian mixtures.
@param backgroundRatio Background ratio.
@param noiseSigma Noise strength (standard deviation of the brightness of each color channel); 0
means an automatic value.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5,
double backgroundRatio = 0.7, double noiseSigma = 0);
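A usage sketch for this factory (default parameters; the frame is assumed to be already uploaded to a GpuMat, and the stream-aware apply overload of the CUDA subtractors is assumed):

    #include <opencv2/cudabgsegm.hpp>

    void segment(const cv::cuda::GpuMat& frame, cv::cuda::GpuMat& fgmask)
    {
        static cv::Ptr<cv::cuda::BackgroundSubtractorMOG> mog =
            cv::cuda::createBackgroundSubtractorMOG(); // history=200, nmixtures=5
        mog->apply(frame, fgmask, -1.0, cv::cuda::Stream::Null()); // -1: automatic learning rate
    }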
@@ -85,6 +115,14 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
////////////////////////////////////////////////////
// MOG2
/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements the algorithm described in @cite Zivkovic2004 .
@sa BackgroundSubtractorMOG2
*/
class CV_EXPORTS BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
{
public:
@@ -96,6 +134,15 @@ public:
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
};
/** @brief Creates MOG2 Background Subtractor
@param history Length of the history.
@param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
to decide whether a pixel is well described by the background model. This parameter does not
affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);
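And a sketch of asynchronous use on a stream (the stream-aware getBackgroundImage overload is the one declared above; the matching apply overload with a stream argument is assumed from the same interface):

    #include <opencv2/cudabgsegm.hpp>

    void segmentAsync(const cv::cuda::GpuMat& frame, cv::cuda::GpuMat& fgmask, cv::cuda::GpuMat& background)
    {
        static cv::Ptr<cv::cuda::BackgroundSubtractorMOG2> mog2 =
            cv::cuda::createBackgroundSubtractorMOG2(); // history=500, varThreshold=16, shadows on
        cv::cuda::Stream stream;
        mog2->apply(frame, fgmask, -1.0, stream);     // enqueue segmentation
        mog2->getBackgroundImage(background, stream); // enqueue background estimation
        stream.waitForCompletion();                   // synchronize once at the end
    }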
@@ -103,6 +150,12 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
////////////////////////////////////////////////////
// GMG
/** @brief Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements the algorithm described in @cite Gold2012 .
*/
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
@@ -140,54 +193,71 @@ public:
virtual void setMaxVal(double val) = 0;
};
/** @brief Creates GMG Background Subtractor
@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which a pixel is determined to be foreground (FG).
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
////////////////////////////////////////////////////
// FGD
/**
* Foreground Object Detection from Videos Containing Complex Background.
* Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
* ACM MM2003 9p
/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
of the background.
Any pixel which does not fit this model is then deemed to be foreground. The class implements
the algorithm described in @cite FGD2003 .
@sa BackgroundSubtractor
*/
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
struct CV_EXPORTS FGDParams
{
int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
// Used to allow the first N1c vectors to adapt over time to changing background.
int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
//!< Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
// Used to allow the first N1cc vectors to adapt over time to changing background.
int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
//!< Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
// These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
//!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; // Affects color and color co-occurrence quantization, typically set to 2.
float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
float delta; //!< Affects color and color co-occurrence quantization, typically set to 2.
float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.
// default Params
//! default Params
FGDParams();
};
/** @brief Creates FGD Background Subtractor
@param params Algorithm's parameters. See @cite FGD2003 for explanation.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
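A usage sketch (default FGDParams; the 3-argument apply comes from the cv::BackgroundSubtractor base class):

    #include <vector>
    #include <opencv2/cudabgsegm.hpp>

    void fgdSegment(const cv::cuda::GpuMat& frame, cv::cuda::GpuMat& fgmask)
    {
        static cv::Ptr<cv::cuda::BackgroundSubtractorFGD> fgd =
            cv::cuda::createBackgroundSubtractorFGD(); // default FGDParams
        fgd->apply(frame, fgmask, -1.0);               // -1: automatic learning rate
        std::vector<std::vector<cv::Point> > regions;  // filled in CPU memory
        fgd->getForegroundRegions(regions);
    }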
//! @}
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDABGSEGM_HPP__ */
@@ -49,4 +49,11 @@
#include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
#include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
/**
@addtogroup cuda
@{
@defgroup cudalegacy Legacy support
@}
*/
#endif /* __OPENCV_CUDALEGACY_HPP__ */
@@ -60,6 +60,8 @@
//
//==============================================================================
//! @addtogroup cudalegacy
//! @{
/**
* Compile-time assert namespace
@@ -203,6 +205,7 @@ struct NcvPoint2D32u
__host__ __device__ NcvPoint2D32u(Ncv32u x_, Ncv32u y_) : x(x_), y(y_) {}
};
//! @cond IGNORED
NCV_CT_ASSERT(sizeof(NcvBool) <= 4);
NCV_CT_ASSERT(sizeof(Ncv64s) == 8);
@@ -221,6 +224,7 @@ NCV_CT_ASSERT(sizeof(NcvRect32u) == 4 * sizeof(Ncv32u));
NCV_CT_ASSERT(sizeof(NcvSize32u) == 2 * sizeof(Ncv32u));
NCV_CT_ASSERT(sizeof(NcvPoint2D32u) == 2 * sizeof(Ncv32u));
//! @endcond
//==============================================================================
//
@@ -1023,6 +1027,6 @@ CV_EXPORTS NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst, Ncv32u dstStride, Nc
NCVMatrixAlloc<type> name(alloc, width, height); \
ncvAssertReturn(name.isMemAllocated(), err);
//! @}
#endif // _ncv_hpp_
@@ -62,6 +62,9 @@
#include "opencv2/cudalegacy/NCV.hpp"
//! @addtogroup cudalegacy
//! @{
/// \brief Model and solver parameters
struct NCVBroxOpticalFlowDescriptor
{
@@ -89,6 +92,7 @@ struct NCVBroxOpticalFlowDescriptor
/// \param [in] frame1 frame to track
/// \param [out] u flow horizontal component (along \b x axis)
/// \param [out] v flow vertical component (along \b y axis)
/// \param stream
/// \return computation status
/////////////////////////////////////////////////////////////////////////////////////////
@@ -101,4 +105,6 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
NCVMatrix<Ncv32f> &v,
cudaStream_t stream);
//! @}
#endif
@@ -61,6 +61,8 @@
#include "opencv2/cudalegacy/NCV.hpp"
//! @addtogroup cudalegacy
//! @{
//==============================================================================
//
@@ -456,6 +458,6 @@ CV_EXPORTS NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures);
//! @}
#endif // _ncvhaarobjectdetection_hpp_
@@ -48,6 +48,8 @@
#include "opencv2/cudalegacy/NCV.hpp"
#include "opencv2/core/cuda/common.hpp"
//! @cond IGNORED
namespace cv { namespace cuda { namespace device
{
namespace pyramid
@@ -106,4 +108,6 @@ private:
#endif //_WIN32
//! @endcond
#endif //_ncvpyramid_hpp_
@@ -45,19 +45,14 @@
#include "opencv2/cudalegacy/NCV.hpp"
/**
* \file NPP_staging.hpp
* NPP Staging Library
*/
//! @addtogroup cudalegacy
//! @{
/** \defgroup core_npp NPPST Core
* Basic functions for CUDA streams management.
* @{
*/
/**
* Gets an active CUDA stream used by NPPST
* NOT THREAD SAFE
@@ -168,6 +163,7 @@ NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState);
* \param nSrcStep [IN] Source image line step
* \param pDst [OUT] Destination image pointer (CUDA device memory)
* \param dstSize [OUT] Destination image size
* \param nDstStep
* \param oROI [IN] Region of interest in the source image
* \param borderType [IN] Type of border
* \param pKernel [IN] Pointer to row kernel values (CUDA device memory)
@@ -201,6 +197,7 @@ NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
* \param nSrcStep [IN] Source image line step
* \param pDst [OUT] Destination image pointer (CUDA device memory)
* \param dstSize [OUT] Destination image size
* \param nDstStep [IN]
* \param oROI [IN] Region of interest in the source image
* \param borderType [IN] Type of border
* \param pKernel [IN] Pointer to column kernel values (CUDA device memory)
@@ -228,7 +225,7 @@ NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
/** Size of buffer required for vector image warping.
*
* \param srcSize [IN] Source image size
* \param nStep [IN] Source image line step
* \param nSrcStep [IN] Source image line step
* \param hpSize [OUT] Where to store computed size (host memory)
*
* \return NCV status code
@@ -285,6 +282,7 @@ NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
* \param pU [IN] Pointer to horizontal displacement field (CUDA device memory)
* \param pV [IN] Pointer to vertical displacement field (CUDA device memory)
* \param nVFStep [IN] Displacement field line step
* \param pBuffer
* \param timeScale [IN] Value by which displacement field will be scaled for warping
* \param pDst [OUT] Destination image pointer (CUDA device memory)
*
@@ -903,5 +901,6 @@ NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
/*@}*/
//! @}
#endif // _npp_staging_hpp_
@@ -56,6 +56,8 @@
#include "opencv2/cudalegacy.hpp"
//! @cond IGNORED
namespace cv { namespace cuda
{
class NppStStreamHandler
@@ -89,4 +91,6 @@ namespace cv { namespace cuda
#define ncvSafeCall(expr) cv::cuda::checkNcvError(expr, __FILE__, __LINE__, CV_Func)
//! @endcond
#endif // __OPENCV_CORE_CUDALEGACY_PRIVATE_HPP__
@@ -49,8 +49,21 @@
#include "opencv2/core/cuda.hpp"
/**
@addtogroup cuda
@{
@defgroup cudaoptflow Optical Flow
@}
*/
namespace cv { namespace cuda {
//! @addtogroup cudaoptflow
//! @{
/** @brief Class computing the optical flow for two images using the Brox et al. optical flow algorithm
(@cite Brox2004).
*/
class CV_EXPORTS BroxOpticalFlow
{
public:
@@ -88,16 +101,58 @@ public:
GpuMat buf;
};
/** @brief Class used for calculating an optical flow.
The class can calculate an optical flow for a sparse feature set or dense optical flow using the
iterative Lucas-Kanade method with pyramids.
@sa calcOpticalFlowPyrLK
@note
- An example of the Lucas Kanade optical flow algorithm can be found at
opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
*/
class CV_EXPORTS PyrLKOpticalFlow
{
public:
PyrLKOpticalFlow();
/** @brief Calculate an optical flow for a sparse feature set.
@param prevImg First 8-bit input image (supports both grayscale and color images).
@param nextImg Second input image of the same size and the same type as prevImg .
@param prevPts Vector of 2D points for which the flow needs to be found. It must be a one-row
matrix of type CV_32FC2.
@param nextPts Output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image. When useInitialFlow
is true, the vector must have the same size as in the input.
@param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
flow for the corresponding features has been found. Otherwise, it is set to 0.
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points, or the minimum eigenvalue if getMinEigenVals is set. It can be NULL if
not needed.
@sa calcOpticalFlowPyrLK
*/
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
/** @brief Calculate dense optical flow.
@param prevImg First 8-bit grayscale input image.
@param nextImg Second input image of the same size and the same type as prevImg .
@param u Horizontal component of the optical flow of the same size as input images, 32-bit
floating-point, single-channel
@param v Vertical component of the optical flow of the same size as input images, 32-bit
floating-point, single-channel
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points, or the minimum eigenvalue if getMinEigenVals is set. It can be NULL if
not needed.
*/
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
/** @brief Releases inner buffer memory.
*/
void releaseMemory();
Size winSize;
@@ -115,6 +170,8 @@ private:
GpuMat vPyr_[2];
};
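A usage sketch for the sparse interface (prevPts is assumed to be a one-row CV_32FC2 GpuMat produced by a feature detector):

    #include <opencv2/cudaoptflow.hpp>

    void trackFeatures(const cv::cuda::GpuMat& prevImg, const cv::cuda::GpuMat& nextImg,
                       const cv::cuda::GpuMat& prevPts)
    {
        cv::cuda::PyrLKOpticalFlow lk;
        lk.winSize = cv::Size(21, 21);                         // public tuning member
        cv::cuda::GpuMat nextPts, status;
        lk.sparse(prevImg, nextImg, prevPts, nextPts, status); // err output omitted
        // status contains 1 for every feature whose flow was found
    }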
/** @brief Class computing a dense optical flow using the Gunnar Farneback algorithm.
*/
class CV_EXPORTS FarnebackOpticalFlow
{
public:
@@ -139,8 +196,20 @@ public:
double polySigma;
int flags;
/** @brief Computes a dense optical flow using the Gunnar Farneback algorithm.
@param frame0 First 8-bit gray-scale input image
@param frame1 Second 8-bit gray-scale input image
@param flowx Flow horizontal component
@param flowy Flow vertical component
@param s Stream
@sa calcOpticalFlowFarneback
*/
void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
/** @brief Releases unused auxiliary memory buffers.
*/
void releaseMemory()
{
frames_[0].release();
@@ -295,20 +364,22 @@ private:
GpuMat extended_I1;
};
//! Interpolate frames (images) using provided optical flow (displacement field).
//! frame0 - frame 0 (32-bit floating point images, single channel)
//! frame1 - frame 1 (the same type and size)
//! fu - forward horizontal displacement
//! fv - forward vertical displacement
//! bu - backward horizontal displacement
//! bv - backward vertical displacement
//! pos - new frame position
//! newFrame - new frame
//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat;
//! occlusion masks 0, occlusion masks 1,
//! interpolated forward flow 0, interpolated forward flow 1,
//! interpolated backward flow 0, interpolated backward flow 1
//!
/** @brief Interpolates frames (images) using provided optical flow (displacement field).
@param frame0 First frame (32-bit floating point images, single channel).
@param frame1 Second frame. Must have the same type and size as frame0 .
@param fu Forward horizontal displacement.
@param fv Forward vertical displacement.
@param bu Backward horizontal displacement.
@param bv Backward vertical displacement.
@param pos New frame position.
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
const GpuMat& fu, const GpuMat& fv,
const GpuMat& bu, const GpuMat& bv,
@@ -317,6 +388,8 @@ CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
//! @}
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDAOPTFLOW_HPP__ */
@@ -109,4 +109,11 @@
#include "cudev/expr/unary_op.hpp"
#include "cudev/expr/warping.hpp"
/**
@addtogroup cuda
@{
@defgroup cudev Device layer
@}
*/
#endif
@@ -50,6 +50,9 @@
namespace cv { namespace cudev {
//! @addtogroup cudev
//! @{
struct Block
{
__device__ __forceinline__ static uint blockId()
@@ -122,6 +125,9 @@ __device__ __forceinline__ static void blockTransfrom(InIt1 beg1, InIt1 end1, In
for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
*o = op(*t1, *t2);
}
//! @}
}}
#endif