Commit 90f47eb9 authored by Alexander Alekhin's avatar Alexander Alekhin

Merge pull request #12234 from cv3d:python/cuda/wrapping_functionalities

parents 15e57d28 532eace7
This diff is collapsed.
......@@ -4,51 +4,27 @@
typedef std::vector<cuda::GpuMat> vector_GpuMat;
typedef cuda::GpuMat::Allocator GpuMat_Allocator;
typedef cuda::HostMem::AllocType HostMem_AllocType;
typedef cuda::Event::CreateFlags Event_CreateFlags;
template<> bool pyopencv_to(PyObject* o, Ptr<cuda::GpuMat>& m, const char* name);
template<> PyObject* pyopencv_from(const Ptr<cuda::GpuMat>& m);
template<>
bool pyopencv_to(PyObject* o, cuda::GpuMat& m, const char* name)
{
if (!o || o == Py_None)
return true;
Ptr<cuda::GpuMat> mPtr(new cuda::GpuMat());
if (!pyopencv_to(o, mPtr, name)) return false;
m = *mPtr;
return true;
}
template<>
PyObject* pyopencv_from(const cuda::GpuMat& m)
{
Ptr<cuda::GpuMat> mPtr(new cuda::GpuMat());
*mPtr = m;
return pyopencv_from(mPtr);
}
template<>
bool pyopencv_to(PyObject *o, cuda::GpuMat::Allocator* &allocator, const char *name)
{
(void)name;
if (!o || o == Py_None)
return true;
failmsg("Python binding for cv::cuda::GpuMat::Allocator is not implemented yet.");
return false;
}
template<>
bool pyopencv_to(PyObject *o, cuda::Stream &stream, const char *name)
{
(void)name;
if (!o || o == Py_None)
return true;
failmsg("Python binding for cv::cuda::Stream is not implemented yet.");
return false;
}
CV_PY_TO_CLASS(cuda::GpuMat);
CV_PY_TO_CLASS(cuda::Stream);
CV_PY_TO_CLASS(cuda::Event);
CV_PY_TO_CLASS(cuda::HostMem);
CV_PY_TO_CLASS_PTR(cuda::GpuMat);
CV_PY_TO_CLASS_PTR(cuda::GpuMat::Allocator);
CV_PY_TO_ENUM(cuda::Event::CreateFlags);
CV_PY_TO_ENUM(cuda::HostMem::AllocType);
CV_PY_TO_ENUM(cuda::FeatureSet);
CV_PY_FROM_CLASS(cuda::GpuMat);
CV_PY_FROM_CLASS(cuda::Stream);
CV_PY_FROM_CLASS(cuda::HostMem);
CV_PY_FROM_CLASS_PTR(cuda::GpuMat::Allocator);
CV_PY_FROM_ENUM(cuda::DeviceInfo::ComputeMode);
#endif
......@@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Operations on Matrices")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)
ocv_add_module(cudaarithm opencv_core OPTIONAL opencv_cudev)
ocv_add_module(cudaarithm opencv_core OPTIONAL opencv_cudev WRAP python)
ocv_module_include_directories()
ocv_glob_module_sources()
......
......@@ -77,27 +77,27 @@ class implements algorithm described in @cite MOG2001 .
- An example on gaussian mixture based background/foreground segmentation can be found at
opencv_source_code/samples/gpu/bgfg_segm.cpp
*/
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
class CV_EXPORTS_W BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
using cv::BackgroundSubtractor::getBackgroundImage;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
virtual int getHistory() const = 0;
virtual void setHistory(int nframes) = 0;
CV_WRAP virtual int getHistory() const = 0;
CV_WRAP virtual void setHistory(int nframes) = 0;
virtual int getNMixtures() const = 0;
virtual void setNMixtures(int nmix) = 0;
CV_WRAP virtual int getNMixtures() const = 0;
CV_WRAP virtual void setNMixtures(int nmix) = 0;
virtual double getBackgroundRatio() const = 0;
virtual void setBackgroundRatio(double backgroundRatio) = 0;
CV_WRAP virtual double getBackgroundRatio() const = 0;
CV_WRAP virtual void setBackgroundRatio(double backgroundRatio) = 0;
virtual double getNoiseSigma() const = 0;
virtual void setNoiseSigma(double noiseSigma) = 0;
CV_WRAP virtual double getNoiseSigma() const = 0;
CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
};
/** @brief Creates mixture-of-gaussian background subtractor
......@@ -108,7 +108,7 @@ public:
@param noiseSigma Noise strength (standard deviation of the brightness or each color channel). 0
means some automatic value.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>
CV_EXPORTS_W Ptr<cuda::BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5,
double backgroundRatio = 0.7, double noiseSigma = 0);
......@@ -123,15 +123,15 @@ class implements algorithm described in @cite Zivkovic2004 .
@sa BackgroundSubtractorMOG2
*/
class CV_EXPORTS BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
class CV_EXPORTS_W BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG2
{
public:
using cv::BackgroundSubtractorMOG2::apply;
using cv::BackgroundSubtractorMOG2::getBackgroundImage;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0;
};
/** @brief Creates MOG2 Background Subtractor
......@@ -143,7 +143,7 @@ affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
CV_EXPORTS_W Ptr<cuda::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);
......
......@@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Video Encoding/Decoding")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wshadow)
ocv_add_module(cudacodec opencv_core opencv_videoio OPTIONAL opencv_cudev)
ocv_add_module(cudacodec opencv_core opencv_videoio OPTIONAL opencv_cudev WRAP python)
ocv_module_include_directories()
ocv_glob_module_sources()
......
......@@ -80,7 +80,7 @@ enum SurfaceFormat
/** @brief Different parameters for CUDA video encoder.
*/
struct CV_EXPORTS EncoderParams
struct CV_EXPORTS_W EncoderParams
{
int P_Interval; //!< NVVE_P_INTERVAL,
int IDR_Period; //!< NVVE_IDR_PERIOD,
......@@ -125,7 +125,7 @@ struct CV_EXPORTS EncoderParams
/** @brief Callbacks for CUDA video encoder.
*/
class CV_EXPORTS EncoderCallBack
class CV_EXPORTS_W EncoderCallBack
{
public:
enum PicType
......@@ -152,14 +152,14 @@ public:
@param frameNumber
@param picType Specify frame type (I-Frame, P-Frame or B-Frame).
*/
virtual void onBeginFrame(int frameNumber, PicType picType) = 0;
CV_WRAP virtual void onBeginFrame(int frameNumber, EncoderCallBack::PicType picType) = 0;
/** @brief Callback function signals that the encoding operation on the frame has finished.
@param frameNumber
@param picType Specify frame type (I-Frame, P-Frame or B-Frame).
*/
virtual void onEndFrame(int frameNumber, PicType picType) = 0;
CV_WRAP virtual void onEndFrame(int frameNumber, EncoderCallBack::PicType picType) = 0;
};
/** @brief Video writer interface.
......@@ -172,7 +172,7 @@ The implementation uses H264 video codec.
- An example on how to use the videoWriter class can be found at
opencv_source_code/samples/gpu/video_writer.cpp
*/
class CV_EXPORTS VideoWriter
class CV_EXPORTS_W VideoWriter
{
public:
virtual ~VideoWriter() {}
......@@ -185,9 +185,9 @@ public:
The method write the specified image to video file. The image must have the same size and the same
surface format as has been specified when opening the video writer.
*/
virtual void write(InputArray frame, bool lastFrame = false) = 0;
CV_WRAP virtual void write(InputArray frame, bool lastFrame = false) = 0;
virtual EncoderParams getEncoderParams() const = 0;
CV_WRAP virtual EncoderParams getEncoderParams() const = 0;
};
/** @brief Creates video writer.
......@@ -202,7 +202,7 @@ encoding, frames with other formats will be used as is.
The constructors initialize video writer. FFMPEG is used to write videos. User can implement own
multiplexing with cudacodec::EncoderCallBack .
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
/** @overload
@param fileName Name of the output video file. Only AVI file format is supported.
@param frameSize Size of the input video frames.
......@@ -212,7 +212,7 @@ CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frame
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
/** @overload
@param encoderCallback Callbacks for video encoder. See cudacodec::EncoderCallBack . Use it if you
......@@ -223,7 +223,7 @@ want to work with raw video stream.
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
/** @overload
@param encoderCallback Callbacks for video encoder. See cudacodec::EncoderCallBack . Use it if you
want to work with raw video stream.
......@@ -234,7 +234,7 @@ want to work with raw video stream.
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
CV_EXPORTS_W Ptr<cudacodec::VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
////////////////////////////////// Video Decoding //////////////////////////////////////////
......@@ -284,7 +284,7 @@ struct FormatInfo
- An example on how to use the videoReader class can be found at
opencv_source_code/samples/gpu/video_reader.cpp
*/
class CV_EXPORTS VideoReader
class CV_EXPORTS_W VideoReader
{
public:
virtual ~VideoReader() {}
......@@ -294,7 +294,7 @@ public:
If no frames has been grabbed (there are no more frames in video file), the methods return false .
The method throws Exception if error occurs.
*/
virtual bool nextFrame(OutputArray frame) = 0;
CV_WRAP virtual bool nextFrame(OutputArray frame) = 0;
/** @brief Returns information about video file format.
*/
......@@ -305,7 +305,7 @@ public:
User can implement own demultiplexing by implementing this interface.
*/
class CV_EXPORTS RawVideoSource
class CV_EXPORTS_W RawVideoSource
{
public:
virtual ~RawVideoSource() {}
......@@ -329,11 +329,11 @@ public:
FFMPEG is used to read videos. User can implement own demultiplexing with cudacodec::RawVideoSource
*/
CV_EXPORTS Ptr<VideoReader> createVideoReader(const String& filename);
CV_EXPORTS_W Ptr<VideoReader> createVideoReader(const String& filename);
/** @overload
@param source RAW video source implemented by user.
*/
CV_EXPORTS Ptr<VideoReader> createVideoReader(const Ptr<RawVideoSource>& source);
CV_EXPORTS_W Ptr<VideoReader> createVideoReader(const Ptr<RawVideoSource>& source);
//! @}
......
#ifdef HAVE_OPENCV_CUDACODEC
#include "opencv2/cudacodec.hpp"
typedef cudacodec::EncoderCallBack::PicType EncoderCallBack_PicType;
CV_PY_TO_CLASS(cudacodec::EncoderParams);
CV_PY_TO_ENUM(cudacodec::EncoderCallBack::PicType);
CV_PY_TO_ENUM(cudacodec::SurfaceFormat);
CV_PY_FROM_CLASS(cudacodec::EncoderParams);
#endif
......@@ -75,7 +75,7 @@ namespace cv { namespace cuda {
- (Python) An example applying the HOG descriptor for people detection can be found at
opencv_source_code/samples/python/peopledetect.py
*/
class CV_EXPORTS HOG : public Algorithm
class CV_EXPORTS_W HOG : public Algorithm
{
public:
enum
......@@ -92,70 +92,70 @@ public:
@param cell_size Cell size. Only (8, 8) is supported for now.
@param nbins Number of bins. Only 9 bins per cell are supported for now.
*/
static Ptr<HOG> create(Size win_size = Size(64, 128),
CV_WRAP static Ptr<HOG> create(Size win_size = Size(64, 128),
Size block_size = Size(16, 16),
Size block_stride = Size(8, 8),
Size cell_size = Size(8, 8),
int nbins = 9);
//! Gaussian smoothing window parameter.
virtual void setWinSigma(double win_sigma) = 0;
virtual double getWinSigma() const = 0;
CV_WRAP virtual void setWinSigma(double win_sigma) = 0;
CV_WRAP virtual double getWinSigma() const = 0;
//! L2-Hys normalization method shrinkage.
virtual void setL2HysThreshold(double threshold_L2hys) = 0;
virtual double getL2HysThreshold() const = 0;
CV_WRAP virtual void setL2HysThreshold(double threshold_L2hys) = 0;
CV_WRAP virtual double getL2HysThreshold() const = 0;
//! Flag to specify whether the gamma correction preprocessing is required or not.
virtual void setGammaCorrection(bool gamma_correction) = 0;
virtual bool getGammaCorrection() const = 0;
CV_WRAP virtual void setGammaCorrection(bool gamma_correction) = 0;
CV_WRAP virtual bool getGammaCorrection() const = 0;
//! Maximum number of detection window increases.
virtual void setNumLevels(int nlevels) = 0;
virtual int getNumLevels() const = 0;
CV_WRAP virtual void setNumLevels(int nlevels) = 0;
CV_WRAP virtual int getNumLevels() const = 0;
//! Threshold for the distance between features and SVM classifying plane.
//! Usually it is 0 and should be specified in the detector coefficients (as the last free
//! coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
//! manually here.
virtual void setHitThreshold(double hit_threshold) = 0;
virtual double getHitThreshold() const = 0;
CV_WRAP virtual void setHitThreshold(double hit_threshold) = 0;
CV_WRAP virtual double getHitThreshold() const = 0;
//! Window stride. It must be a multiple of block stride.
virtual void setWinStride(Size win_stride) = 0;
virtual Size getWinStride() const = 0;
CV_WRAP virtual void setWinStride(Size win_stride) = 0;
CV_WRAP virtual Size getWinStride() const = 0;
//! Coefficient of the detection window increase.
virtual void setScaleFactor(double scale0) = 0;
virtual double getScaleFactor() const = 0;
CV_WRAP virtual void setScaleFactor(double scale0) = 0;
CV_WRAP virtual double getScaleFactor() const = 0;
//! Coefficient to regulate the similarity threshold. When detected, some
//! objects can be covered by many rectangles. 0 means not to perform grouping.
//! See groupRectangles.
virtual void setGroupThreshold(int group_threshold) = 0;
virtual int getGroupThreshold() const = 0;
CV_WRAP virtual void setGroupThreshold(int group_threshold) = 0;
CV_WRAP virtual int getGroupThreshold() const = 0;
//! Descriptor storage format:
//! - **DESCR_FORMAT_ROW_BY_ROW** - Row-major order.
//! - **DESCR_FORMAT_COL_BY_COL** - Column-major order.
virtual void setDescriptorFormat(int descr_format) = 0;
virtual int getDescriptorFormat() const = 0;
CV_WRAP virtual void setDescriptorFormat(int descr_format) = 0;
CV_WRAP virtual int getDescriptorFormat() const = 0;
/** @brief Returns the number of coefficients required for the classification.
*/
virtual size_t getDescriptorSize() const = 0;
CV_WRAP virtual size_t getDescriptorSize() const = 0;
/** @brief Returns the block histogram size.
*/
virtual size_t getBlockHistogramSize() const = 0;
CV_WRAP virtual size_t getBlockHistogramSize() const = 0;
/** @brief Sets coefficients for the linear SVM classifier.
*/
virtual void setSVMDetector(InputArray detector) = 0;
CV_WRAP virtual void setSVMDetector(InputArray detector) = 0;
/** @brief Returns coefficients of the classifier trained for people detection.
*/
virtual Mat getDefaultPeopleDetector() const = 0;
CV_WRAP virtual Mat getDefaultPeopleDetector() const = 0;
/** @brief Performs object detection without a multi-scale window.
......@@ -183,7 +183,7 @@ public:
@param descriptors 2D array of descriptors.
@param stream CUDA stream.
*/
virtual void compute(InputArray img,
CV_WRAP virtual void compute(InputArray img,
OutputArray descriptors,
Stream& stream = Stream::Null()) = 0;
};
......@@ -200,7 +200,7 @@ public:
- An Nvidia API-specific cascade classifier example can be found at
opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
*/
class CV_EXPORTS CascadeClassifier : public Algorithm
class CV_EXPORTS_W CascadeClassifier : public Algorithm
{
public:
/** @brief Loads the classifier from a file. Cascade type is detected automatically by constructor parameter.
......@@ -209,36 +209,36 @@ public:
(trained by the haar training application) and NVIDIA's nvbin are supported for HAAR and only new
type of OpenCV XML cascade supported for LBP. The working haar models can be found at opencv_folder/data/haarcascades_cuda/
*/
static Ptr<CascadeClassifier> create(const String& filename);
CV_WRAP static Ptr<cuda::CascadeClassifier> create(const String& filename);
/** @overload
*/
static Ptr<CascadeClassifier> create(const FileStorage& file);
static Ptr<cuda::CascadeClassifier> create(const FileStorage& file);
//! Maximum possible object size. Objects larger than that are ignored. Used for
//! second signature and supported only for LBP cascades.
virtual void setMaxObjectSize(Size maxObjectSize) = 0;
virtual Size getMaxObjectSize() const = 0;
CV_WRAP virtual void setMaxObjectSize(Size maxObjectSize) = 0;
CV_WRAP virtual Size getMaxObjectSize() const = 0;
//! Minimum possible object size. Objects smaller than that are ignored.
virtual void setMinObjectSize(Size minSize) = 0;
virtual Size getMinObjectSize() const = 0;
CV_WRAP virtual void setMinObjectSize(Size minSize) = 0;
CV_WRAP virtual Size getMinObjectSize() const = 0;
//! Parameter specifying how much the image size is reduced at each image scale.
virtual void setScaleFactor(double scaleFactor) = 0;
virtual double getScaleFactor() const = 0;
CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;
CV_WRAP virtual double getScaleFactor() const = 0;
//! Parameter specifying how many neighbors each candidate rectangle should have
//! to retain it.
virtual void setMinNeighbors(int minNeighbors) = 0;
virtual int getMinNeighbors() const = 0;
CV_WRAP virtual void setMinNeighbors(int minNeighbors) = 0;
CV_WRAP virtual int getMinNeighbors() const = 0;
virtual void setFindLargestObject(bool findLargestObject) = 0;
virtual bool getFindLargestObject() = 0;
CV_WRAP virtual void setFindLargestObject(bool findLargestObject) = 0;
CV_WRAP virtual bool getFindLargestObject() = 0;
virtual void setMaxNumObjects(int maxNumObjects) = 0;
virtual int getMaxNumObjects() const = 0;
CV_WRAP virtual void setMaxNumObjects(int maxNumObjects) = 0;
CV_WRAP virtual int getMaxNumObjects() const = 0;
virtual Size getClassifierSize() const = 0;
CV_WRAP virtual Size getClassifierSize() const = 0;
/** @brief Detects objects of different sizes in the input image.
......@@ -268,7 +268,7 @@ public:
@sa CascadeClassifier::detectMultiScale
*/
virtual void detectMultiScale(InputArray image,
CV_WRAP virtual void detectMultiScale(InputArray image,
OutputArray objects,
Stream& stream = Stream::Null()) = 0;
......@@ -277,7 +277,7 @@ public:
@param gpu_objects Objects array in internal representation.
@param objects Resulting array.
*/
virtual void convert(OutputArray gpu_objects,
CV_WRAP virtual void convert(OutputArray gpu_objects,
std::vector<Rect>& objects) = 0;
};
......
......@@ -69,12 +69,12 @@ namespace cv { namespace cuda {
@sa StereoBM
*/
class CV_EXPORTS StereoBM : public cv::StereoBM
class CV_EXPORTS_W StereoBM : public cv::StereoBM
{
public:
using cv::StereoBM::compute;
virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
CV_WRAP virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
};
/** @brief Creates StereoBM object.
......@@ -87,7 +87,7 @@ shifted by changing the minimum disparity.
accurate disparity map. Smaller block size gives more detailed disparity map, but there is higher
chance for algorithm to find a wrong correspondence.
*/
CV_EXPORTS Ptr<cuda::StereoBM> createStereoBM(int numDisparities = 64, int blockSize = 19);
CV_EXPORTS_W Ptr<cuda::StereoBM> createStereoBM(int numDisparities = 64, int blockSize = 19);
/////////////////////////////////////////
// StereoBeliefPropagation
......@@ -125,13 +125,13 @@ requirement:
@sa StereoMatcher
*/
class CV_EXPORTS StereoBeliefPropagation : public cv::StereoMatcher
class CV_EXPORTS_W StereoBeliefPropagation : public cv::StereoMatcher
{
public:
using cv::StereoMatcher::compute;
/** @overload */
virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
CV_WRAP virtual void compute(InputArray left, InputArray right, OutputArray disparity, Stream& stream) = 0;
/** @brief Enables the stereo correspondence operator that finds the disparity for the specified data cost.
......@@ -142,40 +142,40 @@ public:
fractional bits.
@param stream Stream for the asynchronous version.
*/
virtual void compute(InputArray data, OutputArray disparity, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void compute(InputArray data, OutputArray disparity, Stream& stream = Stream::Null()) = 0;
//! number of BP iterations on each level
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int iters) = 0;
//! number of levels
virtual int getNumLevels() const = 0;
virtual void setNumLevels(int levels) = 0;
CV_WRAP virtual int getNumLevels() const = 0;
CV_WRAP virtual void setNumLevels(int levels) = 0;
//! truncation of data cost
virtual double getMaxDataTerm() const = 0;
virtual void setMaxDataTerm(double max_data_term) = 0;
CV_WRAP virtual double getMaxDataTerm() const = 0;
CV_WRAP virtual void setMaxDataTerm(double max_data_term) = 0;
//! data weight
virtual double getDataWeight() const = 0;
virtual void setDataWeight(double data_weight) = 0;
CV_WRAP virtual double getDataWeight() const = 0;
CV_WRAP virtual void setDataWeight(double data_weight) = 0;
//! truncation of discontinuity cost
virtual double getMaxDiscTerm() const = 0;
virtual void setMaxDiscTerm(double max_disc_term) = 0;
CV_WRAP virtual double getMaxDiscTerm() const = 0;
CV_WRAP virtual void setMaxDiscTerm(double max_disc_term) = 0;
//! discontinuity single jump
virtual double getDiscSingleJump() const = 0;
virtual void setDiscSingleJump(double disc_single_jump) = 0;
CV_WRAP virtual double getDiscSingleJump() const = 0;
CV_WRAP virtual void setDiscSingleJump(double disc_single_jump) = 0;
//! type for messages (CV_16SC1 or CV_32FC1)
virtual int getMsgType() const = 0;
virtual void setMsgType(int msg_type) = 0;
CV_WRAP virtual int getMsgType() const = 0;
CV_WRAP virtual void setMsgType(int msg_type) = 0;
/** @brief Uses a heuristic method to compute the recommended parameters ( ndisp, iters and levels ) for the
specified image size ( width and height ).
*/
static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);
CV_WRAP static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);
};
/** @brief Creates StereoBeliefPropagation object.
......@@ -185,7 +185,7 @@ public:
@param levels Number of levels.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
CV_EXPORTS_W Ptr<cuda::StereoBeliefPropagation>
createStereoBeliefPropagation(int ndisp = 64, int iters = 5, int levels = 5, int msg_type = CV_32F);
/////////////////////////////////////////
......@@ -214,20 +214,20 @@ requirement:
\f[10 \cdot 2^{levels-1} \cdot max \_ data \_ term < SHRT \_ MAX\f]
*/
class CV_EXPORTS StereoConstantSpaceBP : public cuda::StereoBeliefPropagation
class CV_EXPORTS_W StereoConstantSpaceBP : public cuda::StereoBeliefPropagation
{
public:
//! number of active disparity on the first level
virtual int getNrPlane() const = 0;
virtual void setNrPlane(int nr_plane) = 0;
CV_WRAP virtual int getNrPlane() const = 0;
CV_WRAP virtual void setNrPlane(int nr_plane) = 0;
virtual bool getUseLocalInitDataCost() const = 0;
virtual void setUseLocalInitDataCost(bool use_local_init_data_cost) = 0;
CV_WRAP virtual bool getUseLocalInitDataCost() const = 0;
CV_WRAP virtual void setUseLocalInitDataCost(bool use_local_init_data_cost) = 0;
/** @brief Uses a heuristic method to compute parameters (ndisp, iters, levels and nr_plane) for the specified
image size (width and height).
*/
static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);
CV_WRAP static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);
};
/** @brief Creates StereoConstantSpaceBP object.
......@@ -238,7 +238,7 @@ public:
@param nr_plane Number of disparity levels on the first level.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
CV_EXPORTS_W Ptr<cuda::StereoConstantSpaceBP>
createStereoConstantSpaceBP(int ndisp = 128, int iters = 8, int levels = 4, int nr_plane = 4, int msg_type = CV_32F);
/////////////////////////////////////////
......@@ -248,7 +248,7 @@ CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
The class implements @cite Yang2010 algorithm.
*/
class CV_EXPORTS DisparityBilateralFilter : public cv::Algorithm
class CV_EXPORTS_W DisparityBilateralFilter : public cv::Algorithm
{
public:
/** @brief Refines a disparity map using joint bilateral filtering.
......@@ -258,28 +258,28 @@ public:
@param dst Destination disparity map. It has the same size and type as disparity .
@param stream Stream for the asynchronous version.
*/
virtual void apply(InputArray disparity, InputArray image, OutputArray dst, Stream& stream = Stream::Null()) = 0;
CV_WRAP virtual void apply(InputArray disparity, InputArray image, OutputArray dst, Stream& stream = Stream::Null()) = 0;
virtual int getNumDisparities() const = 0;
virtual void setNumDisparities(int numDisparities) = 0;
CV_WRAP virtual int getNumDisparities() const = 0;
CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;
virtual int getRadius() const = 0;
virtual void setRadius(int radius) = 0;
CV_WRAP virtual int getRadius() const = 0;
CV_WRAP virtual void setRadius(int radius) = 0;
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
CV_WRAP virtual int getNumIters() const = 0;
CV_WRAP virtual void setNumIters(int iters) = 0;
//! truncation of data continuity
virtual double getEdgeThreshold() const = 0;
virtual void setEdgeThreshold(double edge_threshold) = 0;
CV_WRAP virtual double getEdgeThreshold() const = 0;
CV_WRAP virtual void setEdgeThreshold(double edge_threshold) = 0;
//! truncation of disparity continuity
virtual double getMaxDiscThreshold() const = 0;
virtual void setMaxDiscThreshold(double max_disc_threshold) = 0;
CV_WRAP virtual double getMaxDiscThreshold() const = 0;
CV_WRAP virtual void setMaxDiscThreshold(double max_disc_threshold) = 0;
//! filter range sigma
virtual double getSigmaRange() const = 0;
virtual void setSigmaRange(double sigma_range) = 0;
CV_WRAP virtual double getSigmaRange() const = 0;
CV_WRAP virtual void setSigmaRange(double sigma_range) = 0;
};
/** @brief Creates DisparityBilateralFilter object.
......@@ -288,7 +288,7 @@ public:
@param radius Filter radius.
@param iters Number of iterations.
*/
CV_EXPORTS Ptr<cuda::DisparityBilateralFilter>
CV_EXPORTS_W Ptr<cuda::DisparityBilateralFilter>
createDisparityBilateralFilter(int ndisp = 64, int radius = 3, int iters = 1);
/////////////////////////////////////////
......@@ -308,7 +308,7 @@ disparity map.
@sa reprojectImageTo3D
*/
CV_EXPORTS void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null());
CV_EXPORTS_W void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray Q, int dst_cn = 4, Stream& stream = Stream::Null());
/** @brief Colors a disparity image.
......@@ -324,7 +324,7 @@ This function draws a colored disparity map by converting disparity values from
first to HSV color space (where different disparity values correspond to different hues) and then
converting the pixels to RGB for visualization.
*/
CV_EXPORTS void drawColorDisp(InputArray src_disp, OutputArray dst_disp, int ndisp, Stream& stream = Stream::Null());
CV_EXPORTS_W void drawColorDisp(InputArray src_disp, OutputArray dst_disp, int ndisp, Stream& stream = Stream::Null());
//! @}
......
......@@ -83,7 +83,7 @@ Values of pixels with non-integer coordinates are computed using the bilinear in
@sa remap
*/
CV_EXPORTS void remap(InputArray src, OutputArray dst, InputArray xmap, InputArray ymap,
CV_EXPORTS_W void remap(InputArray src, OutputArray dst, InputArray xmap, InputArray ymap,
int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(),
Stream& stream = Stream::Null());
......@@ -105,7 +105,7 @@ supported for now.
@sa resize
*/
CV_EXPORTS void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
CV_EXPORTS_W void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
/** @brief Applies an affine transformation to an image.
......@@ -123,7 +123,7 @@ INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are support
@sa warpAffine
*/
CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
CV_EXPORTS_W void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
/** @brief Builds transformation maps for affine transformation.
......@@ -137,7 +137,7 @@ CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size d
@sa cuda::warpAffine , cuda::remap
*/
CV_EXPORTS void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
CV_EXPORTS_W void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
/** @brief Applies a perspective transformation to an image.
......@@ -155,7 +155,7 @@ INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are support
@sa warpPerspective
*/
CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
CV_EXPORTS_W void warpPerspective(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags = INTER_LINEAR,
int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
/** @brief Builds transformation maps for perspective transformation.
......@@ -169,7 +169,7 @@ CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, S
@sa cuda::warpPerspective , cuda::remap
*/
CV_EXPORTS void buildWarpPerspectiveMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
CV_EXPORTS_W void buildWarpPerspectiveMaps(InputArray M, bool inverse, Size dsize, OutputArray xmap, OutputArray ymap, Stream& stream = Stream::Null());
/** @brief Rotates an image around the origin (0,0) and then shifts it.
......@@ -186,7 +186,7 @@ are supported.
@sa cuda::warpAffine
*/
CV_EXPORTS void rotate(InputArray src, OutputArray dst, Size dsize, double angle, double xShift = 0, double yShift = 0,
CV_EXPORTS_W void rotate(InputArray src, OutputArray dst, Size dsize, double angle, double xShift = 0, double yShift = 0,
int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
/** @brief Smoothes an image and downsamples it.
......@@ -198,7 +198,7 @@ type as src .
@sa pyrDown
*/
CV_EXPORTS void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
/** @brief Upsamples an image and then smoothes it.
......@@ -207,7 +207,7 @@ CV_EXPORTS void pyrDown(InputArray src, OutputArray dst, Stream& stream = Stream
src .
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void pyrUp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
CV_EXPORTS_W void pyrUp(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
//! @}
......
......@@ -829,55 +829,55 @@ enum MarkerTypes
};
//! finds arbitrary template in the grayscale image using Generalized Hough Transform
class CV_EXPORTS GeneralizedHough : public Algorithm
class CV_EXPORTS_W GeneralizedHough : public Algorithm
{
public:
//! set template to search
virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0;
virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0;
CV_WRAP virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0;
CV_WRAP virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0;
//! find template on image
virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0;
virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0;
CV_WRAP virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0;
CV_WRAP virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0;
//! Canny low threshold.
virtual void setCannyLowThresh(int cannyLowThresh) = 0;
virtual int getCannyLowThresh() const = 0;
CV_WRAP virtual void setCannyLowThresh(int cannyLowThresh) = 0;
CV_WRAP virtual int getCannyLowThresh() const = 0;
//! Canny high threshold.
virtual void setCannyHighThresh(int cannyHighThresh) = 0;
virtual int getCannyHighThresh() const = 0;
CV_WRAP virtual void setCannyHighThresh(int cannyHighThresh) = 0;
CV_WRAP virtual int getCannyHighThresh() const = 0;
//! Minimum distance between the centers of the detected objects.
virtual void setMinDist(double minDist) = 0;
virtual double getMinDist() const = 0;
CV_WRAP virtual void setMinDist(double minDist) = 0;
CV_WRAP virtual double getMinDist() const = 0;
//! Inverse ratio of the accumulator resolution to the image resolution.
virtual void setDp(double dp) = 0;
virtual double getDp() const = 0;
CV_WRAP virtual void setDp(double dp) = 0;
CV_WRAP virtual double getDp() const = 0;
//! Maximal size of inner buffers.
virtual void setMaxBufferSize(int maxBufferSize) = 0;
virtual int getMaxBufferSize() const = 0;
CV_WRAP virtual void setMaxBufferSize(int maxBufferSize) = 0;
CV_WRAP virtual int getMaxBufferSize() const = 0;
};
//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
//! Detects position only without translation and rotation
class CV_EXPORTS GeneralizedHoughBallard : public GeneralizedHough
class CV_EXPORTS_W GeneralizedHoughBallard : public GeneralizedHough
{
public:
//! R-Table levels.
virtual void setLevels(int levels) = 0;
virtual int getLevels() const = 0;
CV_WRAP virtual void setLevels(int levels) = 0;
CV_WRAP virtual int getLevels() const = 0;
//! The accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected.
virtual void setVotesThreshold(int votesThreshold) = 0;
virtual int getVotesThreshold() const = 0;
CV_WRAP virtual void setVotesThreshold(int votesThreshold) = 0;
CV_WRAP virtual int getVotesThreshold() const = 0;
};
//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
//! Detects position, translation and rotation
class CV_EXPORTS GeneralizedHoughGuil : public GeneralizedHough
class CV_EXPORTS_W GeneralizedHoughGuil : public GeneralizedHough
{
public:
//! Angle difference in degrees between two points in feature.
......
......@@ -534,13 +534,13 @@ class FuncVariant(object):
class FuncInfo(object):
def __init__(self, classname, name, cname, isconstructor, namespace, isclassmethod):
def __init__(self, classname, name, cname, isconstructor, namespace, is_static):
self.classname = classname
self.name = name
self.cname = cname
self.isconstructor = isconstructor
self.namespace = namespace
self.isclassmethod = isclassmethod
self.is_static = is_static
self.variants = []
def add_variant(self, decl, isphantom=False):
......@@ -555,8 +555,8 @@ class FuncInfo(object):
else:
classname = ""
if self.isclassmethod:
name += "_cls"
if self.is_static:
name += "_static"
return "pyopencv_" + self.namespace.replace('.','_') + '_' + classname + name
......@@ -615,7 +615,7 @@ class FuncInfo(object):
return Template(' {"$py_funcname", CV_PY_FN_WITH_KW_($wrap_funcname, $flags), "$py_docstring"},\n'
).substitute(py_funcname = self.variants[0].wname, wrap_funcname=self.get_wrapper_name(),
flags = 'METH_CLASS' if self.isclassmethod else '0', py_docstring = full_docstring)
flags = 'METH_STATIC' if self.is_static else '0', py_docstring = full_docstring)
def gen_code(self, codegen):
all_classes = codegen.classes
......@@ -632,7 +632,7 @@ class FuncInfo(object):
selfinfo = all_classes[self.classname]
if not self.isconstructor:
amp = "&" if selfinfo.issimple else ""
if self.isclassmethod:
if self.is_static:
pass
elif selfinfo.isalgorithm:
code += gen_template_check_self_algo.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp)
......@@ -652,7 +652,7 @@ class FuncInfo(object):
all_cargs = []
parse_arglist = []
if v.isphantom and ismethod and not self.isclassmethod:
if v.isphantom and ismethod and not self.is_static:
code_args += "_self_"
# declare all the C function arguments,
......@@ -740,7 +740,7 @@ class FuncInfo(object):
if v.rettype:
code_decl += " " + v.rettype + " retval;\n"
code_fcall += "retval = "
if ismethod and not self.isclassmethod:
if ismethod and not self.is_static:
code_fcall += "_self_->" + self.cname
else:
code_fcall += self.cname
......@@ -821,7 +821,7 @@ class FuncInfo(object):
#if dump: pprint(vars(classinfo))
if self.isconstructor:
py_name = 'cv.' + classinfo.wname
elif self.isclassmethod:
elif self.is_static:
py_name = '.'.join([self.namespace, classinfo.sname + '_' + self.variants[0].wname])
else:
cname = classinfo.cname + '::' + cname
......@@ -929,12 +929,12 @@ class PythonWrapperGenerator(object):
namespace = '.'.join(namespace)
isconstructor = name == bareclassname
isclassmethod = False
is_static = False
isphantom = False
mappable = None
for m in decl[2]:
if m == "/S":
isclassmethod = True
is_static = True
elif m == "/phantom":
isphantom = True
cname = cname.replace("::", "_")
......@@ -948,10 +948,10 @@ class PythonWrapperGenerator(object):
if isconstructor:
name = "_".join(classes[:-1]+[name])
if isclassmethod:
if is_static:
# Add it as a method to the class
func_map = self.classes[classname].methods
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, isclassmethod))
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, is_static))
func.add_variant(decl, isphantom)
# Add it as global function
......@@ -966,7 +966,7 @@ class PythonWrapperGenerator(object):
else:
func_map = self.namespaces.setdefault(namespace, Namespace()).funcs
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, isclassmethod))
func = func_map.setdefault(name, FuncInfo(classname, name, cname, isconstructor, namespace, is_static))
func.add_variant(decl, isphantom)
if classname and isconstructor:
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment