Commit 1b1eab8e authored by Vadim Pisarevsky

added helper macros to the function declarations

parent b59b0fd7
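
Note (not part of the commit): the helper macros referenced in the message above — CV_OUT, CV_WRAP_AS, CV_NO_WRAP, CV_CARRAY, CV_CUSTOM_CARRAY, CV_METHOD — are informative annotations for wrapper generators. A minimal sketch of the idea, assuming the macros stay empty so that annotated headers compile as ordinary C++:

    // Sketch only: each macro expands to nothing for the C++ compiler,
    // but a wrapper generator parsing the headers can key on it.
    #define CV_OUT                 // parameter is an output; wrappers turn it into a return value
    #define CV_NO_WRAP             // skip this function/method when generating wrappers
    #define CV_WRAP_AS(synonym)    // expose under another name, e.g. operator() as "detect"

    // An annotated declaration compiles exactly like the plain one:
    // CV_WRAP_AS(detect) void operator()(const Mat& img, const Mat& mask,
    //                                    CV_OUT vector<KeyPoint>& keypoints) const;
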
......@@ -212,7 +212,7 @@ CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback,
#define CV_DbgAssert(expr)
#endif
CV_EXPORTS void setNumThreads(int);
CV_EXPORTS void setNumThreads(int nthreads);
CV_EXPORTS int getNumThreads();
CV_EXPORTS int getThreadNum();
......@@ -330,7 +330,7 @@ static inline size_t alignSize(size_t sz, int n)
\note{Since optimization may imply using special data structures, it may be unsafe
to call this function anywhere in the code. Instead, call it somewhere at the top level.}
*/
CV_EXPORTS void setUseOptimized(bool);
CV_EXPORTS void setUseOptimized(bool onoff);
/*!
Returns the current optimization status
......
......@@ -158,9 +158,12 @@ typedef unsigned short ushort;
typedef signed char schar;
/* special informative macros for wrapper generators */
#define CV_OUT
#define CV_CARRAY(counter)
#define CV_CUSTOM_CARRAY(args)
#define CV_METHOD
#define CV_NO_WRAP
#define CV_OUT
#define CV_WRAP_AS(synonym)
/* CvArr* is used to pass arbitrary
* array-like data structures
......
......@@ -232,10 +232,12 @@ public:
: pt(x, y), size(_size), angle(_angle),
response(_response), octave(_octave), class_id(_class_id) {}
//! converts vector of keypoints to vector of points
static void convert(const std::vector<KeyPoint>& keypoints, std::vector<Point2f>& points2f,
static void convert(const std::vector<KeyPoint>& keypoints,
CV_OUT std::vector<Point2f>& points2f,
const std::vector<int>& keypointIndexes=std::vector<int>());
//! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation
static void convert(const std::vector<Point2f>& points2f, std::vector<KeyPoint>& keypoints,
static void convert(const std::vector<Point2f>& points2f,
CV_OUT std::vector<KeyPoint>& keypoints,
float size=1, float response=1, int octave=0, int class_id=-1);
//! computes overlap for pair of keypoints;
......@@ -254,7 +256,7 @@ public:
//! writes vector of keypoints to the file storage
CV_EXPORTS void write(FileStorage& fs, const string& name, const vector<KeyPoint>& keypoints);
//! reads vector of keypoints from the specified file storage node
CV_EXPORTS void read(const FileNode& node, vector<KeyPoint>& keypoints);
CV_EXPORTS void read(const FileNode& node, CV_OUT vector<KeyPoint>& keypoints);
/*!
SIFT implementation.
......@@ -357,12 +359,12 @@ public:
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! finds the keypoints using fast hessian detector used in SURF
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints) const;
CV_WRAP_AS(detect) void operator()(const Mat& img, const Mat& mask,
CV_OUT vector<KeyPoint>& keypoints) const;
//! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
void operator()(const Mat& img, const Mat& mask,
vector<KeyPoint>& keypoints,
vector<float>& descriptors,
CV_WRAP_AS(detect) void operator()(const Mat& img, const Mat& mask,
CV_OUT vector<KeyPoint>& keypoints,
CV_OUT vector<float>& descriptors,
bool useProvidedKeypoints=false) const;
};
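
A brief sketch (not part of the commit) of calling the SURF functor whose operator() is exported above as "detect"; the constructor argument and image path are assumptions:

    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    void surf_example()
    {
        Mat img = imread("scene.png", 0);          // grayscale load; path is hypothetical
        std::vector<KeyPoint> keypoints;
        std::vector<float> descriptors;

        SURF surf(500.0);                          // hessian threshold (assumed value)
        surf(img, Mat(), keypoints, descriptors);  // CV_OUT outputs: keypoints, descriptors
    }
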
......@@ -386,7 +388,8 @@ public:
int _max_evolution, double _area_threshold,
double _min_margin, int _edge_blur_size );
//! the operator that extracts the MSERs from the image or the specific part of it
void operator()( const Mat& image, vector<vector<Point> >& msers, const Mat& mask ) const;
CV_WRAP_AS(detect) void operator()( const Mat& image,
CV_OUT vector<vector<Point> >& msers, const Mat& mask ) const;
};
/*!
......@@ -405,11 +408,13 @@ public:
int _lineThresholdBinarized,
int _suppressNonmaxSize);
//! finds the keypoints in the image
void operator()(const Mat& image, vector<KeyPoint>& keypoints) const;
CV_WRAP_AS(detect) void operator()(const Mat& image,
CV_OUT vector<KeyPoint>& keypoints) const;
};
//! detects corners using FAST algorithm by E. Rosten
CV_EXPORTS void FAST( const Mat& image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true );
CV_EXPORTS void FAST( const Mat& image, CV_OUT vector<KeyPoint>& keypoints,
int threshold, bool nonmaxSupression=true );
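
Usage sketch (not part of the commit) for the FAST declaration above; the threshold value is arbitrary:

    #include <opencv2/features2d/features2d.hpp>

    void fast_example(const cv::Mat& gray)
    {
        std::vector<cv::KeyPoint> corners;
        cv::FAST(gray, corners, 30);               // nonmaxSupression defaults to true
    }
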
/*!
The Patch Generator class
......@@ -423,13 +428,14 @@ public:
double _lambdaMin=0.6, double _lambdaMax=1.5,
double _thetaMin=-CV_PI, double _thetaMax=CV_PI,
double _phiMin=-CV_PI, double _phiMax=CV_PI );
void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const;
void operator()(const Mat& image, const Mat& transform, Mat& patch,
CV_WRAP_AS(generate) void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const;
CV_WRAP_AS(generate) void operator()(const Mat& image, const Mat& transform, Mat& patch,
Size patchSize, RNG& rng) const;
void warpWholeImage(const Mat& image, Mat& matT, Mat& buf,
Mat& warped, int border, RNG& rng) const;
CV_OUT Mat& warped, int border, RNG& rng) const;
void generateRandomTransform(Point2f srcCenter, Point2f dstCenter,
Mat& transform, RNG& rng, bool inverse=false) const;
CV_OUT Mat& transform, RNG& rng,
bool inverse=false) const;
void setAffineParam(double lambda, double theta, double phi);
double backgroundMin, backgroundMax;
......@@ -447,9 +453,13 @@ public:
LDetector();
LDetector(int _radius, int _threshold, int _nOctaves,
int _nViews, double _baseFeatureSize, double _clusteringDistance);
void operator()(const Mat& image, vector<KeyPoint>& keypoints, int maxCount=0, bool scaleCoords=true) const;
void operator()(const vector<Mat>& pyr, vector<KeyPoint>& keypoints, int maxCount=0, bool scaleCoords=true) const;
void getMostStable2D(const Mat& image, vector<KeyPoint>& keypoints,
CV_WRAP_AS(detect) void operator()(const Mat& image,
CV_OUT vector<KeyPoint>& keypoints,
int maxCount=0, bool scaleCoords=true) const;
CV_WRAP_AS(detect) void operator()(const vector<Mat>& pyr,
CV_OUT vector<KeyPoint>& keypoints,
int maxCount=0, bool scaleCoords=true) const;
void getMostStable2D(const Mat& image, CV_OUT vector<KeyPoint>& keypoints,
int maxCount, const PatchGenerator& patchGenerator) const;
void setVerbose(bool verbose);
......@@ -561,6 +571,7 @@ protected:
vector<float> posteriors;
};
class CV_EXPORTS PlanarObjectDetector
{
public:
......@@ -596,9 +607,10 @@ public:
void read(const FileNode& node);
void write(FileStorage& fs, const String& name=String()) const;
bool operator()(const Mat& image, Mat& H, vector<Point2f>& corners) const;
bool operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
Mat& H, vector<Point2f>& corners, vector<int>* pairs=0) const;
CV_WRAP_AS(detect) bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector<Point2f>& corners) const;
CV_WRAP_AS(detect) bool operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
CV_OUT Mat& H, CV_OUT vector<Point2f>& corners,
CV_OUT vector<int>* pairs=0) const;
protected:
bool verbose;
......@@ -735,7 +747,6 @@ struct CV_EXPORTS RTreeNode
short offset1, offset2;
RTreeNode() {}
RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
: offset1(y1*RandomizedTree::PATCH_SIZE + x1),
offset2(y2*RandomizedTree::PATCH_SIZE + x2)
......@@ -755,7 +766,6 @@ public:
static const size_t DEFAULT_NUM_QUANT_BITS = 4;
RTreeClassifier();
void train(std::vector<BaseKeypoint> const& base_set,
RNG &rng,
int num_trees = RTreeClassifier::DEFAULT_TREES,
......
......@@ -106,7 +106,7 @@ CV_EXPORTS bool imwrite( const string& filename, const Mat& img,
const vector<int>& params=vector<int>());
CV_EXPORTS Mat imdecode( const Mat& buf, int flags );
CV_EXPORTS bool imencode( const string& ext, const Mat& img,
vector<uchar>& buf,
CV_OUT vector<uchar>& buf,
const vector<int>& params=vector<int>());
CV_EXPORTS int waitKey(int delay=0);
......@@ -130,8 +130,8 @@ public:
virtual void release();
virtual bool grab();
virtual bool retrieve(Mat& image, int channel=0);
virtual VideoCapture& operator >> (Mat& image);
virtual bool retrieve(CV_OUT Mat& image, int channel=0);
virtual CV_WRAP_AS(query) VideoCapture& operator >> (Mat& image);
virtual bool set(int propId, double value);
virtual double get(int propId);
......@@ -145,12 +145,14 @@ class CV_EXPORTS VideoWriter
{
public:
VideoWriter();
VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor=true);
VideoWriter(const string& filename, int fourcc, double fps,
Size frameSize, bool isColor=true);
virtual ~VideoWriter();
virtual bool open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor=true);
virtual bool open(const string& filename, int fourcc, double fps,
Size frameSize, bool isColor=true);
virtual bool isOpened() const;
virtual VideoWriter& operator << (const Mat& image);
virtual CV_WRAP_AS(write) VideoWriter& operator << (const Mat& image);
protected:
Ptr<CvVideoWriter> writer;
......
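
Sketch (not part of the commit) of the video I/O classes touched above. In C++ the stream operators are used directly; the CV_WRAP_AS names ("query", "write") are only what generated wrappers would expose. File names, codec and frame rate are assumptions:

    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    void transcode_example()
    {
        VideoCapture cap("input.avi");
        if (!cap.isOpened()) return;

        Mat frame;
        cap >> frame;                               // wrapped as "query"
        if (frame.empty()) return;

        VideoWriter out("output.avi", CV_FOURCC('M','J','P','G'),
                        25.0, frame.size(), true);
        while (!frame.empty())
        {
            out << frame;                           // wrapped as "write"
            cap >> frame;
        }
    }
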
......@@ -247,23 +247,21 @@ public:
CvNormalBayesClassifier();
virtual ~CvNormalBayesClassifier();
CvNormalBayesClassifier( const CvMat* _train_data, const CvMat* _responses,
CV_NO_WRAP CvNormalBayesClassifier( const CvMat* _train_data, const CvMat* _responses,
const CvMat* _var_idx=0, const CvMat* _sample_idx=0 );
virtual bool train( const CvMat* _train_data, const CvMat* _responses,
CV_NO_WRAP virtual bool train( const CvMat* _train_data, const CvMat* _responses,
const CvMat* _var_idx = 0, const CvMat* _sample_idx=0, bool update=false );
virtual float predict( const CvMat* _samples, CvMat* results=0 ) const;
CV_NO_WRAP virtual float predict( const CvMat* _samples, CvMat* results=0 ) const;
virtual void clear();
#ifndef SWIG
CvNormalBayesClassifier( const cv::Mat& _train_data, const cv::Mat& _responses,
const cv::Mat& _var_idx=cv::Mat(), const cv::Mat& _sample_idx=cv::Mat() );
virtual bool train( const cv::Mat& _train_data, const cv::Mat& _responses,
const cv::Mat& _var_idx = cv::Mat(), const cv::Mat& _sample_idx=cv::Mat(),
bool update=false );
virtual float predict( const cv::Mat& _samples, cv::Mat* results=0 ) const;
#endif
virtual void write( CvFileStorage* storage, const char* name ) const;
virtual void read( CvFileStorage* storage, CvFileNode* node );
......
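
Sketch (not part of the commit) of the cv::Mat overloads shown above, which are the ones left visible to the wrappers (the CvMat* overloads are CV_NO_WRAP). Data shapes are assumptions:

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>

    void bayes_example(const cv::Mat& train_data,   // CV_32FC1, one sample per row
                       const cv::Mat& responses)    // CV_32FC1, one class label per sample
    {
        CvNormalBayesClassifier bayes;
        bayes.train(train_data, responses);         // cv::Mat overload

        cv::Mat results;
        bayes.predict(train_data, &results);        // per-sample predicted class labels
    }
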
......@@ -271,7 +271,7 @@ namespace cv
///////////////////////////// Object Detection ////////////////////////////
CV_EXPORTS void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps=0.2);
CV_EXPORTS void groupRectangles(vector<Rect>& rectList, vector<int>& weights, int groupThreshold, double eps=0.2);
CV_EXPORTS void groupRectangles(vector<Rect>& rectList, CV_OUT vector<int>& weights, int groupThreshold, double eps=0.2);
class CV_EXPORTS FeatureEvaluator
{
......@@ -328,7 +328,7 @@ public:
bool load(const string& filename);
bool read(const FileNode& node);
void detectMultiScale( const Mat& image,
vector<Rect>& objects,
CV_OUT vector<Rect>& objects,
double scaleFactor=1.1,
int minNeighbors=3, int flags=0,
Size minSize=Size());
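
Usage sketch (not part of the commit) of the annotated detectMultiScale; the cascade file name is an assumption:

    #include <opencv2/objdetect/objdetect.hpp>

    void detect_faces_example(const cv::Mat& gray)
    {
        cv::CascadeClassifier cascade;
        if (!cascade.load("haarcascade_frontalface_alt.xml"))
            return;

        std::vector<cv::Rect> faces;                // CV_OUT parameter
        cascade.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(30, 30));
    }
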
......@@ -401,18 +401,18 @@ public:
virtual void copyTo(HOGDescriptor& c) const;
virtual void compute(const Mat& img,
vector<float>& descriptors,
CV_OUT vector<float>& descriptors,
Size winStride=Size(), Size padding=Size(),
const vector<Point>& locations=vector<Point>()) const;
virtual void detect(const Mat& img, vector<Point>& foundLocations,
virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
double hitThreshold=0, Size winStride=Size(),
Size padding=Size(),
const vector<Point>& searchLocations=vector<Point>()) const;
virtual void detectMultiScale(const Mat& img, vector<Rect>& foundLocations,
virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
double hitThreshold=0, Size winStride=Size(),
Size padding=Size(), double scale=1.05,
int groupThreshold=2) const;
virtual void computeGradient(const Mat& img, Mat& grad, Mat& angleOfs,
virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
Size paddingTL=Size(), Size paddingBR=Size()) const;
static vector<float> getDefaultPeopleDetector();
......
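
Sketch (not part of the commit) of the HOG people detector built from the annotated methods above; setSVMDetector is part of the same class but does not appear in this hunk:

    #include <opencv2/objdetect/objdetect.hpp>

    void hog_example(const cv::Mat& img)
    {
        cv::HOGDescriptor hog;
        hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

        std::vector<cv::Rect> people;               // CV_OUT output
        hog.detectMultiScale(img, people, 0, cv::Size(8, 8),
                             cv::Size(32, 32), 1.05, 2);
    }
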
......@@ -112,17 +112,11 @@ typedef struct CvBGStatModel
//
// Releases memory used by BGStatModel
CV_INLINE void cvReleaseBGStatModel( CvBGStatModel** bg_model )
{
if( bg_model && *bg_model && (*bg_model)->release )
(*bg_model)->release( bg_model );
}
CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model );
// Updates statistical model and returns number of found foreground regions
CV_INLINE int cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model, double learningRate CV_DEFAULT(-1))
{
return bg_model && bg_model->update ? bg_model->update( current_frame, bg_model, learningRate ) : 0;
}
CVAPI(int) cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model,
double learningRate CV_DEFAULT(-1));
// Performs FG post-processing using segmentation
// (all pixels of a region will be classified as foreground if majority of pixels of the region are FG).
......@@ -365,7 +359,8 @@ public:
//! the virtual destructor
virtual ~BackgroundSubtractor();
//! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
virtual void operator()(const Mat& image, Mat& fgmask, double learningRate=0);
virtual CV_WRAP_AS(apply) void operator()(const Mat& image, CV_OUT Mat& fgmask,
double learningRate=0);
};
......
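
Usage sketch (not part of the commit) for the BackgroundSubtractor functor above; the concrete subclass BackgroundSubtractorMOG and the header name are assumptions for this era of the code:

    #include <opencv2/video/background_segm.hpp>    // assumed header location
    #include <opencv2/highgui/highgui.hpp>

    void bgsub_example(cv::VideoCapture& cap)
    {
        cv::BackgroundSubtractorMOG mog;            // assumed concrete subclass
        cv::Mat frame, fgmask;
        for (cap >> frame; !frame.empty(); cap >> frame)
            mog(frame, fgmask, 0.01);               // wrapped as "apply"; fgmask is the CV_OUT 8-bit mask
    }
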
......@@ -248,8 +248,8 @@ CV_EXPORTS void updateMotionHistory( const Mat& silhouette, Mat& mhi,
double timestamp, double duration );
//! computes the motion gradient orientation image from the motion history image
CV_EXPORTS void calcMotionGradient( const Mat& mhi, Mat& mask,
Mat& orientation,
CV_EXPORTS void calcMotionGradient( const Mat& mhi, CV_OUT Mat& mask,
CV_OUT Mat& orientation,
double delta1, double delta2,
int apertureSize=3 );
......@@ -260,11 +260,11 @@ CV_EXPORTS double calcGlobalOrientation( const Mat& orientation, const Mat& mask
// TODO: need good API for cvSegmentMotion
//! updates the object tracking window using CAMSHIFT algorithm
CV_EXPORTS RotatedRect CamShift( const Mat& probImage, Rect& window,
CV_EXPORTS RotatedRect CamShift( const Mat& probImage, CV_OUT Rect& window,
TermCriteria criteria );
//! updates the object tracking window using meanshift algorithm
CV_EXPORTS int meanShift( const Mat& probImage, Rect& window,
CV_EXPORTS int meanShift( const Mat& probImage, CV_OUT Rect& window,
TermCriteria criteria );
/*!
......@@ -313,8 +313,8 @@ enum { OPTFLOW_USE_INITIAL_FLOW=4, OPTFLOW_FARNEBACK_GAUSSIAN=256 };
//! computes sparse optical flow using multi-scale Lucas-Kanade algorithm
CV_EXPORTS void calcOpticalFlowPyrLK( const Mat& prevImg, const Mat& nextImg,
const vector<Point2f>& prevPts, vector<Point2f>& nextPts,
vector<uchar>& status, vector<float>& err,
const vector<Point2f>& prevPts, CV_OUT vector<Point2f>& nextPts,
CV_OUT vector<uchar>& status, CV_OUT vector<float>& err,
Size winSize=Size(15,15), int maxLevel=3,
TermCriteria criteria=TermCriteria(
TermCriteria::COUNT+TermCriteria::EPS,
......@@ -323,8 +323,8 @@ CV_EXPORTS void calcOpticalFlowPyrLK( const Mat& prevImg, const Mat& nextImg,
int flags=0 );
//! computes dense optical flow using Farneback algorithm
CV_EXPORTS void calcOpticalFlowFarneback( const Mat& prev0, const Mat& next0,
Mat& flow0, double pyr_scale, int levels, int winsize,
CV_EXPORTS void calcOpticalFlowFarneback( const Mat& prev, const Mat& next,
CV_OUT Mat& flow, double pyr_scale, int levels, int winsize,
int iterations, int poly_n, double poly_sigma, int flags );
}
......
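
Sketch (not part of the commit) of sparse Lucas-Kanade flow with the CV_OUT-annotated outputs; seeding with goodFeaturesToTrack and the header paths are assumptions:

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/video/tracking.hpp>

    void lk_example(const cv::Mat& prevGray, const cv::Mat& nextGray)
    {
        std::vector<cv::Point2f> prevPts, nextPts;
        cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 10);  // assumed seeding step

        std::vector<uchar> status;                  // per-point tracking success flag
        std::vector<float> err;
        cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts,
                                 status, err, cv::Size(15, 15), 3);
    }
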
......@@ -56,9 +56,22 @@ CvBGStatModel* cvCreateBGStatModel( IplImage* first_frame, int model_type, void*
return bg_model;
}
void cvReleaseBGStatModel( CvBGStatModel** bg_model )
{
if( bg_model && *bg_model && (*bg_model)->release )
(*bg_model)->release( bg_model );
}
int cvUpdateBGStatModel( IplImage* current_frame,
CvBGStatModel* bg_model,
double learningRate )
{
return bg_model && bg_model->update ? bg_model->update( current_frame, bg_model, learningRate ) : 0;
}
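
For reference (not part of the commit), a minimal sketch of driving the functions that were just moved out of line; the include paths and the way the model gets created are assumptions outside this diff:

    #include <opencv2/video/background_segm.hpp>    // assumed location of CvBGStatModel
    #include <opencv2/highgui/highgui_c.h>          // C capture API (assumed path)

    void run_bg_model(CvCapture* cap, CvBGStatModel* bg_model)
    {
        IplImage* frame;
        while ((frame = cvQueryFrame(cap)) != 0)
        {
            // returns the number of foreground regions found; -1 = default learning rate
            cvUpdateBGStatModel(frame, bg_model, -1);
        }
        cvReleaseBGStatModel(&bg_model);            // now a plain exported (CVAPI) function
    }
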
/* FOREGROUND DETECTOR INTERFACE */
class CvFGDetectorBase:public CvFGDetector
class CvFGDetectorBase : public CvFGDetector
{
protected:
CvBGStatModel* m_pFG;
......