Commit 22ff1e88 authored by Vadim Pisarevsky

Merge pull request #3339 from vpisarev:refactor_features2d_take4

parents af1d29db 4038beb6
......@@ -16,24 +16,15 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu
Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
#.
Detect keypoints in both images. ::
Detect keypoints in both images and compute descriptors for each of the keypoints. ::
// detecting keypoints
FastFeatureDetector detector(15);
Ptr<Feature2D> surf = SURF::create();
vector<KeyPoint> keypoints1;
detector.detect(img1, keypoints1);
... // do the same for the second image
#.
Compute descriptors for each of the keypoints. ::
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1;
extractor.compute(img1, keypoints1, descriptors1);
surf->detectAndCompute(img1, Mat(), keypoints1, descriptors1);
... // process keypoints from the second image as well
... // do the same for the second image
#.
Now, find the closest matches between descriptors from the first image to the second: ::
......
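The hunk's own snippet for this step is truncated; as a hedged illustration only, a minimal matching sketch assuming a brute-force matcher (``BFMatcher`` with ``NORM_L2``, which suits SURF's float descriptors): ::

    // hypothetical sketch, not the tutorial's exact snippet
    BFMatcher matcher(NORM_L2);
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);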
......@@ -65,18 +65,18 @@ Let us break the code down. ::
We load two images and check if they are loaded correctly. ::
// detecting keypoints
FastFeatureDetector detector(15);
Ptr<FeatureDetector> detector = FastFeatureDetector::create(15);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
detector->detect(img1, keypoints1);
detector->detect(img2, keypoints2);
First, we create an instance of a keypoint detector. All detectors inherit the abstract ``FeatureDetector`` interface, but the constructors are algorithm-dependent. The first argument to each detector usually controls the trade-off between the number of keypoints and their stability. The valid range differs between detectors (for instance, the *FAST* threshold is a pixel intensity difference and usually lies in *[0,40]*, while the *SURF* threshold is applied to the image Hessian and usually takes values larger than *100*), so use the defaults when in doubt. ::
// computing descriptors
SurfDescriptorExtractor extractor;
Ptr<SURF> extractor = SURF::create();
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
extractor->compute(img1, keypoints1, descriptors1);
extractor->compute(img2, keypoints2, descriptors2);
We create an instance of a descriptor extractor. Most OpenCV descriptors inherit the ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains the descriptor for the *i*-th keypoint in row *i*. Note that the method can modify the keypoints vector by removing keypoints for which a descriptor is not defined (usually these are keypoints near the image border). The method makes sure that the output keypoints and descriptors are consistent with each other (so that the number of keypoints equals the descriptor row count). ::
......
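Putting the refactored calls together, a hedged end-to-end sketch of the new ``Ptr``-based pipeline described above (SURF's exact header/module is an assumption here; in the 3.0 line it is expected to live in contrib's *xfeatures2d*): ::

    Ptr<FeatureDetector> detector = FastFeatureDetector::create(15);
    Ptr<Feature2D> surf = SURF::create();
    vector<KeyPoint> keypoints1;
    Mat descriptors1;
    detector->detect(img1, keypoints1);
    surf->compute(img1, keypoints1, descriptors1);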
......@@ -185,7 +185,7 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSiz
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = makePtr<SimpleBlobDetector>());
const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
......
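For illustration, a hedged call sketch for the ``findCirclesGrid`` declaration above, passing the new ``SimpleBlobDetector::create()`` default explicitly (grid size and flag are illustrative values): ::

    vector<Point2f> centers;
    bool found = findCirclesGrid(gray, Size(4, 11), centers,
                                 CALIB_CB_ASYMMETRIC_GRID,
                                 SimpleBlobDetector::create());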
......@@ -115,7 +115,10 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
double f = PnP.compute_pose(R, tvec);
cv::Rodrigues(R, rvec);
cameraMatrix.at<double>(0,0) = cameraMatrix.at<double>(1,1) = f;
if(cameraMatrix.type() == CV_32F)
cameraMatrix.at<float>(0,0) = cameraMatrix.at<float>(1,1) = (float)f;
else
cameraMatrix.at<double>(0,0) = cameraMatrix.at<double>(1,1) = f;
return true;
}
else
......
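The branch above matters because ``Mat::at`` performs no type conversion: accessing a ``CV_32F`` matrix through ``at<double>`` reinterprets the 4-byte element as 8 bytes. A tiny illustration (values arbitrary): ::

    Mat K = Mat::eye(3, 3, CV_32F);
    // K.at<double>(0, 0) = f;    // wrong on CV_32F data: reads/writes 8 bytes per element
    K.at<float>(0, 0) = 800.0f;   // the accessor must match the matrix depth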
......@@ -73,12 +73,12 @@ private:
{
for(int i = 0; i < number_of_correspondences; i++)
{
pws[3 * i ] = opoints.at<OpointType>(0,i).x;
pws[3 * i + 1] = opoints.at<OpointType>(0,i).y;
pws[3 * i + 2] = opoints.at<OpointType>(0,i).z;
pws[3 * i ] = opoints.at<OpointType>(i).x;
pws[3 * i + 1] = opoints.at<OpointType>(i).y;
pws[3 * i + 2] = opoints.at<OpointType>(i).z;
us[2 * i ] = ipoints.at<IpointType>(0,i).x;
us[2 * i + 1] = ipoints.at<IpointType>(0,i).y;
us[2 * i ] = ipoints.at<IpointType>(i).x;
us[2 * i + 1] = ipoints.at<IpointType>(i).y;
}
}
......
......@@ -874,6 +874,9 @@ public:
virtual ~Algorithm();
String name() const;
virtual void set(int, double);
virtual double get(int) const;
template<typename _Tp> typename ParamType<_Tp>::member_type get(const String& name) const;
template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
......
......@@ -179,6 +179,9 @@ String Algorithm::name() const
return info()->name();
}
void Algorithm::set(int, double) {}
double Algorithm::get(int) const { return 0.; }
void Algorithm::set(const String& parameter, int value)
{
info()->set(this, parameter.c_str(), ParamType<int>::type, &value);
......
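These integer-keyed ``get``/``set`` virtuals are exercised later in this same diff (e.g. by the GFTT tests); a sketch grounded in that usage: ::

    Ptr<FeatureDetector> gftt = GFTTDetector::create();
    gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1);   // dispatches to Algorithm::set(int, double)
    double useHarris = gftt->get(GFTTDetector::USE_HARRIS_DETECTOR);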
......@@ -59,100 +59,60 @@ Detects keypoints in an image (first variant) or image set (second variant).
:param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``.
FeatureDetector::create
-----------------------
Creates a feature detector by its name.
.. ocv:function:: Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
.. ocv:pyfunction:: cv2.FeatureDetector_create(detectorType) -> retval
:param detectorType: Feature detector type.
The following detector types are supported:
* ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"MSER"`` -- :ocv:class:`MSER`
* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled
* ``"SimpleBlob"`` -- :ocv:class:`SimpleBlobDetector`
FastFeatureDetector
-------------------
.. ocv:class:: FastFeatureDetector : public FeatureDetector
.. ocv:class:: FastFeatureDetector : public Feature2D
Wrapping class for feature detection using the
:ocv:func:`FAST` method. ::
class FastFeatureDetector : public FeatureDetector
class FastFeatureDetector : public Feature2D
{
public:
FastFeatureDetector( int threshold=1, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
static Ptr<FastFeatureDetector> create( int threshold=1, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16 );
};
GoodFeaturesToTrackDetector
GFTTDetector
---------------------------
.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector
.. ocv:class:: GFTTDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:func:`goodFeaturesToTrack` function. ::
class GoodFeaturesToTrackDetector : public FeatureDetector
class GFTTDetector : public Feature2D
{
public:
class Params
{
public:
Params( int maxCorners=1000, double qualityLevel=0.01,
double minDistance=1., int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
void read( const FileNode& fn );
void write( FileStorage& fs ) const;
int maxCorners;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double k;
};
GoodFeaturesToTrackDetector( const GoodFeaturesToTrackDetector::Params& params=
GoodFeaturesToTrackDetector::Params() );
GoodFeaturesToTrackDetector( int maxCorners, double qualityLevel,
double minDistance, int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
enum { USE_HARRIS_DETECTOR=10000 };
static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01,
double minDistance=1, int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
};
MserFeatureDetector
MSER
-------------------
.. ocv:class:: MserFeatureDetector : public FeatureDetector
.. ocv:class:: MSER : public Feature2D
Wrapping class for feature detection using the
:ocv:class:`MSER` class. ::
Maximally stable extremal region detector. ::
class MserFeatureDetector : public FeatureDetector
class MSER : public Feature2D
{
public:
MserFeatureDetector( CvMSERParams params=cvMSERParams() );
MserFeatureDetector( int delta, int minArea, int maxArea,
double maxVariation, double minDiversity,
int maxEvolution, double areaThreshold,
double minMargin, int edgeBlurSize );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
enum
{
DELTA=10000, MIN_AREA=10001, MAX_AREA=10002, PASS2_ONLY=10003,
MAX_EVOLUTION=10004, AREA_THRESHOLD=10005,
MIN_MARGIN=10006, EDGE_BLUR_SIZE=10007
};
//! the full constructor
static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
double _max_variation=0.25, double _min_diversity=.2,
int _max_evolution=200, double _area_threshold=1.01,
double _min_margin=0.003, int _edge_blur_size=5 );
virtual void detectRegions( InputArray image,
std::vector<std::vector<Point> >& msers,
std::vector<Rect>& bboxes ) = 0;
};
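A hedged usage sketch for the ``detectRegions`` method in the synopsis above (``gray`` is assumed to be an 8-bit image): ::

    Ptr<MSER> mser = MSER::create();
    vector<vector<Point> > regions;
    vector<Rect> bboxes;
    mser->detectRegions(gray, regions, bboxes);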
SimpleBlobDetector
......@@ -189,10 +149,8 @@ Class for extracting blobs from an image. ::
float minConvexity, maxConvexity;
};
SimpleBlobDetector(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
protected:
...
static Ptr<SimpleBlobDetector> create(const SimpleBlobDetector::Params
&parameters = SimpleBlobDetector::Params());
};
The class implements a simple algorithm for extracting blobs from an image:
......
......@@ -14,11 +14,6 @@ Detects corners using the FAST algorithm
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSuppression=true )
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSuppression, int type )
.. ocv:pyfunction:: cv2.FastFeatureDetector([, threshold[, nonmaxSuppression]]) -> <FastFeatureDetector object>
.. ocv:pyfunction:: cv2.FastFeatureDetector(threshold, nonmaxSuppression, type) -> <FastFeatureDetector object>
.. ocv:pyfunction:: cv2.FastFeatureDetector.detect(image[, mask]) -> keypoints
:param image: grayscale image where keypoints (corners) are detected.
:param keypoints: keypoints detected on the image.
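As a hedged illustration, a minimal call matching the first overload above: ::

    vector<KeyPoint> corners;
    FAST(grayImage, corners, 20 /*threshold*/, true /*nonmaxSuppression*/);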
......@@ -55,7 +50,7 @@ Maximally stable extremal region extractor. ::
// runs the extractor on the specified image; returns the MSERs,
// each encoded as a contour (vector<Point>, see findContours)
// the optional mask marks the area where MSERs are searched for
void operator()( const Mat& image, vector<vector<Point> >& msers, const Mat& mask ) const;
void detectRegions( InputArray image, vector<vector<Point> >& msers, vector<Rect>& bboxes ) const;
};
The class encapsulates all the parameters of the MSER extraction algorithm (see
......
......@@ -32,11 +32,8 @@ OCL_PERF_TEST_P(FASTFixture, FastDetect, testing::Combine(
mframe.copyTo(frame);
declare.in(frame);
Ptr<FeatureDetector> fd = Algorithm::create<FeatureDetector>("Feature2D.FAST");
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, true, type);
ASSERT_FALSE( fd.empty() );
fd->set("threshold", 20);
fd->set("nonmaxSuppression", true);
fd->set("type", type);
vector<KeyPoint> points;
OCL_TEST_CYCLE() fd->detect(frame, points);
......
......@@ -22,10 +22,10 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES)
mframe.copyTo(frame);
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
OCL_TEST_CYCLE() detector(frame, mask, points);
OCL_TEST_CYCLE() detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......@@ -44,14 +44,14 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES)
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector(frame, mask, points);
detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater());
UMat descriptors;
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, true);
OCL_TEST_CYCLE() detector->compute(frame, points, descriptors);
SANITY_CHECK(descriptors);
}
......@@ -68,12 +68,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
mframe.copyTo(frame);
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
UMat descriptors;
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, false);
OCL_TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
::perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......
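The ORB hunks above and below all apply the same mapping from the old functor interface to the new pointer-based API; a hedged summary sketch using the names from these tests: ::

    Ptr<ORB> orb = ORB::create(1500, 1.3f, 1);
    orb->detect(frame, points, mask);                               // was: orb(frame, mask, points)
    orb->compute(frame, points, descriptors);                       // was: orb(frame, mask, points, descriptors, true)
    orb->detectAndCompute(frame, mask, points, descriptors, false); // was: orb(frame, mask, points, descriptors, false)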
......@@ -30,11 +30,8 @@ PERF_TEST_P(fast, detect, testing::Combine(
declare.in(frame);
Ptr<FeatureDetector> fd = Algorithm::create<FeatureDetector>("Feature2D.FAST");
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, true, type);
ASSERT_FALSE( fd.empty() );
fd->set("threshold", 20);
fd->set("nonmaxSuppression", true);
fd->set("type", type);
vector<KeyPoint> points;
TEST_CYCLE() fd->detect(frame, points);
......
......@@ -22,10 +22,10 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
TEST_CYCLE() detector(frame, mask, points);
TEST_CYCLE() detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......@@ -42,14 +42,14 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector(frame, mask, points);
detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater());
Mat descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, true);
TEST_CYCLE() detector->compute(frame, points, descriptors);
SANITY_CHECK(descriptors);
}
......@@ -64,12 +64,12 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
Mat descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, false);
TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......
......@@ -55,7 +55,31 @@
# endif
#endif
using namespace cv;
namespace cv
{
class CV_EXPORTS_W SimpleBlobDetectorImpl : public SimpleBlobDetector
{
public:
explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
struct CV_EXPORTS Center
{
Point2d location;
double radius;
double confidence;
};
virtual void detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() );
virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
Params params;
};
/*
* SimpleBlobDetector
......@@ -148,22 +172,22 @@ void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const
fs << "maxConvexity" << maxConvexity;
}
SimpleBlobDetector::SimpleBlobDetector(const SimpleBlobDetector::Params &parameters) :
SimpleBlobDetectorImpl::SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters) :
params(parameters)
{
}
void SimpleBlobDetector::read( const cv::FileNode& fn )
void SimpleBlobDetectorImpl::read( const cv::FileNode& fn )
{
params.read(fn);
}
void SimpleBlobDetector::write( cv::FileStorage& fs ) const
void SimpleBlobDetectorImpl::write( cv::FileStorage& fs ) const
{
params.write(fs);
}
void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
{
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
(void)image;
......@@ -277,7 +301,7 @@ void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, s
#endif
}
void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray) const
void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray)
{
//TODO: support mask
keypoints.clear();
......@@ -340,3 +364,10 @@ void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>&
keypoints.push_back(kpt);
}
}
Ptr<SimpleBlobDetector> SimpleBlobDetector::create(const SimpleBlobDetector::Params& params)
{
return makePtr<SimpleBlobDetectorImpl>(params);
}
}
......@@ -359,30 +359,63 @@ void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool
{
FAST(_img, keypoints, threshold, nonmax_suppression, FastFeatureDetector::TYPE_9_16);
}
/*
* FastFeatureDetector
*/
FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(FastFeatureDetector::TYPE_9_16)
{}
FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression, int _type )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
{}
void FastFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
class FastFeatureDetector_Impl : public FastFeatureDetector
{
Mat mask = _mask.getMat(), grayImage;
UMat ugrayImage;
_InputArray gray = _image;
if( _image.type() != CV_8U )
public:
FastFeatureDetector_Impl( int _threshold, bool _nonmaxSuppression, int _type )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
{}
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
{
Mat mask = _mask.getMat(), grayImage;
UMat ugrayImage;
_InputArray gray = _image;
if( _image.type() != CV_8U )
{
_OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage);
cvtColor( _image, ogray, COLOR_BGR2GRAY );
gray = ogray;
}
FAST( gray, keypoints, threshold, nonmaxSuppression, type );
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
void set(int prop, double value)
{
_OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage);
cvtColor( _image, ogray, COLOR_BGR2GRAY );
gray = ogray;
if(prop == THRESHOLD)
threshold = cvRound(value);
else if(prop == NONMAX_SUPPRESSION)
nonmaxSuppression = value != 0;
else if(prop == FAST_N)
type = cvRound(value);
else
CV_Error(Error::StsBadArg, "");
}
FAST( gray, keypoints, threshold, nonmaxSuppression, type );
KeyPointsFilter::runByPixelsMask( keypoints, mask );
double get(int prop) const
{
if(prop == THRESHOLD)
return threshold;
if(prop == NONMAX_SUPPRESSION)
return nonmaxSuppression;
if(prop == FAST_N)
return type;
CV_Error(Error::StsBadArg, "");
return 0;
}
int threshold;
bool nonmaxSuppression;
int type;
};
Ptr<FastFeatureDetector> FastFeatureDetector::create( int threshold, bool nonmaxSuppression, int type )
{
return makePtr<FastFeatureDetector_Impl>(threshold, nonmaxSuppression, type);
}
}
......@@ -7,10 +7,11 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
......@@ -23,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
......@@ -44,118 +45,125 @@
namespace cv
{
/*
* FeatureDetector
*/
using std::vector;
FeatureDetector::~FeatureDetector()
{}
Feature2D::~Feature2D() {}
void FeatureDetector::detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask ) const
/*
* Detect keypoints in an image.
* image The image.
* keypoints The detected keypoints.
* mask Mask specifying where to look for keypoints (optional). Must be a char
* matrix with non-zero values in the region of interest.
*/
void Feature2D::detect( InputArray image,
std::vector<KeyPoint>& keypoints,
InputArray mask )
{
keypoints.clear();
if( image.empty() )
{
keypoints.clear();
return;
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) );
detectImpl( image, keypoints, mask );
}
detectAndCompute(image, mask, keypoints, noArray(), false);
}
void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection,
InputArrayOfArrays _masks ) const
void Feature2D::detect( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays _masks )
{
if (_imageCollection.isUMatVector())
{
std::vector<UMat> uimageCollection, umasks;
_imageCollection.getUMatVector(uimageCollection);
_masks.getUMatVector(umasks);
vector<Mat> images, masks;
pointCollection.resize( uimageCollection.size() );
for( size_t i = 0; i < uimageCollection.size(); i++ )
detect( uimageCollection[i], pointCollection[i], umasks.empty() ? noArray() : umasks[i] );
_images.getMatVector(images);
size_t i, nimages = images.size();
return;
if( !_masks.empty() )
{
_masks.getMatVector(masks);
CV_Assert(masks.size() == nimages);
}
std::vector<Mat> imageCollection, masks;
_imageCollection.getMatVector(imageCollection);
_masks.getMatVector(masks);
keypoints.resize(nimages);
pointCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] );
for( i = 0; i < nimages; i++ )
{
detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] );
}
}
/*void FeatureDetector::read( const FileNode& )
{}
void FeatureDetector::write( FileStorage& ) const
{}*/
bool FeatureDetector::empty() const
/*
* Compute the descriptors for a set of keypoints in an image.
* image The image.
* keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
* descriptors Computed descriptors. Row i is the descriptor for keypoint i.
*/
void Feature2D::compute( InputArray image,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors )
{
return false;
if( image.empty() )
{
descriptors.release();
return;
}
detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector<KeyPoint>& keypoints )
void Feature2D::compute( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays _descriptors )
{
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( !_descriptors.needed() )
return;
Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
{
if( detectorType.compare( "HARRIS" ) == 0 )
vector<Mat> images;
_images.getMatVector(images);
size_t i, nimages = images.size();
CV_Assert( keypoints.size() == nimages );
CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );
vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
descriptors.resize(nimages);
for( i = 0; i < nimages; i++ )
{
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
fd->set("useHarrisDetector", true);
return fd;
compute(images[i], keypoints[i], descriptors[i]);
}
return Algorithm::create<FeatureDetector>("Feature2D." + detectorType);
}
GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
: nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
/* Detects keypoints and computes the descriptors */
void Feature2D::detectAndCompute( InputArray, InputArray,
std::vector<KeyPoint>&,
OutputArray,
bool )
{
CV_Error(Error::StsNotImplemented, "");
}
void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
int Feature2D::descriptorSize() const
{
std::vector<Point2f> corners;
if (_image.isUMat())
{
UMat ugrayImage;
if( _image.type() != CV_8U )
cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
else
ugrayImage = _image.getUMat();
goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
else
{
Mat image = _image.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
return 0;
}
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
int Feature2D::descriptorType() const
{
return CV_32F;
}
keypoints.resize(corners.size());
std::vector<Point2f>::const_iterator corner_it = corners.begin();
std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
*keypoint_it = KeyPoint( *corner_it, (float)blockSize );
int Feature2D::defaultNorm() const
{
int tp = descriptorType();
return tp == CV_8U ? NORM_HAMMING : NORM_L2;
}
// Return true if detector object is empty
bool Feature2D::empty() const
{
return true;
}
}
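Under this refactoring a concrete algorithm overrides only ``detectAndCompute``; the base ``detect`` and ``compute`` above delegate to it. A minimal hypothetical subclass, for illustration only (not part of the PR): ::

    class ToyFeature2D : public Feature2D   // hypothetical example
    {
    public:
        void detectAndCompute( InputArray image, InputArray mask,
                               std::vector<KeyPoint>& keypoints,
                               OutputArray descriptors,
                               bool useProvidedKeypoints )
        {
            (void)image; (void)mask;
            if( !useProvidedKeypoints )
                keypoints.clear();      // a real detector would fill these from the image
            if( descriptors.needed() )
                descriptors.release();  // a real extractor writes one row per keypoint
        }
    };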
......@@ -40,71 +40,87 @@
//M*/
#include "precomp.hpp"
#include <limits>
namespace cv
{
/****************************************************************************************\
* DescriptorExtractor *
\****************************************************************************************/
/*
* DescriptorExtractor
*/
DescriptorExtractor::~DescriptorExtractor()
{}
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
class GFTTDetector_Impl : public GFTTDetector
{
if( image.empty() || keypoints.empty() )
public:
GFTTDetector_Impl( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
: nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
{
descriptors.release();
return;
}
KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 );
KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits<float>::epsilon() );
void set(int prop, double value)
{
if( prop == USE_HARRIS_DETECTOR )
useHarrisDetector = value != 0;
else
CV_Error(Error::StsBadArg, "");
}
computeImpl( image, keypoints, descriptors );
}
double get(int prop) const
{
double value = 0;
if( prop == USE_HARRIS_DETECTOR )
value = useHarrisDetector;
else
CV_Error(Error::StsBadArg, "");
return value;
}
void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
{
std::vector<Mat> imageCollection, descCollection;
_imageCollection.getMatVector(imageCollection);
_descCollection.getMatVector(descCollection);
CV_Assert( imageCollection.size() == pointCollection.size() );
descCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
compute( imageCollection[i], pointCollection[i], descCollection[i] );
}
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
{
std::vector<Point2f> corners;
/*void DescriptorExtractor::read( const FileNode& )
{}
if (_image.isUMat())
{
UMat ugrayImage;
if( _image.type() != CV_8U )
cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
else
ugrayImage = _image.getUMat();
void DescriptorExtractor::write( FileStorage& ) const
{}*/
goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
else
{
Mat image = _image.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
bool DescriptorExtractor::empty() const
{
return false;
}
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
void DescriptorExtractor::removeBorderKeypoints( std::vector<KeyPoint>& keypoints,
Size imageSize, int borderSize )
{
KeyPointsFilter::runByImageBorder( keypoints, imageSize, borderSize );
}
keypoints.resize(corners.size());
std::vector<Point2f>::const_iterator corner_it = corners.begin();
std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
*keypoint_it = KeyPoint( *corner_it, (float)blockSize );
Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExtractorType)
{
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}
}
int nfeatures;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double k;
};
CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
Ptr<GFTTDetector> GFTTDetector::create( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
{
DescriptorExtractor::compute(image, keypoints, descriptors);
return makePtr<GFTTDetector_Impl>(_nfeatures, _qualityLevel,
_minDistance, _blockSize, _useHarrisDetector, _k);
}
}
......@@ -52,153 +52,119 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd
namespace cv
{
KAZE::KAZE()
: extended(false)
, upright(false)
, threshold(0.001f)
, octaves(4)
, sublevels(4)
, diffusivity(DIFF_PM_G2)
{
}
KAZE::KAZE(bool _extended, bool _upright, float _threshold, int _octaves,
int _sublevels, int _diffusivity)
class KAZE_Impl : public KAZE
{
public:
KAZE_Impl(bool _extended, bool _upright, float _threshold, int _octaves,
int _sublevels, int _diffusivity)
: extended(_extended)
, upright(_upright)
, threshold(_threshold)
, octaves(_octaves)
, sublevels(_sublevels)
, diffusivity(_diffusivity)
{
}
KAZE::~KAZE()
{
}
// returns the descriptor size in bytes
int KAZE::descriptorSize() const
{
return extended ? 128 : 64;
}
// returns the descriptor type
int KAZE::descriptorType() const
{
return CV_32F;
}
// returns the default norm type
int KAZE::defaultNorm() const
{
return NORM_L2;
}
void KAZE::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
{
detectImpl(image, keypoints, mask);
}
void KAZE::operator()(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints) const
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
{
}
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
virtual ~KAZE_Impl() {}
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
// returns the descriptor size in bytes
int descriptorSize() const
{
return extended ? 128 : 64;
}
if (!useProvidedKeypoints)
// returns the descriptor type
int descriptorType() const
{
impl.Feature_Detection(keypoints);
return CV_32F;
}
if (!mask.empty())
// returns the default norm type
int defaultNorm() const
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
return NORM_L2;
}
impl.Feature_Description(keypoints, desc);
void detectAndCompute(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints)
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
if (!useProvidedKeypoints)
{
impl.Feature_Detection(keypoints);
}
if (!mask.empty())
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
if( descriptors.needed() )
{
Mat& desc = descriptors.getMatRef();
impl.Feature_Description(keypoints, desc);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
}
}
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
}
void write(FileStorage& fs) const
{
fs << "extended" << (int)extended;
fs << "upright" << (int)upright;
fs << "threshold" << threshold;
fs << "octaves" << octaves;
fs << "sublevels" << sublevels;
fs << "diffusivity" << diffusivity;
}
void KAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Feature_Detection(keypoints);
if (!mask.empty())
void read(const FileNode& fn)
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
extended = (int)fn["extended"] != 0;
upright = (int)fn["upright"] != 0;
threshold = (float)fn["threshold"];
octaves = (int)fn["octaves"];
sublevels = (int)fn["sublevels"];
diffusivity = (int)fn["diffusivity"];
}
}
void KAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
bool extended;
bool upright;
float threshold;
int octaves;
int sublevels;
int diffusivity;
};
Ptr<KAZE> KAZE::create(bool extended, bool upright,
float threshold,
int octaves, int sublevels,
int diffusivity)
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Feature_Description(keypoints, desc);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
return makePtr<KAZE_Impl>(extended, upright, threshold, octaves, sublevels, diffusivity);
}
}
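A hedged usage sketch of the new factory (assuming the header declares the same defaults the old constructor had): ::

    Ptr<KAZE> kaze = KAZE::create();
    vector<KeyPoint> kpts;
    Mat desc;
    kaze->detectAndCompute(img, noArray(), kpts, desc, false);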
......@@ -8,23 +8,8 @@
#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
/* ************************************************************************* */
// OpenCV
#include "../precomp.hpp"
#include <opencv2/features2d.hpp>
/* ************************************************************************* */
/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
const float gauss25[7][7] = {
{ 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f },
{ 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f },
{ 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f },
{ 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f },
{ 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f },
{ 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f },
{ 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f }
};
namespace cv
{
/* ************************************************************************* */
/// AKAZE configuration options structure
struct AKAZEOptions {
......@@ -37,12 +22,12 @@ struct AKAZEOptions {
, soffset(1.6f)
, derivative_factor(1.5f)
, sderivatives(1.0)
, diffusivity(cv::DIFF_PM_G2)
, diffusivity(KAZE::DIFF_PM_G2)
, dthreshold(0.001f)
, min_dthreshold(0.00001f)
, descriptor(cv::DESCRIPTOR_MLDB)
, descriptor(AKAZE::DESCRIPTOR_MLDB)
, descriptor_size(0)
, descriptor_channels(3)
, descriptor_pattern_size(10)
......@@ -75,4 +60,6 @@ struct AKAZEOptions {
int kcontrast_nbins; ///< Number of bins for the contrast factor histogram
};
}
#endif
......@@ -11,10 +11,12 @@
/* ************************************************************************* */
// Includes
#include "../precomp.hpp"
#include "AKAZEConfig.h"
#include "TEvolution.h"
namespace cv
{
/* ************************************************************************* */
// AKAZE Class Declaration
class AKAZEFeatures {
......@@ -22,7 +24,7 @@ class AKAZEFeatures {
private:
AKAZEOptions options_; ///< Configuration options for AKAZE
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
/// FED parameters
int ncycles_; ///< Number of cycles
......@@ -59,4 +61,6 @@ public:
void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons,
int nbits, int pattern_size, int nchannels);
}
#endif
......@@ -12,12 +12,14 @@
#include "../precomp.hpp"
#include <opencv2/features2d.hpp>
namespace cv
{
//*************************************************************************************
struct KAZEOptions {
KAZEOptions()
: diffusivity(cv::DIFF_PM_G2)
: diffusivity(KAZE::DIFF_PM_G2)
, soffset(1.60f)
, omax(4)
......@@ -49,4 +51,6 @@ struct KAZEOptions {
bool extended;
};
}
#endif
......@@ -17,43 +17,48 @@
#include "fed.h"
#include "TEvolution.h"
namespace cv
{
/* ************************************************************************* */
// KAZE Class Declaration
class KAZEFeatures {
class KAZEFeatures
{
private:
/// Parameters of the Nonlinear diffusion class
KAZEOptions options_; ///< Configuration options for KAZE
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
/// Parameters of the Nonlinear diffusion class
KAZEOptions options_; ///< Configuration options for KAZE
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
/// Vector of keypoint vectors for finding extrema in multiple threads
/// Vector of keypoint vectors for finding extrema in multiple threads
std::vector<std::vector<cv::KeyPoint> > kpts_par_;
/// FED parameters
int ncycles_; ///< Number of cycles
bool reordering_; ///< Flag for reordering time steps
std::vector<std::vector<float > > tsteps_; ///< Vector of FED dynamic time steps
std::vector<int> nsteps_; ///< Vector of number of steps per cycle
/// FED parameters
int ncycles_; ///< Number of cycles
bool reordering_; ///< Flag for reordering time steps
std::vector<std::vector<float > > tsteps_; ///< Vector of FED dynamic time steps
std::vector<int> nsteps_; ///< Vector of number of steps per cycle
public:
/// Constructor
/// Constructor
KAZEFeatures(KAZEOptions& options);
/// Public methods for KAZE interface
/// Public methods for KAZE interface
void Allocate_Memory_Evolution(void);
int Create_Nonlinear_Scale_Space(const cv::Mat& img);
void Feature_Detection(std::vector<cv::KeyPoint>& kpts);
void Feature_Description(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc);
static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options);
/// Feature Detection Methods
/// Feature Detection Methods
void Compute_KContrast(const cv::Mat& img, const float& kper);
void Compute_Multiscale_Derivatives(void);
void Compute_Detector_Response(void);
void Determinant_Hessian(std::vector<cv::KeyPoint>& kpts);
void Determinant_Hessian(std::vector<cv::KeyPoint>& kpts);
void Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts);
};
}
#endif
......@@ -8,10 +8,13 @@
#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__
#define __OPENCV_FEATURES_2D_TEVOLUTION_H__
namespace cv
{
/* ************************************************************************* */
/// KAZE/A-KAZE nonlinear diffusion filtering evolution
struct TEvolution {
struct TEvolution
{
TEvolution() {
etime = 0.0f;
esigma = 0.0f;
......@@ -20,11 +23,11 @@ struct TEvolution {
sigma_size = 0;
}
cv::Mat Lx, Ly; ///< First order spatial derivatives
cv::Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
cv::Mat Lt; ///< Evolution image
cv::Mat Lsmooth; ///< Smoothed image
cv::Mat Ldet; ///< Detector response
Mat Lx, Ly; ///< First order spatial derivatives
Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
Mat Lt; ///< Evolution image
Mat Lsmooth; ///< Smoothed image
Mat Ldet; ///< Detector response
float etime; ///< Evolution time
float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
int octave; ///< Image octave
......@@ -32,4 +35,6 @@ struct TEvolution {
int sigma_size; ///< Integer esigma. For computing the feature detector responses
};
}
#endif
......@@ -11,43 +11,37 @@
#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
/* ************************************************************************* */
// Includes
#include "../precomp.hpp"
/* ************************************************************************* */
// Declaration of functions
namespace cv {
namespace details {
namespace kaze {
namespace cv
{
// Gaussian 2D convolution
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
// Gaussian 2D convolution
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
// Diffusivity functions
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
// Diffusivity functions
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
// Image derivatives
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Image derivatives
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Nonlinear diffusion filtering scalar step
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
// Nonlinear diffusion filtering scalar step
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// Image downsampling
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
// Image downsampling
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
}
}
}
#endif
......@@ -72,7 +72,7 @@ void CV_BRISKTest::run( int )
cvtColor(image1, gray1, COLOR_BGR2GRAY);
cvtColor(image2, gray2, COLOR_BGR2GRAY);
Ptr<FeatureDetector> detector = Algorithm::create<FeatureDetector>("Feature2D.BRISK");
Ptr<FeatureDetector> detector = BRISK::create();
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
......
......@@ -106,8 +106,6 @@ public:
~CV_DescriptorExtractorTest()
{
if(!detector.empty())
detector.release();
}
protected:
virtual void createDescriptorExtractor() {}
......@@ -314,31 +312,34 @@ private:
TEST( Features2d_DescriptorExtractor_BRISK, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk", (CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
DescriptorExtractor::create("BRISK") );
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
BRISK::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_ORB, regression )
{
// TODO adjust the parameters below
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
DescriptorExtractor::create("ORB") );
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
ORB::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_KAZE, regression )
{
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,
DescriptorExtractor::create("KAZE"),
L2<float>(), FeatureDetector::create("KAZE"));
KAZE::create(),
L2<float>(), KAZE::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_AKAZE, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
DescriptorExtractor::create("AKAZE"),
Hamming(), FeatureDetector::create("AKAZE"));
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
AKAZE::create(),
Hamming(), AKAZE::create());
test.safe_run();
}
......@@ -249,48 +249,50 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ )
TEST( Features2d_Detector_BRISK, regression )
{
CV_FeatureDetectorTest test( "detector-brisk", FeatureDetector::create("BRISK") );
CV_FeatureDetectorTest test( "detector-brisk", BRISK::create() );
test.safe_run();
}
TEST( Features2d_Detector_FAST, regression )
{
CV_FeatureDetectorTest test( "detector-fast", FeatureDetector::create("FAST") );
CV_FeatureDetectorTest test( "detector-fast", FastFeatureDetector::create() );
test.safe_run();
}
TEST( Features2d_Detector_GFTT, regression )
{
CV_FeatureDetectorTest test( "detector-gftt", FeatureDetector::create("GFTT") );
CV_FeatureDetectorTest test( "detector-gftt", GFTTDetector::create() );
test.safe_run();
}
TEST( Features2d_Detector_Harris, regression )
{
CV_FeatureDetectorTest test( "detector-harris", FeatureDetector::create("HARRIS") );
Ptr<FeatureDetector> gftt = GFTTDetector::create();
gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1);
CV_FeatureDetectorTest test( "detector-harris", gftt);
test.safe_run();
}
TEST( Features2d_Detector_MSER, DISABLED_regression )
{
CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") );
CV_FeatureDetectorTest test( "detector-mser", MSER::create() );
test.safe_run();
}
TEST( Features2d_Detector_ORB, regression )
{
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
CV_FeatureDetectorTest test( "detector-orb", ORB::create() );
test.safe_run();
}
TEST( Features2d_Detector_KAZE, regression )
{
CV_FeatureDetectorTest test( "detector-kaze", FeatureDetector::create("KAZE") );
CV_FeatureDetectorTest test( "detector-kaze", KAZE::create() );
test.safe_run();
}
TEST( Features2d_Detector_AKAZE, regression )
{
CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") );
CV_FeatureDetectorTest test( "detector-akaze", AKAZE::create() );
test.safe_run();
}
......@@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/core_c.h"
using namespace std;
using namespace cv;
......@@ -61,7 +62,6 @@ public:
protected:
virtual void run(int)
{
cv::initModule_features2d();
CV_Assert(detector);
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
......@@ -121,51 +121,54 @@ protected:
TEST(Features2d_Detector_Keypoints_BRISK, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"));
CV_FeatureDetectorKeypointsTest test(BRISK::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_FAST, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.FAST"));
CV_FeatureDetectorKeypointsTest test(FastFeatureDetector::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_HARRIS, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.HARRIS"));
CV_FeatureDetectorKeypointsTest test(GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_GFTT, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.GFTT"));
Ptr<FeatureDetector> gftt = GFTTDetector::create();
gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1);
CV_FeatureDetectorKeypointsTest test(gftt);
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_MSER, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.MSER"));
CV_FeatureDetectorKeypointsTest test(MSER::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_ORB, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"));
CV_FeatureDetectorKeypointsTest test(ORB::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_KAZE, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE"));
CV_FeatureDetectorKeypointsTest test(KAZE::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_AKAZE, validation)
{
CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_KAZE)));
CV_FeatureDetectorKeypointsTest test_kaze(AKAZE::create(AKAZE::DESCRIPTOR_KAZE));
test_kaze.safe_run();
CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_MLDB)));
CV_FeatureDetectorKeypointsTest test_mldb(AKAZE::create(AKAZE::DESCRIPTOR_MLDB));
test_mldb.safe_run();
}
......@@ -532,12 +532,14 @@ void CV_DescriptorMatcherTest::run( int )
TEST( Features2d_DescriptorMatcher_BruteForce, regression )
{
CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", Algorithm::create<DescriptorMatcher>("DescriptorMatcher.BFMatcher"), 0.01f );
CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force",
DescriptorMatcher::create("BruteForce"), 0.01f );
test.safe_run();
}
TEST( Features2d_DescriptorMatcher_FlannBased, regression )
{
CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based", Algorithm::create<DescriptorMatcher>("DescriptorMatcher.FlannBasedMatcher"), 0.04f );
CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based",
DescriptorMatcher::create("FlannBased"), 0.04f );
test.safe_run();
}
......@@ -43,6 +43,8 @@
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#if 0
#include <vector>
#include <string>
using namespace std;
......@@ -205,3 +207,5 @@ void CV_MserTest::run(int)
}
TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }
#endif
......@@ -47,10 +47,8 @@ using namespace cv;
TEST(Features2D_ORB, _1996)
{
Ptr<FeatureDetector> fd = FeatureDetector::create("ORB");
fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible
fd->set("fastThreshold", 20);//more features than the default
Ptr<DescriptorExtractor> de = DescriptorExtractor::create("ORB");
Ptr<FeatureDetector> fd = ORB::create(10000, 1.2f, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 20);
Ptr<DescriptorExtractor> de = fd;
Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png");
ASSERT_FALSE(image.empty());
......
......@@ -24,15 +24,9 @@ JNI_OnLoad(JavaVM* vm, void* )
return -1;
bool init = true;
#ifdef HAVE_OPENCV_FEATURES2D
init &= cv::initModule_features2d();
#endif
#ifdef HAVE_OPENCV_VIDEO
init &= cv::initModule_video();
#endif
#ifdef HAVE_OPENCV_CONTRIB
init &= cv::initModule_contrib();
#endif
if(!init)
return -1;
......
......@@ -91,7 +91,7 @@ class Hackathon244Tests(NewOpenCVTests):
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
fd = cv2.FastFeatureDetector(30, True)
fd = cv2.FastFeatureDetector_create(30, True)
img = self.get_sample("samples/cpp/right02.jpg", 0)
img = cv2.medianBlur(img, 3)
imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
......