Commit 22ff1e88 authored by Vadim Pisarevsky's avatar Vadim Pisarevsky

Merge pull request #3339 from vpisarev:refactor_features2d_take4

parents af1d29db 4038beb6
......@@ -16,24 +16,15 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu
Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
#.
Detect keypoints in both images. ::
Detect keypoints in both images and compute descriptors for each of the keypoints. ::
// detecting keypoints
FastFeatureDetector detector(15);
Ptr<Feature2D> surf = SURF::create();
vector<KeyPoint> keypoints1;
detector.detect(img1, keypoints1);
... // do the same for the second image
#.
Compute descriptors for each of the keypoints. ::
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1;
extractor.compute(img1, keypoints1, descriptors1);
surf->detectAndCompute(img1, Mat(), keypoints1, descriptors1);
... // process keypoints from the second image as well
... // do the same for the second image
#.
Now, find the closest matches between descriptors from the first image to the second: ::
......
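The matching step itself is collapsed in this view; a minimal sketch of brute-force matching (an illustrative assumption based on the SURF descriptors computed above, which are floating-point and therefore matched with ``NORM_L2``): ::

    // illustrative sketch, not part of this diff
    BFMatcher matcher(surf->defaultNorm());   // NORM_L2 for SURF
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);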
......@@ -65,18 +65,18 @@ Let us break the code down. ::
We load two images and check if they are loaded correctly. ::
// detecting keypoints
FastFeatureDetector detector(15);
Ptr<FeatureDetector> detector = FastFeatureDetector::create(15);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
detector->detect(img1, keypoints1);
detector->detect(img2, keypoints2);
First, we create an instance of a keypoint detector. All detectors inherit the abstract ``FeatureDetector`` interface, but the constructors are algorithm-dependent. The first argument to each detector usually controls the trade-off between the number of keypoints and their stability. The range of values differs between detectors (for instance, the *FAST* threshold is a pixel intensity difference and usually lies in *[0,40]*, while the *SURF* threshold is applied to the Hessian of the image and usually takes values larger than *100*), so use the defaults in case of doubt. ::
// computing descriptors
SurfDescriptorExtractor extractor;
Ptr<SURF> extractor = SURF::create();
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
extractor->compute(img1, keypoints1, descriptors1);
extractor->compute(img2, keypoints2, descriptors2);
We create an instance of a descriptor extractor. Most OpenCV descriptors inherit the abstract ``DescriptorExtractor`` interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in row *i* for the *i*-th keypoint. Note that the method can modify the keypoints vector by removing keypoints for which a descriptor is not defined (usually these are keypoints near the image border). The method makes sure that the output keypoints and descriptors are consistent with each other (so that the number of keypoints equals the descriptor row count). ::
......
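As a quick sanity check of that invariant (an illustrative sketch, not part of this diff), the descriptor row count can be compared with the surviving keypoints directly: ::

    // every keypoint that survives compute() has exactly one descriptor row
    CV_Assert( descriptors1.rows == (int)keypoints1.size() );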
......@@ -185,7 +185,7 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSiz
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = makePtr<SimpleBlobDetector>());
const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
......
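An illustrative call showing that the changed default still allows passing a blob detector explicitly (the grid size and flag are assumptions for the sketch): ::

    Ptr<FeatureDetector> blobber = SimpleBlobDetector::create();
    vector<Point2f> centers;
    bool found = findCirclesGrid(gray, Size(4, 11), centers,
                                 CALIB_CB_ASYMMETRIC_GRID, blobber);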
......@@ -115,6 +115,9 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
double f = PnP.compute_pose(R, tvec);
cv::Rodrigues(R, rvec);
if(cameraMatrix.type() == CV_32F)
cameraMatrix.at<float>(0,0) = cameraMatrix.at<float>(1,1) = (float)f;
else
cameraMatrix.at<double>(0,0) = cameraMatrix.at<double>(1,1) = f;
return true;
}
......
......@@ -73,12 +73,12 @@ private:
{
for(int i = 0; i < number_of_correspondences; i++)
{
pws[3 * i ] = opoints.at<OpointType>(0,i).x;
pws[3 * i + 1] = opoints.at<OpointType>(0,i).y;
pws[3 * i + 2] = opoints.at<OpointType>(0,i).z;
pws[3 * i ] = opoints.at<OpointType>(i).x;
pws[3 * i + 1] = opoints.at<OpointType>(i).y;
pws[3 * i + 2] = opoints.at<OpointType>(i).z;
us[2 * i ] = ipoints.at<IpointType>(0,i).x;
us[2 * i + 1] = ipoints.at<IpointType>(0,i).y;
us[2 * i ] = ipoints.at<IpointType>(i).x;
us[2 * i + 1] = ipoints.at<IpointType>(i).y;
}
}
......
......@@ -874,6 +874,9 @@ public:
virtual ~Algorithm();
String name() const;
virtual void set(int, double);
virtual double get(int) const;
template<typename _Tp> typename ParamType<_Tp>::member_type get(const String& name) const;
template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
......
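A hedged sketch of how these new integer-keyed accessors are used; the ``THRESHOLD`` property id is taken from the ``FastFeatureDetector`` changes later in this diff: ::

    Ptr<FastFeatureDetector> fd = FastFeatureDetector::create();
    fd->set(FastFeatureDetector::THRESHOLD, 30);    // overrides the construction-time threshold
    double t = fd->get(FastFeatureDetector::THRESHOLD);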
......@@ -179,6 +179,9 @@ String Algorithm::name() const
return info()->name();
}
void Algorithm::set(int, double) {}
double Algorithm::get(int) const { return 0.; }
void Algorithm::set(const String& parameter, int value)
{
info()->set(this, parameter.c_str(), ParamType<int>::type, &value);
......
......@@ -59,100 +59,60 @@ Detects keypoints in an image (first variant) or image set (second variant).
:param masks: Masks for each input image specifying where to look for keypoints (optional). ``masks[i]`` is a mask for ``images[i]``.
FeatureDetector::create
-----------------------
Creates a feature detector by its name.
.. ocv:function:: Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
.. ocv:pyfunction:: cv2.FeatureDetector_create(detectorType) -> retval
:param detectorType: Feature detector type.
The following detector types are supported:
* ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"MSER"`` -- :ocv:class:`MSER`
* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled
* ``"SimpleBlob"`` -- :ocv:class:`SimpleBlobDetector`
FastFeatureDetector
-------------------
.. ocv:class:: FastFeatureDetector : public FeatureDetector
.. ocv:class:: FastFeatureDetector : public Feature2D
Wrapping class for feature detection using the
:ocv:func:`FAST` method. ::
class FastFeatureDetector : public FeatureDetector
class FastFeatureDetector : public Feature2D
{
public:
FastFeatureDetector( int threshold=1, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
static Ptr<FastFeatureDetector> create( int threshold=1, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16 );
};
GoodFeaturesToTrackDetector
GFTTDetector
---------------------------
.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector
.. ocv:class:: GFTTDetector : public FeatureDetector
Wrapping class for feature detection using the
:ocv:func:`goodFeaturesToTrack` function. ::
class GoodFeaturesToTrackDetector : public FeatureDetector
{
public:
class Params
class GFTTDetector : public Feature2D
{
public:
Params( int maxCorners=1000, double qualityLevel=0.01,
double minDistance=1., int blockSize=3,
enum { USE_HARRIS_DETECTOR=10000 };
static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01,
double minDistance=1, int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
void read( const FileNode& fn );
void write( FileStorage& fs ) const;
int maxCorners;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double k;
};
GoodFeaturesToTrackDetector( const GoodFeaturesToTrackDetector::Params& params=
GoodFeaturesToTrackDetector::Params() );
GoodFeaturesToTrackDetector( int maxCorners, double qualityLevel,
double minDistance, int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
MserFeatureDetector
MSER
-------------------
.. ocv:class:: MserFeatureDetector : public FeatureDetector
.. ocv:class:: MSER : public Feature2D
Wrapping class for feature detection using the
:ocv:class:`MSER` class. ::
Maximally stable extremal region detector. ::
class MserFeatureDetector : public FeatureDetector
class MSER : public Feature2D
{
public:
MserFeatureDetector( CvMSERParams params=cvMSERParams() );
MserFeatureDetector( int delta, int minArea, int maxArea,
double maxVariation, double minDiversity,
int maxEvolution, double areaThreshold,
double minMargin, int edgeBlurSize );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
enum
{
DELTA=10000, MIN_AREA=10001, MAX_AREA=10002, PASS2_ONLY=10003,
MAX_EVOLUTION=10004, AREA_THRESHOLD=10005,
MIN_MARGIN=10006, EDGE_BLUR_SIZE=10007
};
//! the full constructor
static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
double _max_variation=0.25, double _min_diversity=.2,
int _max_evolution=200, double _area_threshold=1.01,
double _min_margin=0.003, int _edge_blur_size=5 );
virtual void detectRegions( InputArray image,
std::vector<std::vector<Point> >& msers,
std::vector<Rect>& bboxes ) = 0;
};
SimpleBlobDetector
......@@ -189,10 +149,8 @@ Class for extracting blobs from an image. ::
float minConvexity, maxConvexity;
};
SimpleBlobDetector(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
protected:
...
static Ptr<SimpleBlobDetector> create(const SimpleBlobDetector::Params
&parameters = SimpleBlobDetector::Params());
};
The class implements a simple algorithm for extracting blobs from an image:
......
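A minimal usage sketch with the static factory introduced by this PR (the parameter value is an illustrative assumption): ::

    SimpleBlobDetector::Params params;
    params.minArea = 50;   // illustrative value, not from this diff
    Ptr<SimpleBlobDetector> blobDetector = SimpleBlobDetector::create(params);
    vector<KeyPoint> blobs;
    blobDetector->detect(image, blobs);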
......@@ -14,11 +14,6 @@ Detects corners using the FAST algorithm
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSuppression=true )
.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSuppression, int type )
.. ocv:pyfunction:: cv2.FastFeatureDetector([, threshold[, nonmaxSuppression]]) -> <FastFeatureDetector object>
.. ocv:pyfunction:: cv2.FastFeatureDetector(threshold, nonmaxSuppression, type) -> <FastFeatureDetector object>
.. ocv:pyfunction:: cv2.FastFeatureDetector.detect(image[, mask]) -> keypoints
:param image: grayscale image where keypoints (corners) are detected.
:param keypoints: keypoints detected on the image.
......@@ -55,7 +50,7 @@ Maximally stable extremal region extractor. ::
// runs the extractor on the specified image; returns the MSERs,
// each encoded as a contour (vector<Point>, see findContours)
// the optional mask marks the area where MSERs are searched for
void operator()( const Mat& image, vector<vector<Point> >& msers, const Mat& mask ) const;
void detectRegions( InputArray image, vector<vector<Point> >& msers, vector<Rect>& bboxes ) const;
};
The class encapsulates all the parameters of the MSER extraction algorithm (see
......
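Since ``operator()`` is replaced by ``detectRegions`` in this PR, a call now looks like this (illustrative sketch): ::

    Ptr<MSER> mser = MSER::create();
    vector<vector<Point> > regions;
    vector<Rect> bboxes;
    mser->detectRegions(gray, regions, bboxes);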
......@@ -32,11 +32,8 @@ OCL_PERF_TEST_P(FASTFixture, FastDetect, testing::Combine(
mframe.copyTo(frame);
declare.in(frame);
Ptr<FeatureDetector> fd = Algorithm::create<FeatureDetector>("Feature2D.FAST");
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, true, type);
ASSERT_FALSE( fd.empty() );
fd->set("threshold", 20);
fd->set("nonmaxSuppression", true);
fd->set("type", type);
vector<KeyPoint> points;
OCL_TEST_CYCLE() fd->detect(frame, points);
......
......@@ -22,10 +22,10 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES)
mframe.copyTo(frame);
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
OCL_TEST_CYCLE() detector(frame, mask, points);
OCL_TEST_CYCLE() detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......@@ -44,14 +44,14 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES)
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector(frame, mask, points);
detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater());
UMat descriptors;
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, true);
OCL_TEST_CYCLE() detector->compute(frame, points, descriptors);
SANITY_CHECK(descriptors);
}
......@@ -68,12 +68,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
mframe.copyTo(frame);
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
UMat descriptors;
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, false);
OCL_TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
::perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......
......@@ -30,11 +30,8 @@ PERF_TEST_P(fast, detect, testing::Combine(
declare.in(frame);
Ptr<FeatureDetector> fd = Algorithm::create<FeatureDetector>("Feature2D.FAST");
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, true, type);
ASSERT_FALSE( fd.empty() );
fd->set("threshold", 20);
fd->set("nonmaxSuppression", true);
fd->set("type", type);
vector<KeyPoint> points;
TEST_CYCLE() fd->detect(frame, points);
......
......@@ -22,10 +22,10 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
TEST_CYCLE() detector(frame, mask, points);
TEST_CYCLE() detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......@@ -42,14 +42,14 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector(frame, mask, points);
detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater());
Mat descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, true);
TEST_CYCLE() detector->compute(frame, points, descriptors);
SANITY_CHECK(descriptors);
}
......@@ -64,12 +64,12 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES))
Mat mask;
declare.in(frame);
ORB detector(1500, 1.3f, 1);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
Mat descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, false);
TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5);
......
......@@ -52,22 +52,15 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd
#include "kaze/AKAZEFeatures.h"
#include <iostream>
using namespace std;
namespace cv
{
AKAZE::AKAZE()
: descriptor(DESCRIPTOR_MLDB)
, descriptor_channels(3)
, descriptor_size(0)
, threshold(0.001f)
, octaves(4)
, sublevels(4)
, diffusivity(DIFF_PM_G2)
{
}
using namespace std;
AKAZE::AKAZE(int _descriptor_type, int _descriptor_size, int _descriptor_channels,
class AKAZE_Impl : public AKAZE
{
public:
AKAZE_Impl(int _descriptor_type, int _descriptor_size, int _descriptor_channels,
float _threshold, int _octaves, int _sublevels, int _diffusivity)
: descriptor(_descriptor_type)
, descriptor_channels(_descriptor_channels)
......@@ -77,25 +70,24 @@ namespace cv
, sublevels(_sublevels)
, diffusivity(_diffusivity)
{
}
AKAZE::~AKAZE()
virtual ~AKAZE_Impl()
{
}
// returns the descriptor size in bytes
int AKAZE::descriptorSize() const
int descriptorSize() const
{
switch (descriptor)
{
case cv::DESCRIPTOR_KAZE:
case cv::DESCRIPTOR_KAZE_UPRIGHT:
case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return 64;
case cv::DESCRIPTOR_MLDB:
case cv::DESCRIPTOR_MLDB_UPRIGHT:
case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
// We use the full length binary descriptor -> 486 bits
if (descriptor_size == 0)
{
......@@ -114,16 +106,16 @@ namespace cv
}
// returns the descriptor type
int AKAZE::descriptorType() const
int descriptorType() const
{
switch (descriptor)
{
case cv::DESCRIPTOR_KAZE:
case cv::DESCRIPTOR_KAZE_UPRIGHT:
case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return CV_32F;
case cv::DESCRIPTOR_MLDB:
case cv::DESCRIPTOR_MLDB_UPRIGHT:
case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
return CV_8U;
default:
......@@ -132,38 +124,35 @@ namespace cv
}
// returns the default norm type
int AKAZE::defaultNorm() const
int defaultNorm() const
{
switch (descriptor)
{
case cv::DESCRIPTOR_KAZE:
case cv::DESCRIPTOR_KAZE_UPRIGHT:
return cv::NORM_L2;
case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return NORM_L2;
case cv::DESCRIPTOR_MLDB:
case cv::DESCRIPTOR_MLDB_UPRIGHT:
return cv::NORM_HAMMING;
case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
return NORM_HAMMING;
default:
return -1;
}
}
void AKAZE::operator()(InputArray image, InputArray mask,
void detectAndCompute(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints) const
bool useProvidedKeypoints)
{
cv::Mat img = image.getMat();
Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
AKAZEOptions options;
options.descriptor = descriptor;
options.descriptor_channels = descriptor_channels;
......@@ -185,72 +174,56 @@ namespace cv
if (!mask.empty())
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
if( descriptors.needed() )
{
Mat& desc = descriptors.getMatRef();
impl.Compute_Descriptors(keypoints, desc);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
}
}
void AKAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
AKAZEOptions options;
options.descriptor = descriptor;
options.descriptor_channels = descriptor_channels;
options.descriptor_size = descriptor_size;
options.img_width = img.cols;
options.img_height = img.rows;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
AKAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Feature_Detection(keypoints);
if (!mask.empty())
void write(FileStorage& fs) const
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
fs << "descriptor" << descriptor;
fs << "descriptor_channels" << descriptor_channels;
fs << "descriptor_size" << descriptor_size;
fs << "threshold" << threshold;
fs << "octaves" << octaves;
fs << "sublevels" << sublevels;
fs << "diffusivity" << diffusivity;
}
void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
void read(const FileNode& fn)
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
AKAZEOptions options;
options.descriptor = descriptor;
options.descriptor_channels = descriptor_channels;
options.descriptor_size = descriptor_size;
options.img_width = img.cols;
options.img_height = img.rows;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
descriptor = (int)fn["descriptor"];
descriptor_channels = (int)fn["descriptor_channels"];
descriptor_size = (int)fn["descriptor_size"];
threshold = (float)fn["threshold"];
octaves = (int)fn["octaves"];
sublevels = (int)fn["sublevels"];
diffusivity = (int)fn["diffusivity"];
}
AKAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Compute_Descriptors(keypoints, desc);
int descriptor;
int descriptor_channels;
int descriptor_size;
float threshold;
int octaves;
int sublevels;
int diffusivity;
};
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
Ptr<AKAZE> AKAZE::create(int descriptor_type,
int descriptor_size, int descriptor_channels,
float threshold, int octaves,
int sublevels, int diffusivity)
{
return makePtr<AKAZE_Impl>(descriptor_type, descriptor_size, descriptor_channels,
threshold, octaves, sublevels, diffusivity);
}
}
......@@ -55,7 +55,31 @@
# endif
#endif
using namespace cv;
namespace cv
{
class CV_EXPORTS_W SimpleBlobDetectorImpl : public SimpleBlobDetector
{
public:
explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
struct CV_EXPORTS Center
{
Point2d location;
double radius;
double confidence;
};
virtual void detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() );
virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
Params params;
};
/*
* SimpleBlobDetector
......@@ -148,22 +172,22 @@ void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const
fs << "maxConvexity" << maxConvexity;
}
SimpleBlobDetector::SimpleBlobDetector(const SimpleBlobDetector::Params &parameters) :
SimpleBlobDetectorImpl::SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters) :
params(parameters)
{
}
void SimpleBlobDetector::read( const cv::FileNode& fn )
void SimpleBlobDetectorImpl::read( const cv::FileNode& fn )
{
params.read(fn);
}
void SimpleBlobDetector::write( cv::FileStorage& fs ) const
void SimpleBlobDetectorImpl::write( cv::FileStorage& fs ) const
{
params.write(fs);
}
void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
{
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
(void)image;
......@@ -277,7 +301,7 @@ void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, s
#endif
}
void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray) const
void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray)
{
//TODO: support mask
keypoints.clear();
......@@ -340,3 +364,10 @@ void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>&
keypoints.push_back(kpt);
}
}
Ptr<SimpleBlobDetector> SimpleBlobDetector::create(const SimpleBlobDetector::Params& params)
{
return makePtr<SimpleBlobDetectorImpl>(params);
}
}
......@@ -359,19 +359,17 @@ void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool
{
FAST(_img, keypoints, threshold, nonmax_suppression, FastFeatureDetector::TYPE_9_16);
}
/*
* FastFeatureDetector
*/
FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(FastFeatureDetector::TYPE_9_16)
{}
FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppression, int _type )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
{}
void FastFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
class FastFeatureDetector_Impl : public FastFeatureDetector
{
public:
FastFeatureDetector_Impl( int _threshold, bool _nonmaxSuppression, int _type )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type((short)_type)
{}
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
{
Mat mask = _mask.getMat(), grayImage;
UMat ugrayImage;
_InputArray gray = _image;
......@@ -383,6 +381,41 @@ void FastFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>&
}
FAST( gray, keypoints, threshold, nonmaxSuppression, type );
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
void set(int prop, double value)
{
if(prop == THRESHOLD)
threshold = cvRound(value);
else if(prop == NONMAX_SUPPRESSION)
nonmaxSuppression = value != 0;
else if(prop == FAST_N)
type = cvRound(value);
else
CV_Error(Error::StsBadArg, "");
}
double get(int prop) const
{
if(prop == THRESHOLD)
return threshold;
if(prop == NONMAX_SUPPRESSION)
return nonmaxSuppression;
if(prop == FAST_N)
return type;
CV_Error(Error::StsBadArg, "");
return 0;
}
int threshold;
bool nonmaxSuppression;
int type;
};
Ptr<FastFeatureDetector> FastFeatureDetector::create( int threshold, bool nonmaxSuppression, int type )
{
return makePtr<FastFeatureDetector_Impl>(threshold, nonmaxSuppression, type);
}
}
......@@ -7,10 +7,11 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
......@@ -23,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
......@@ -44,118 +45,125 @@
namespace cv
{
/*
* FeatureDetector
*/
using std::vector;
FeatureDetector::~FeatureDetector()
{}
Feature2D::~Feature2D() {}
void FeatureDetector::detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask ) const
/*
* Detect keypoints in an image.
* image The image.
* keypoints The detected keypoints.
* mask Mask specifying where to look for keypoints (optional). Must be a char
* matrix with non-zero values in the region of interest.
*/
void Feature2D::detect( InputArray image,
std::vector<KeyPoint>& keypoints,
InputArray mask )
{
keypoints.clear();
if( image.empty() )
{
keypoints.clear();
return;
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) );
detectImpl( image, keypoints, mask );
}
detectAndCompute(image, mask, keypoints, noArray(), false);
}
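One practical consequence of this base-class implementation (an illustrative sketch, not part of this diff): any ``Feature2D`` subclass that overrides ``detectAndCompute`` inherits a working ``detect``. ::

    Ptr<Feature2D> f2d = ORB::create();
    vector<KeyPoint> kps;
    f2d->detect(img, kps);   // forwards to detectAndCompute(img, noArray(), kps, noArray(), false)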
void FeatureDetector::detect(InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection,
InputArrayOfArrays _masks ) const
void Feature2D::detect( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays _masks )
{
if (_imageCollection.isUMatVector())
{
std::vector<UMat> uimageCollection, umasks;
_imageCollection.getUMatVector(uimageCollection);
_masks.getUMatVector(umasks);
vector<Mat> images, masks;
pointCollection.resize( uimageCollection.size() );
for( size_t i = 0; i < uimageCollection.size(); i++ )
detect( uimageCollection[i], pointCollection[i], umasks.empty() ? noArray() : umasks[i] );
_images.getMatVector(images);
size_t i, nimages = images.size();
return;
if( !_masks.empty() )
{
_masks.getMatVector(masks);
CV_Assert(masks.size() == nimages);
}
std::vector<Mat> imageCollection, masks;
_imageCollection.getMatVector(imageCollection);
_masks.getMatVector(masks);
keypoints.resize(nimages);
pointCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
detect( imageCollection[i], pointCollection[i], masks.empty() ? noArray() : masks[i] );
for( i = 0; i < nimages; i++ )
{
detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] );
}
}
/*void FeatureDetector::read( const FileNode& )
{}
void FeatureDetector::write( FileStorage& ) const
{}*/
bool FeatureDetector::empty() const
/*
* Compute the descriptors for a set of keypoints in an image.
* image The image.
* keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
* descriptors Computed descriptors. Row i is the descriptor for keypoint i.
*/
void Feature2D::compute( InputArray image,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors )
{
return false;
if( image.empty() )
{
descriptors.release();
return;
}
detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector<KeyPoint>& keypoints )
void Feature2D::compute( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays _descriptors )
{
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( !_descriptors.needed() )
return;
Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
{
if( detectorType.compare( "HARRIS" ) == 0 )
vector<Mat> images;
_images.getMatVector(images);
size_t i, nimages = images.size();
CV_Assert( keypoints.size() == nimages );
CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );
vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
descriptors.resize(nimages);
for( i = 0; i < nimages; i++ )
{
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
fd->set("useHarrisDetector", true);
return fd;
compute(images[i], keypoints[i], descriptors[i]);
}
return Algorithm::create<FeatureDetector>("Feature2D." + detectorType);
}
GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
: nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
/* Detects keypoints and computes the descriptors */
void Feature2D::detectAndCompute( InputArray, InputArray,
std::vector<KeyPoint>&,
OutputArray,
bool )
{
CV_Error(Error::StsNotImplemented, "");
}
void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
int Feature2D::descriptorSize() const
{
std::vector<Point2f> corners;
if (_image.isUMat())
{
UMat ugrayImage;
if( _image.type() != CV_8U )
cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
else
ugrayImage = _image.getUMat();
goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
else
{
Mat image = _image.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
return 0;
}
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
int Feature2D::descriptorType() const
{
return CV_32F;
}
keypoints.resize(corners.size());
std::vector<Point2f>::const_iterator corner_it = corners.begin();
std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
*keypoint_it = KeyPoint( *corner_it, (float)blockSize );
int Feature2D::defaultNorm() const
{
int tp = descriptorType();
return tp == CV_8U ? NORM_HAMMING : NORM_L2;
}
// Return true if detector object is empty
bool Feature2D::empty() const
{
return true;
}
}
......@@ -40,71 +40,87 @@
//M*/
#include "precomp.hpp"
#include <limits>
namespace cv
{
/****************************************************************************************\
* DescriptorExtractor *
\****************************************************************************************/
/*
* DescriptorExtractor
*/
DescriptorExtractor::~DescriptorExtractor()
{}
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
class GFTTDetector_Impl : public GFTTDetector
{
if( image.empty() || keypoints.empty() )
public:
GFTTDetector_Impl( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
: nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
blockSize(_blockSize), useHarrisDetector(_useHarrisDetector), k(_k)
{
descriptors.release();
return;
}
KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 );
KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits<float>::epsilon() );
void set(int prop, double value)
{
if( prop == USE_HARRIS_DETECTOR )
useHarrisDetector = value != 0;
else
CV_Error(Error::StsBadArg, "");
}
computeImpl( image, keypoints, descriptors );
}
double get(int prop) const
{
double value = 0;
if( prop == USE_HARRIS_DETECTOR )
value = useHarrisDetector;
else
CV_Error(Error::StsBadArg, "");
return value;
}
void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
{
std::vector<Mat> imageCollection, descCollection;
_imageCollection.getMatVector(imageCollection);
_descCollection.getMatVector(descCollection);
CV_Assert( imageCollection.size() == pointCollection.size() );
descCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
compute( imageCollection[i], pointCollection[i], descCollection[i] );
}
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
{
std::vector<Point2f> corners;
/*void DescriptorExtractor::read( const FileNode& )
{}
if (_image.isUMat())
{
UMat ugrayImage;
if( _image.type() != CV_8U )
cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
else
ugrayImage = _image.getUMat();
void DescriptorExtractor::write( FileStorage& ) const
{}*/
goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
else
{
Mat image = _image.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
bool DescriptorExtractor::empty() const
{
return false;
}
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
blockSize, useHarrisDetector, k );
}
void DescriptorExtractor::removeBorderKeypoints( std::vector<KeyPoint>& keypoints,
Size imageSize, int borderSize )
{
KeyPointsFilter::runByImageBorder( keypoints, imageSize, borderSize );
}
keypoints.resize(corners.size());
std::vector<Point2f>::const_iterator corner_it = corners.begin();
std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
*keypoint_it = KeyPoint( *corner_it, (float)blockSize );
Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExtractorType)
{
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}
}
int nfeatures;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double k;
};
CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
Ptr<GFTTDetector> GFTTDetector::create( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
{
DescriptorExtractor::compute(image, keypoints, descriptors);
return makePtr<GFTTDetector_Impl>(_nfeatures, _qualityLevel,
_minDistance, _blockSize, _useHarrisDetector, _k);
}
}
......@@ -52,17 +52,11 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd
namespace cv
{
KAZE::KAZE()
: extended(false)
, upright(false)
, threshold(0.001f)
, octaves(4)
, sublevels(4)
, diffusivity(DIFF_PM_G2)
{
}
KAZE::KAZE(bool _extended, bool _upright, float _threshold, int _octaves,
class KAZE_Impl : public KAZE
{
public:
KAZE_Impl(bool _extended, bool _upright, float _threshold, int _octaves,
int _sublevels, int _diffusivity)
: extended(_extended)
, upright(_upright)
......@@ -71,40 +65,32 @@ namespace cv
, sublevels(_sublevels)
, diffusivity(_diffusivity)
{
}
KAZE::~KAZE()
{
}
virtual ~KAZE_Impl() {}
// returns the descriptor size in bytes
int KAZE::descriptorSize() const
int descriptorSize() const
{
return extended ? 128 : 64;
}
// returns the descriptor type
int KAZE::descriptorType() const
int descriptorType() const
{
return CV_32F;
}
// returns the default norm type
int KAZE::defaultNorm() const
int defaultNorm() const
{
return NORM_L2;
}
void KAZE::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
{
detectImpl(image, keypoints, mask);
}
void KAZE::operator()(InputArray image, InputArray mask,
void detectAndCompute(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints) const
bool useProvidedKeypoints)
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
......@@ -113,8 +99,6 @@ namespace cv
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
......@@ -138,67 +122,49 @@ namespace cv
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
if( descriptors.needed() )
{
Mat& desc = descriptors.getMatRef();
impl.Feature_Description(keypoints, desc);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
}
}
void KAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
void write(FileStorage& fs) const
{
Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Feature_Detection(keypoints);
fs << "extended" << (int)extended;
fs << "upright" << (int)upright;
fs << "threshold" << threshold;
fs << "octaves" << octaves;
fs << "sublevels" << sublevels;
fs << "diffusivity" << diffusivity;
}
if (!mask.empty())
void read(const FileNode& fn)
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
extended = (int)fn["extended"] != 0;
upright = (int)fn["upright"] != 0;
threshold = (float)fn["threshold"];
octaves = (int)fn["octaves"];
sublevels = (int)fn["sublevels"];
diffusivity = (int)fn["diffusivity"];
}
void KAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
bool extended;
bool upright;
float threshold;
int octaves;
int sublevels;
int diffusivity;
};
Ptr<KAZE> KAZE::create(bool extended, bool upright,
float threshold,
int octaves, int sublevels,
int diffusivity)
{
cv::Mat img = image.getMat();
if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Feature_Description(keypoints, desc);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
return makePtr<KAZE_Impl>(extended, upright, threshold, octaves, sublevels, diffusivity);
}
}
......@@ -8,23 +8,8 @@
#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
/* ************************************************************************* */
// OpenCV
#include "../precomp.hpp"
#include <opencv2/features2d.hpp>
/* ************************************************************************* */
/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
const float gauss25[7][7] = {
{ 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f },
{ 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f },
{ 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f },
{ 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f },
{ 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f },
{ 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f },
{ 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f }
};
namespace cv
{
/* ************************************************************************* */
/// AKAZE configuration options structure
struct AKAZEOptions {
......@@ -37,12 +22,12 @@ struct AKAZEOptions {
, soffset(1.6f)
, derivative_factor(1.5f)
, sderivatives(1.0)
, diffusivity(cv::DIFF_PM_G2)
, diffusivity(KAZE::DIFF_PM_G2)
, dthreshold(0.001f)
, min_dthreshold(0.00001f)
, descriptor(cv::DESCRIPTOR_MLDB)
, descriptor(AKAZE::DESCRIPTOR_MLDB)
, descriptor_size(0)
, descriptor_channels(3)
, descriptor_pattern_size(10)
......@@ -75,4 +60,6 @@ struct AKAZEOptions {
int kcontrast_nbins; ///< Number of bins for the contrast factor histogram
};
}
#endif
......@@ -11,10 +11,12 @@
/* ************************************************************************* */
// Includes
#include "../precomp.hpp"
#include "AKAZEConfig.h"
#include "TEvolution.h"
namespace cv
{
/* ************************************************************************* */
// AKAZE Class Declaration
class AKAZEFeatures {
......@@ -59,4 +61,6 @@ public:
void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons,
int nbits, int pattern_size, int nchannels);
}
#endif
......@@ -12,12 +12,14 @@
#include "../precomp.hpp"
#include <opencv2/features2d.hpp>
namespace cv
{
//*************************************************************************************
struct KAZEOptions {
KAZEOptions()
: diffusivity(cv::DIFF_PM_G2)
: diffusivity(KAZE::DIFF_PM_G2)
, soffset(1.60f)
, omax(4)
......@@ -49,4 +51,6 @@ struct KAZEOptions {
bool extended;
};
}
#endif
......@@ -17,10 +17,13 @@
#include "fed.h"
#include "TEvolution.h"
namespace cv
{
/* ************************************************************************* */
// KAZE Class Declaration
class KAZEFeatures {
class KAZEFeatures
{
private:
/// Parameters of the Nonlinear diffusion class
......@@ -56,4 +59,6 @@ public:
void Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts);
};
}
#endif
......@@ -8,10 +8,13 @@
#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__
#define __OPENCV_FEATURES_2D_TEVOLUTION_H__
namespace cv
{
/* ************************************************************************* */
/// KAZE/A-KAZE nonlinear diffusion filtering evolution
struct TEvolution {
struct TEvolution
{
TEvolution() {
etime = 0.0f;
esigma = 0.0f;
......@@ -20,11 +23,11 @@ struct TEvolution {
sigma_size = 0;
}
cv::Mat Lx, Ly; ///< First order spatial derivatives
cv::Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
cv::Mat Lt; ///< Evolution image
cv::Mat Lsmooth; ///< Smoothed image
cv::Mat Ldet; ///< Detector response
Mat Lx, Ly; ///< First order spatial derivatives
Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
Mat Lt; ///< Evolution image
Mat Lsmooth; ///< Smoothed image
Mat Ldet; ///< Detector response
float etime; ///< Evolution time
float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
int octave; ///< Image octave
......@@ -32,4 +35,6 @@ struct TEvolution {
int sigma_size; ///< Integer esigma. For computing the feature detector responses
};
}
#endif
......@@ -11,43 +11,37 @@
#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
/* ************************************************************************* */
// Includes
#include "../precomp.hpp"
/* ************************************************************************* */
// Declaration of functions
namespace cv {
namespace details {
namespace kaze {
namespace cv
{
// Gaussian 2D convolution
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
// Gaussian 2D convolution
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
// Diffusivity functions
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
// Diffusivity functions
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
// Image derivatives
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Image derivatives
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Nonlinear diffusion filtering scalar step
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
// Nonlinear diffusion filtering scalar step
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// Image downsampling
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
// Image downsampling
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
}
}
}
#endif
......@@ -72,7 +72,7 @@ void CV_BRISKTest::run( int )
cvtColor(image1, gray1, COLOR_BGR2GRAY);
cvtColor(image2, gray2, COLOR_BGR2GRAY);
Ptr<FeatureDetector> detector = Algorithm::create<FeatureDetector>("Feature2D.BRISK");
Ptr<FeatureDetector> detector = BRISK::create();
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
......
......@@ -106,8 +106,6 @@ public:
~CV_DescriptorExtractorTest()
{
if(!detector.empty())
detector.release();
}
protected:
virtual void createDescriptorExtractor() {}
......@@ -314,31 +312,34 @@ private:
TEST( Features2d_DescriptorExtractor_BRISK, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk", (CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
DescriptorExtractor::create("BRISK") );
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
BRISK::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_ORB, regression )
{
// TODO adjust the parameters below
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
DescriptorExtractor::create("ORB") );
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
ORB::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_KAZE, regression )
{
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,
DescriptorExtractor::create("KAZE"),
L2<float>(), FeatureDetector::create("KAZE"));
KAZE::create(),
L2<float>(), KAZE::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_AKAZE, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
DescriptorExtractor::create("AKAZE"),
Hamming(), FeatureDetector::create("AKAZE"));
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
AKAZE::create(),
Hamming(), AKAZE::create());
test.safe_run();
}
......@@ -249,48 +249,50 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ )
TEST( Features2d_Detector_BRISK, regression )
{
CV_FeatureDetectorTest test( "detector-brisk", FeatureDetector::create("BRISK") );
CV_FeatureDetectorTest test( "detector-brisk", BRISK::create() );
test.safe_run();
}
TEST( Features2d_Detector_FAST, regression )
{
CV_FeatureDetectorTest test( "detector-fast", FeatureDetector::create("FAST") );
CV_FeatureDetectorTest test( "detector-fast", FastFeatureDetector::create() );
test.safe_run();
}
TEST( Features2d_Detector_GFTT, regression )
{
CV_FeatureDetectorTest test( "detector-gftt", FeatureDetector::create("GFTT") );
CV_FeatureDetectorTest test( "detector-gftt", GFTTDetector::create() );
test.safe_run();
}
TEST( Features2d_Detector_Harris, regression )
{
CV_FeatureDetectorTest test( "detector-harris", FeatureDetector::create("HARRIS") );
Ptr<FeatureDetector> gftt = GFTTDetector::create();
gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1);
CV_FeatureDetectorTest test( "detector-harris", gftt);
test.safe_run();
}
TEST( Features2d_Detector_MSER, DISABLED_regression )
{
CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") );
CV_FeatureDetectorTest test( "detector-mser", MSER::create() );
test.safe_run();
}
TEST( Features2d_Detector_ORB, regression )
{
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
CV_FeatureDetectorTest test( "detector-orb", ORB::create() );
test.safe_run();
}
TEST( Features2d_Detector_KAZE, regression )
{
CV_FeatureDetectorTest test( "detector-kaze", FeatureDetector::create("KAZE") );
CV_FeatureDetectorTest test( "detector-kaze", KAZE::create() );
test.safe_run();
}
TEST( Features2d_Detector_AKAZE, regression )
{
CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") );
CV_FeatureDetectorTest test( "detector-akaze", AKAZE::create() );
test.safe_run();
}
......@@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/core_c.h"
using namespace std;
using namespace cv;
......@@ -61,7 +62,6 @@ public:
protected:
virtual void run(int)
{
cv::initModule_features2d();
CV_Assert(detector);
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
......@@ -121,51 +121,54 @@ protected:
TEST(Features2d_Detector_Keypoints_BRISK, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"));
CV_FeatureDetectorKeypointsTest test(BRISK::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_FAST, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.FAST"));
CV_FeatureDetectorKeypointsTest test(FastFeatureDetector::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_HARRIS, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.HARRIS"));
CV_FeatureDetectorKeypointsTest test(GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_GFTT, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.GFTT"));
Ptr<FeatureDetector> gftt = GFTTDetector::create();
gftt->set(GFTTDetector::USE_HARRIS_DETECTOR, 1);
CV_FeatureDetectorKeypointsTest test(gftt);
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_MSER, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.MSER"));
CV_FeatureDetectorKeypointsTest test(MSER::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_ORB, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"));
CV_FeatureDetectorKeypointsTest test(ORB::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_KAZE, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE"));
CV_FeatureDetectorKeypointsTest test(KAZE::create());
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_AKAZE, validation)
{
CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_KAZE)));
CV_FeatureDetectorKeypointsTest test_kaze(AKAZE::create(AKAZE::DESCRIPTOR_KAZE));
test_kaze.safe_run();
CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_MLDB)));
CV_FeatureDetectorKeypointsTest test_mldb(AKAZE::create(AKAZE::DESCRIPTOR_MLDB));
test_mldb.safe_run();
}
......@@ -532,12 +532,14 @@ void CV_DescriptorMatcherTest::run( int )
TEST( Features2d_DescriptorMatcher_BruteForce, regression )
{
CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force", Algorithm::create<DescriptorMatcher>("DescriptorMatcher.BFMatcher"), 0.01f );
CV_DescriptorMatcherTest test( "descriptor-matcher-brute-force",
DescriptorMatcher::create("BruteForce"), 0.01f );
test.safe_run();
}
TEST( Features2d_DescriptorMatcher_FlannBased, regression )
{
CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based", Algorithm::create<DescriptorMatcher>("DescriptorMatcher.FlannBasedMatcher"), 0.04f );
CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based",
DescriptorMatcher::create("FlannBased"), 0.04f );
test.safe_run();
}
......@@ -43,6 +43,8 @@
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#if 0
#include <vector>
#include <string>
using namespace std;
......@@ -205,3 +207,5 @@ void CV_MserTest::run(int)
}
TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }
#endif
......@@ -47,10 +47,8 @@ using namespace cv;
TEST(Features2D_ORB, _1996)
{
Ptr<FeatureDetector> fd = FeatureDetector::create("ORB");
fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible
fd->set("fastThreshold", 20);//more features than the default
Ptr<DescriptorExtractor> de = DescriptorExtractor::create("ORB");
Ptr<FeatureDetector> fd = ORB::create(10000, 1.2f, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 20);
Ptr<DescriptorExtractor> de = fd;
Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png");
ASSERT_FALSE(image.empty());
......
......@@ -595,7 +595,7 @@ protected:
TEST(Features2d_RotationInvariance_Detector_BRISK, regression)
{
DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"),
DetectorRotationInvarianceTest test(BRISK::create(),
0.32f,
0.76f);
test.safe_run();
......@@ -603,7 +603,7 @@ TEST(Features2d_RotationInvariance_Detector_BRISK, regression)
TEST(Features2d_RotationInvariance_Detector_ORB, regression)
{
DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"),
DetectorRotationInvarianceTest test(ORB::create(),
0.47f,
0.76f);
test.safe_run();
......@@ -615,19 +615,15 @@ TEST(Features2d_RotationInvariance_Detector_ORB, regression)
TEST(Features2d_RotationInvariance_Descriptor_BRISK, regression)
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"),
Algorithm::create<DescriptorExtractor>("Feature2D.BRISK"),
Algorithm::create<DescriptorExtractor>("Feature2D.BRISK")->defaultNorm(),
0.99f);
Ptr<Feature2D> f2d = BRISK::create();
DescriptorRotationInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f);
test.safe_run();
}
TEST(Features2d_RotationInvariance_Descriptor_ORB, regression)
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"),
Algorithm::create<DescriptorExtractor>("Feature2D.ORB"),
Algorithm::create<DescriptorExtractor>("Feature2D.ORB")->defaultNorm(),
0.99f);
Ptr<Feature2D> f2d = ORB::create();
DescriptorRotationInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f);
test.safe_run();
}
......@@ -646,25 +642,19 @@ TEST(Features2d_RotationInvariance_Descriptor_ORB, regression)
TEST(Features2d_ScaleInvariance_Detector_BRISK, regression)
{
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"),
0.08f,
0.49f);
DetectorScaleInvarianceTest test(BRISK::create(), 0.08f, 0.49f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Detector_KAZE, regression)
{
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE"),
0.08f,
0.49f);
DetectorScaleInvarianceTest test(KAZE::create(), 0.08f, 0.49f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Detector_AKAZE, regression)
{
DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.AKAZE"),
0.08f,
0.49f);
DetectorScaleInvarianceTest test(AKAZE::create(), 0.08f, 0.49f);
test.safe_run();
}
......
......@@ -90,67 +90,69 @@ public:
//not supported: SimpleBlob, Dense
CV_WRAP static javaFeatureDetector* create( int detectorType )
{
String name;
//String name;
if (detectorType > DYNAMICDETECTOR)
{
name = "Dynamic";
//name = "Dynamic";
detectorType -= DYNAMICDETECTOR;
}
if (detectorType > PYRAMIDDETECTOR)
{
name = "Pyramid";
//name = "Pyramid";
detectorType -= PYRAMIDDETECTOR;
}
if (detectorType > GRIDDETECTOR)
{
name = "Grid";
//name = "Grid";
detectorType -= GRIDDETECTOR;
}
Ptr<FeatureDetector> fd;
switch(detectorType)
{
case FAST:
name = name + "FAST";
break;
case STAR:
name = name + "STAR";
break;
case SIFT:
name = name + "SIFT";
break;
case SURF:
name = name + "SURF";
fd = FastFeatureDetector::create();
break;
//case STAR:
// fd = xfeatures2d::StarDetector::create();
// break;
//case SIFT:
// name = name + "SIFT";
// break;
//case SURF:
// name = name + "SURF";
// break;
case ORB:
name = name + "ORB";
fd = ORB::create();
break;
case MSER:
name = name + "MSER";
fd = MSER::create();
break;
case GFTT:
name = name + "GFTT";
fd = GFTTDetector::create();
break;
case HARRIS:
name = name + "HARRIS";
fd = GFTTDetector::create();
fd->set(GFTTDetector::USE_HARRIS_DETECTOR, 1);
break;
case SIMPLEBLOB:
name = name + "SimpleBlob";
break;
case DENSE:
name = name + "Dense";
fd = SimpleBlobDetector::create();
break;
//case DENSE:
// name = name + "Dense";
// break;
case BRISK:
name = name + "BRISK";
fd = BRISK::create();
break;
case AKAZE:
name = name + "AKAZE";
fd = AKAZE::create();
break;
default:
CV_Error( Error::StsBadArg, "Specified feature detector type is not supported." );
break;
}
return new javaFeatureDetector(FeatureDetector::create(name));
return new javaFeatureDetector(fd);
}
CV_WRAP void write( const String& fileName ) const
......@@ -332,43 +334,44 @@ public:
//not supported: Calonder
CV_WRAP static javaDescriptorExtractor* create( int extractorType )
{
String name;
//String name;
if (extractorType > OPPONENTEXTRACTOR)
{
name = "Opponent";
//name = "Opponent";
extractorType -= OPPONENTEXTRACTOR;
}
Ptr<DescriptorExtractor> de;
switch(extractorType)
{
case SIFT:
name = name + "SIFT";
break;
case SURF:
name = name + "SURF";
break;
//case SIFT:
// name = name + "SIFT";
// break;
//case SURF:
// name = name + "SURF";
// break;
case ORB:
name = name + "ORB";
break;
case BRIEF:
name = name + "BRIEF";
de = ORB::create();
break;
//case BRIEF:
// name = name + "BRIEF";
// break;
case BRISK:
name = name + "BRISK";
break;
case FREAK:
name = name + "FREAK";
de = BRISK::create();
break;
//case FREAK:
// name = name + "FREAK";
// break;
case AKAZE:
name = name + "AKAZE";
de = AKAZE::create();
break;
default:
CV_Error( Error::StsBadArg, "Specified descriptor extractor type is not supported." );
break;
}
return new javaDescriptorExtractor(DescriptorExtractor::create(name));
return new javaDescriptorExtractor(de);
}
CV_WRAP void write( const String& fileName ) const
......
......@@ -24,15 +24,9 @@ JNI_OnLoad(JavaVM* vm, void* )
return -1;
bool init = true;
#ifdef HAVE_OPENCV_FEATURES2D
init &= cv::initModule_features2d();
#endif
#ifdef HAVE_OPENCV_VIDEO
init &= cv::initModule_video();
#endif
#ifdef HAVE_OPENCV_CONTRIB
init &= cv::initModule_contrib();
#endif
if(!init)
return -1;
......
......@@ -91,7 +91,7 @@ class Hackathon244Tests(NewOpenCVTests):
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
fd = cv2.FastFeatureDetector(30, True)
fd = cv2.FastFeatureDetector_create(30, True)
img = self.get_sample("samples/cpp/right02.jpg", 0)
img = cv2.medianBlur(img, 3)
imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
......
......@@ -48,8 +48,7 @@ using namespace cv::cuda;
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d.hpp"
static bool makeUseOfXfeatures2d = xfeatures2d::initModule_xfeatures2d();
using xfeatures2d::SURF;
#endif
namespace {
......@@ -321,30 +320,34 @@ void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, cons
SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int num_layers,
int num_octaves_descr, int num_layers_descr)
{
#ifdef HAVE_OPENCV_XFEATURES2D
if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
{
surf = Algorithm::create<Feature2D>("Feature2D.SURF");
surf = SURF::create();
if( !surf )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
surf->set("hessianThreshold", hess_thresh);
surf->set("nOctaves", num_octaves);
surf->set("nOctaveLayers", num_layers);
surf->set(SURF::HESSIAN_THRESHOLD, hess_thresh);
surf->set(SURF::NOCTAVES, num_octaves);
surf->set(SURF::NOCTAVE_LAYERS, num_layers);
}
else
{
detector_ = Algorithm::create<FeatureDetector>("Feature2D.SURF");
extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");
detector_ = SURF::create();
extractor_ = SURF::create();
if( !detector_ || !extractor_ )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
detector_->set("hessianThreshold", hess_thresh);
detector_->set("nOctaves", num_octaves);
detector_->set("nOctaveLayers", num_layers);
detector_->set(SURF::HESSIAN_THRESHOLD, hess_thresh);
detector_->set(SURF::NOCTAVES, num_octaves);
detector_->set(SURF::NOCTAVE_LAYERS, num_layers);
extractor_->set("nOctaves", num_octaves_descr);
extractor_->set("nOctaveLayers", num_layers_descr);
extractor_->set(SURF::NOCTAVES, num_octaves_descr);
extractor_->set(SURF::NOCTAVE_LAYERS, num_layers_descr);
}
#else
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
#endif
}
void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
......@@ -367,7 +370,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
else
{
UMat descriptors;
(*surf)(gray_image, Mat(), features.keypoints, descriptors);
surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
}
}
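SURF now lives in the opencv_contrib *xfeatures2d* module, hence the new ``HAVE_OPENCV_XFEATURES2D`` guard and the switch to ``detectAndCompute`` above. A hedged sketch of constructing it directly, assuming the contrib module is built and using the 3.x ``SURF::create`` parameters rather than the ``set()`` calls in the patch: ::

    #include "opencv2/xfeatures2d.hpp"

    // hessianThreshold, nOctaves and nOctaveLayers mirror the finder's parameters
    cv::Ptr<cv::Feature2D> surf = cv::xfeatures2d::SURF::create(
        300.0,  // hessianThreshold
        4,      // nOctaves
        3);     // nOctaveLayers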
......@@ -375,7 +378,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
{
grid_size = _grid_size;
orb = makePtr<ORB>(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
orb = ORB::create(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
}
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
......@@ -395,7 +398,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
}
if (grid_size.area() == 1)
(*orb)(gray_image, Mat(), features.keypoints, features.descriptors);
orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors);
else
{
features.keypoints.clear();
......@@ -425,7 +428,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
// << " gray_image_part.dims=" << gray_image_part.dims << ", "
// << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");
(*orb)(gray_image_part, UMat(), points, descriptors);
orb->detectAndCompute(gray_image_part, UMat(), points, descriptors);
features.keypoints.reserve(features.keypoints.size() + points.size());
for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
......
......@@ -671,7 +671,7 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
setDetector(makePtr<GoodFeaturesToTrackDetector>());
setDetector(GFTTDetector::create());
setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
setOutlierRejector(makePtr<NullOutlierRejector>());
}
......
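``GFTTDetector::create`` replaces the old ``GoodFeaturesToTrackDetector`` constructor here; a sketch with the 3.x defaults spelled out (parameter values assumed from the documented API): ::

    cv::Ptr<cv::GFTTDetector> gftt = cv::GFTTDetector::create(
        1000,   // maxCorners
        0.01,   // qualityLevel
        1.0,    // minDistance
        3,      // blockSize
        false,  // useHarrisDetector
        0.04);  // Harris k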
......@@ -47,12 +47,6 @@ bool cv::initAll()
return true
#ifdef HAVE_OPENCV_VIDEO
&& initModule_video()
#endif
#ifdef HAVE_OPENCV_FEATURES2D
&& initModule_features2d()
#endif
#ifdef HAVE_OPENCV_XFEATURES2D
&& xfeatures2d::initModule_xfeatures2d()
#endif
;
}
......@@ -16,8 +16,8 @@ JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindF
Mat& mRgb = *(Mat*)addrRgba;
vector<KeyPoint> v;
FastFeatureDetector detector(50);
detector.detect(mGr, v);
Ptr<FeatureDetector> detector = FastFeatureDetector::create(50);
detector->detect(mGr, v);
for( unsigned int i = 0; i < v.size(); i++ )
{
const KeyPoint& kp = v[i];
......
......@@ -19,23 +19,23 @@ public:
RobustMatcher() : ratio_(0.8f)
{
// ORB is the default feature
detector_ = new cv::OrbFeatureDetector();
extractor_ = new cv::OrbDescriptorExtractor();
detector_ = cv::ORB::create();
extractor_ = cv::ORB::create();
// BruteForce matcher with Norm Hamming is the default matcher
matcher_ = new cv::BFMatcher(cv::NORM_HAMMING, false);
matcher_ = cv::makePtr<cv::BFMatcher>((int)cv::NORM_HAMMING, false);
}
virtual ~RobustMatcher();
// Set the feature detector
void setFeatureDetector(cv::FeatureDetector * detect) { detector_ = detect; }
void setFeatureDetector(const cv::Ptr<cv::FeatureDetector>& detect) { detector_ = detect; }
// Set the descriptor extractor
void setDescriptorExtractor(cv::DescriptorExtractor * desc) { extractor_ = desc; }
void setDescriptorExtractor(const cv::Ptr<cv::DescriptorExtractor>& desc) { extractor_ = desc; }
// Set the matcher
void setDescriptorMatcher(cv::DescriptorMatcher * match) { matcher_ = match; }
void setDescriptorMatcher(const cv::Ptr<cv::DescriptorMatcher>& match) { matcher_ = match; }
// Compute the keypoints of an image
void computeKeyPoints( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints);
......@@ -69,11 +69,11 @@ public:
private:
// pointer to the feature point detector object
cv::FeatureDetector * detector_;
cv::Ptr<cv::FeatureDetector> detector_;
// pointer to the feature descriptor extractor object
cv::DescriptorExtractor * extractor_;
cv::Ptr<cv::DescriptorExtractor> extractor_;
// pointer to the matcher object
cv::DescriptorMatcher * matcher_;
cv::Ptr<cv::DescriptorMatcher> matcher_;
// max ratio between 1st and 2nd NN
float ratio_;
};
......
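With the raw pointers replaced by ``cv::Ptr``, the sample's setters compose naturally with the new factories; a hypothetical usage sketch (``RobustMatcher`` is the tutorial's own class): ::

    RobustMatcher rm;
    rm.setFeatureDetector(cv::ORB::create(2000));  // detector with a higher feature cap
    rm.setDescriptorExtractor(cv::ORB::create());  // same algorithm reused as extractor
    rm.setDescriptorMatcher(cv::makePtr<cv::BFMatcher>((int)cv::NORM_HAMMING, false));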
......@@ -13,13 +13,16 @@
#include "ModelRegistration.h"
#include "Utils.h"
using namespace cv;
using namespace std;
/** GLOBAL VARIABLES **/
std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
std::string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register
std::string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh
std::string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file
string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register
string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh
string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file
// Boolean to know whether the registration is done
bool end_registration = false;
......@@ -39,10 +42,10 @@ int n = 8;
int pts[] = {1, 2, 3, 4, 5, 6, 7, 8}; // 3 -> 4
// Some basic colors
cv::Scalar red(0, 0, 255);
cv::Scalar green(0,255,0);
cv::Scalar blue(255,0,0);
cv::Scalar yellow(0,255,255);
Scalar red(0, 0, 255);
Scalar green(0,255,0);
Scalar blue(255,0,0);
Scalar yellow(0,255,255);
/*
* CREATE MODEL REGISTRATION OBJECT
......@@ -61,13 +64,13 @@ void help();
// Mouse events for model registration
static void onMouseModelRegistration( int event, int x, int y, int, void* )
{
if ( event == cv::EVENT_LBUTTONUP )
if ( event == EVENT_LBUTTONUP )
{
int n_regist = registration.getNumRegist();
int n_vertex = pts[n_regist];
cv::Point2f point_2d = cv::Point2f((float)x,(float)y);
cv::Point3f point_3d = mesh.getVertex(n_vertex-1);
Point2f point_2d = Point2f((float)x,(float)y);
Point3f point_3d = mesh.getVertex(n_vertex-1);
bool is_registrable = registration.is_registrable();
if (is_registrable)
......@@ -92,23 +95,23 @@ int main()
//Instantiate robust matcher: detector, extractor, matcher
RobustMatcher rmatcher;
cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints);
Ptr<FeatureDetector> detector = ORB::create(numKeyPoints);
rmatcher.setFeatureDetector(detector);
/** GROUND TRUTH OF THE FIRST IMAGE **/
// Create & Open Window
cv::namedWindow("MODEL REGISTRATION", cv::WINDOW_KEEPRATIO);
namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO);
// Set up the mouse events
cv::setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
// Open the image to register
cv::Mat img_in = cv::imread(img_path, cv::IMREAD_COLOR);
cv::Mat img_vis = img_in.clone();
Mat img_in = imread(img_path, IMREAD_COLOR);
Mat img_vis = img_in.clone();
if (!img_in.data) {
std::cout << "Could not open or find the image" << std::endl;
cout << "Could not open or find the image" << endl;
return -1;
}
......@@ -116,18 +119,18 @@ int main()
int num_registrations = n;
registration.setNumMax(num_registrations);
std::cout << "Click the box corners ..." << std::endl;
std::cout << "Waiting ..." << std::endl;
cout << "Click the box corners ..." << endl;
cout << "Waiting ..." << endl;
// Loop until all the points are registered
while ( cv::waitKey(30) < 0 )
while ( waitKey(30) < 0 )
{
// Refresh debug image
img_vis = img_in.clone();
// Current registered points
std::vector<cv::Point2f> list_points2d = registration.get_points2d();
std::vector<cv::Point3f> list_points3d = registration.get_points3d();
vector<Point2f> list_points2d = registration.get_points2d();
vector<Point3f> list_points3d = registration.get_points3d();
// Draw current registered points
drawPoints(img_vis, list_points2d, list_points3d, red);
......@@ -139,7 +142,7 @@ int main()
// Draw debug text
int n_regist = registration.getNumRegist();
int n_vertex = pts[n_regist];
cv::Point3f current_poin3d = mesh.getVertex(n_vertex-1);
Point3f current_poin3d = mesh.getVertex(n_vertex-1);
drawQuestion(img_vis, current_poin3d, green);
drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
......@@ -153,43 +156,43 @@ int main()
}
// Show the image
cv::imshow("MODEL REGISTRATION", img_vis);
imshow("MODEL REGISTRATION", img_vis);
}
/** COMPUTE CAMERA POSE **/
std::cout << "COMPUTING POSE ..." << std::endl;
cout << "COMPUTING POSE ..." << endl;
// The list of registered points
std::vector<cv::Point2f> list_points2d = registration.get_points2d();
std::vector<cv::Point3f> list_points3d = registration.get_points3d();
vector<Point2f> list_points2d = registration.get_points2d();
vector<Point3f> list_points3d = registration.get_points3d();
// Estimate pose given the registered points
bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, cv::SOLVEPNP_ITERATIVE);
bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE);
if ( is_correspondence )
{
std::cout << "Correspondence found" << std::endl;
cout << "Correspondence found" << endl;
// Compute all the 2D points of the mesh to verify the algorithm and draw it
std::vector<cv::Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
vector<Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
draw2DPoints(img_vis, list_points2d_mesh, green);
} else {
std::cout << "Correspondence not found" << std::endl << std::endl;
cout << "Correspondence not found" << endl << endl;
}
// Show the image
cv::imshow("MODEL REGISTRATION", img_vis);
imshow("MODEL REGISTRATION", img_vis);
// Show image until ESC pressed
cv::waitKey(0);
waitKey(0);
/** COMPUTE 3D of the image Keypoints **/
// Containers for keypoints and descriptors of the model
std::vector<cv::KeyPoint> keypoints_model;
cv::Mat descriptors;
vector<KeyPoint> keypoints_model;
Mat descriptors;
// Compute keypoints and descriptors
rmatcher.computeKeyPoints(img_in, keypoints_model);
......@@ -197,8 +200,8 @@ int main()
// Check if keypoints are on the surface of the registration image and add to the model
for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
cv::Point2f point2d(keypoints_model[i].pt);
cv::Point3f point3d;
Point2f point2d(keypoints_model[i].pt);
Point3f point3d;
bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
if (on_surface)
{
......@@ -219,12 +222,12 @@ int main()
img_vis = img_in.clone();
// The list of the points2d of the model
std::vector<cv::Point2f> list_points_in = model.get_points2d_in();
std::vector<cv::Point2f> list_points_out = model.get_points2d_out();
vector<Point2f> list_points_in = model.get_points2d_in();
vector<Point2f> list_points_out = model.get_points2d_out();
// Draw some debug text
std::string num = IntToString((int)list_points_in.size());
std::string text = "There are " + num + " inliers";
string num = IntToString((int)list_points_in.size());
string text = "There are " + num + " inliers";
drawText(img_vis, text, green);
// Draw some debug text
......@@ -240,26 +243,26 @@ int main()
draw2DPoints(img_vis, list_points_out, red);
// Show the image
cv::imshow("MODEL REGISTRATION", img_vis);
imshow("MODEL REGISTRATION", img_vis);
// Wait until ESC pressed
cv::waitKey(0);
waitKey(0);
// Close and Destroy Window
cv::destroyWindow("MODEL REGISTRATION");
destroyWindow("MODEL REGISTRATION");
std::cout << "GOODBYE" << std::endl;
cout << "GOODBYE" << endl;
}
/**********************************************************************************************************/
void help()
{
std::cout
<< "--------------------------------------------------------------------------" << std::endl
<< "This program shows how to create your 3D textured model. " << std::endl
<< "Usage:" << std::endl
<< "./cpp-tutorial-pnp_registration" << std::endl
<< "--------------------------------------------------------------------------" << std::endl
<< std::endl;
cout
<< "--------------------------------------------------------------------------" << endl
<< "This program shows how to create your 3D textured model. " << endl
<< "Usage:" << endl
<< "./cpp-tutorial-pnp_registration" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
......@@ -22,9 +22,9 @@ int main(void)
vector<KeyPoint> kpts1, kpts2;
Mat desc1, desc2;
AKAZE akaze;
akaze(img1, noArray(), kpts1, desc1);
akaze(img2, noArray(), kpts2, desc2);
Ptr<AKAZE> akaze = AKAZE::create();
akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
akaze->detectAndCompute(img2, noArray(), kpts2, desc2);
BFMatcher matcher(NORM_HAMMING);
vector< vector<DMatch> > nn_matches;
......
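The tutorial hunk above stops at the 2-NN matching; a hedged continuation sketch applying the usual ratio test (the 0.8 threshold is an assumption, not taken from the patch): ::

    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector< std::vector<cv::DMatch> > nn_matches;
    matcher.knnMatch(desc1, desc2, nn_matches, 2);

    std::vector<cv::DMatch> good_matches;
    for (size_t i = 0; i < nn_matches.size(); i++)
    {
        // keep a match only if it is clearly better than the second-best candidate
        if (nn_matches[i][0].distance < 0.8f * nn_matches[i][1].distance)
            good_matches.push_back(nn_matches[i][0]);
    }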