Commit 09df1a28 authored by Vadim Pisarevsky

OpenCV with the refactored features2d compiles! contrib is broken for now; the tests have not been run yet
parent 2e915026
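
The hunks below consistently replace direct construction of feature algorithms (e.g. ORB detector(1500, 1.3f, 1)) and their operator() calls with static create() factories driven through the virtual Feature2D interface (detect / compute / detectAndCompute). A minimal before/after sketch of calling code, assuming the post-refactoring OpenCV 3.x headers; the image path is a placeholder:

    #include <opencv2/features2d.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <vector>

    int main()
    {
        cv::Mat frame = cv::imread("lena.png", cv::IMREAD_GRAYSCALE); // placeholder input
        cv::Mat mask, descriptors;                                    // empty mask = no masking
        std::vector<cv::KeyPoint> points;

        // Before this commit: concrete object + operator()
        //   cv::ORB detector(1500, 1.3f, 1);
        //   detector(frame, mask, points, descriptors, false);

        // After this commit: factory + Feature2D virtual interface
        cv::Ptr<cv::ORB> detector = cv::ORB::create(1500, 1.3f, 1);
        detector->detectAndCompute(frame, mask, points, descriptors, false);
        return 0;
    }
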
...@@ -110,6 +110,10 @@ public: ...@@ -110,6 +110,10 @@ public:
CV_OUT std::vector<KeyPoint>& keypoints, CV_OUT std::vector<KeyPoint>& keypoints,
InputArray mask=noArray() ); InputArray mask=noArray() );
virtual void detect( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays masks=noArray() );
/* /*
* Compute the descriptors for a set of keypoints in an image. * Compute the descriptors for a set of keypoints in an image.
* image The image. * image The image.
...@@ -120,6 +124,10 @@ public: ...@@ -120,6 +124,10 @@ public:
CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors ); OutputArray descriptors );
virtual void compute( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays descriptors );
/* Detects keypoints and computes the descriptors */ /* Detects keypoints and computes the descriptors */
CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask, CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints, CV_OUT std::vector<KeyPoint>& keypoints,
...@@ -146,7 +154,7 @@ public: ...@@ -146,7 +154,7 @@ public:
CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f); CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);
// custom setup // custom setup
CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList, CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>()); float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());
}; };
/*! /*!
...@@ -174,6 +182,13 @@ public: ...@@ -174,6 +182,13 @@ public:
class CV_EXPORTS_W MSER : public Feature2D class CV_EXPORTS_W MSER : public Feature2D
{ {
public: public:
enum
{
DELTA=10000, MIN_AREA=10001, MAX_AREA=10002, PASS2_ONLY=10003,
MAX_EVOLUTION=10004, AREA_THRESHOLD=10005,
MIN_MARGIN=10006, EDGE_BLUR_SIZE=10007
};
//! the full constructor //! the full constructor
CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400, CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
double _max_variation=0.25, double _min_diversity=.2, double _max_variation=0.25, double _min_diversity=.2,
...@@ -181,7 +196,7 @@ public: ...@@ -181,7 +196,7 @@ public:
double _min_margin=0.003, int _edge_blur_size=5 ); double _min_margin=0.003, int _edge_blur_size=5 );
CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label, CV_WRAP virtual int detectAndLabel( InputArray image, OutputArray label,
OutputArray stats=noArray() ) const = 0; OutputArray stats=noArray() ) = 0;
}; };
//! detects corners using FAST algorithm by E. Rosten //! detects corners using FAST algorithm by E. Rosten
...@@ -199,13 +214,16 @@ public: ...@@ -199,13 +214,16 @@ public:
TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
}; };
CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10, bool nonmaxSuppression=true, int type=TYPE_9_16 ); CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
bool nonmaxSuppression=true,
int type=FastFeatureDetector::TYPE_9_16 );
}; };
class CV_EXPORTS_W GFTTDetector : public Feature2D class CV_EXPORTS_W GFTTDetector : public Feature2D
{ {
public: public:
enum { USE_HARRIS_DETECTOR=10000 };
CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
}; };
...@@ -282,7 +300,7 @@ public: ...@@ -282,7 +300,7 @@ public:
DESCRIPTOR_MLDB = 5 DESCRIPTOR_MLDB = 5
}; };
CV_WRAP static Ptr<AKAZE> create(int descriptor_type=DESCRIPTOR_MLDB, CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
int descriptor_size = 0, int descriptor_channels = 3, int descriptor_size = 0, int descriptor_channels = 3,
float threshold = 0.001f, int octaves = 4, float threshold = 0.001f, int octaves = 4,
int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2); int sublevels = 4, int diffusivity = KAZE::DIFF_PM_G2);
...@@ -535,8 +553,6 @@ public: ...@@ -535,8 +553,6 @@ public:
virtual bool isMaskSupported() const { return true; } virtual bool isMaskSupported() const { return true; }
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const; virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
AlgorithmInfo* info() const;
protected: protected:
virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k, virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
InputArrayOfArrays masks=noArray(), bool compactResult=false ); InputArrayOfArrays masks=noArray(), bool compactResult=false );
...@@ -569,8 +585,6 @@ public: ...@@ -569,8 +585,6 @@ public:
virtual bool isMaskSupported() const; virtual bool isMaskSupported() const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const; virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
AlgorithmInfo* info() const;
protected: protected:
static void convertToDMatches( const DescriptorCollection& descriptors, static void convertToDMatches( const DescriptorCollection& descriptors,
const Mat& indices, const Mat& distances, const Mat& indices, const Mat& distances,
......
...@@ -22,10 +22,10 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES) ...@@ -22,10 +22,10 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES)
mframe.copyTo(frame); mframe.copyTo(frame);
declare.in(frame); declare.in(frame);
ORB detector(1500, 1.3f, 1); Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points; vector<KeyPoint> points;
OCL_TEST_CYCLE() detector(frame, mask, points); OCL_TEST_CYCLE() detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater()); std::sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5); SANITY_CHECK_KEYPOINTS(points, 1e-5);
...@@ -44,14 +44,14 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES) ...@@ -44,14 +44,14 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES)
declare.in(frame); declare.in(frame);
ORB detector(1500, 1.3f, 1); Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points; vector<KeyPoint> points;
detector(frame, mask, points); detector->detect(frame, points, mask);
std::sort(points.begin(), points.end(), comparators::KeypointGreater()); std::sort(points.begin(), points.end(), comparators::KeypointGreater());
UMat descriptors; UMat descriptors;
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, true); OCL_TEST_CYCLE() detector->compute(frame, points, descriptors);
SANITY_CHECK(descriptors); SANITY_CHECK(descriptors);
} }
...@@ -68,12 +68,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES) ...@@ -68,12 +68,12 @@ OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
mframe.copyTo(frame); mframe.copyTo(frame);
declare.in(frame); declare.in(frame);
ORB detector(1500, 1.3f, 1); Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points; vector<KeyPoint> points;
UMat descriptors; UMat descriptors;
OCL_TEST_CYCLE() detector(frame, mask, points, descriptors, false); OCL_TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
::perf::sort(points, descriptors); ::perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5); SANITY_CHECK_KEYPOINTS(points, 1e-5);
......
...@@ -22,10 +22,10 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES)) ...@@ -22,10 +22,10 @@ PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES))
Mat mask; Mat mask;
declare.in(frame); declare.in(frame);
ORB detector(1500, 1.3f, 1); Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points; vector<KeyPoint> points;
TEST_CYCLE() detector(frame, mask, points); TEST_CYCLE() detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater()); sort(points.begin(), points.end(), comparators::KeypointGreater());
SANITY_CHECK_KEYPOINTS(points, 1e-5); SANITY_CHECK_KEYPOINTS(points, 1e-5);
...@@ -42,14 +42,14 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES)) ...@@ -42,14 +42,14 @@ PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES))
Mat mask; Mat mask;
declare.in(frame); declare.in(frame);
ORB detector(1500, 1.3f, 1); Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points; vector<KeyPoint> points;
detector(frame, mask, points); detector->detect(frame, points, mask);
sort(points.begin(), points.end(), comparators::KeypointGreater()); sort(points.begin(), points.end(), comparators::KeypointGreater());
Mat descriptors; Mat descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, true); TEST_CYCLE() detector->compute(frame, points, descriptors);
SANITY_CHECK(descriptors); SANITY_CHECK(descriptors);
} }
...@@ -64,12 +64,12 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES)) ...@@ -64,12 +64,12 @@ PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES))
Mat mask; Mat mask;
declare.in(frame); declare.in(frame);
ORB detector(1500, 1.3f, 1); Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points; vector<KeyPoint> points;
Mat descriptors; Mat descriptors;
TEST_CYCLE() detector(frame, mask, points, descriptors, false); TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
perf::sort(points, descriptors); perf::sort(points, descriptors);
SANITY_CHECK_KEYPOINTS(points, 1e-5); SANITY_CHECK_KEYPOINTS(points, 1e-5);
......
...@@ -52,22 +52,15 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd ...@@ -52,22 +52,15 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd
#include "kaze/AKAZEFeatures.h" #include "kaze/AKAZEFeatures.h"
#include <iostream> #include <iostream>
using namespace std;
namespace cv namespace cv
{ {
AKAZE::AKAZE() using namespace std;
: descriptor(DESCRIPTOR_MLDB)
, descriptor_channels(3)
, descriptor_size(0)
, threshold(0.001f)
, octaves(4)
, sublevels(4)
, diffusivity(DIFF_PM_G2)
{
}
AKAZE::AKAZE(int _descriptor_type, int _descriptor_size, int _descriptor_channels, class AKAZE_Impl : public AKAZE
{
public:
AKAZE_Impl(int _descriptor_type, int _descriptor_size, int _descriptor_channels,
float _threshold, int _octaves, int _sublevels, int _diffusivity) float _threshold, int _octaves, int _sublevels, int _diffusivity)
: descriptor(_descriptor_type) : descriptor(_descriptor_type)
, descriptor_channels(_descriptor_channels) , descriptor_channels(_descriptor_channels)
...@@ -76,181 +69,139 @@ namespace cv ...@@ -76,181 +69,139 @@ namespace cv
, octaves(_octaves) , octaves(_octaves)
, sublevels(_sublevels) , sublevels(_sublevels)
, diffusivity(_diffusivity) , diffusivity(_diffusivity)
{
}
AKAZE::~AKAZE()
{
}
// returns the descriptor size in bytes
int AKAZE::descriptorSize() const
{
switch (descriptor)
{ {
case cv::DESCRIPTOR_KAZE: }
case cv::DESCRIPTOR_KAZE_UPRIGHT:
return 64;
case cv::DESCRIPTOR_MLDB: virtual ~AKAZE_Impl()
case cv::DESCRIPTOR_MLDB_UPRIGHT: {
// We use the full length binary descriptor -> 486 bits
if (descriptor_size == 0)
{
int t = (6 + 36 + 120) * descriptor_channels;
return (int)ceil(t / 8.);
}
else
{
// We use the random bit selection length binary descriptor
return (int)ceil(descriptor_size / 8.);
}
default:
return -1;
} }
}
// returns the descriptor type // returns the descriptor size in bytes
int AKAZE::descriptorType() const int descriptorSize() const
{
switch (descriptor)
{ {
case cv::DESCRIPTOR_KAZE: switch (descriptor)
case cv::DESCRIPTOR_KAZE_UPRIGHT: {
return CV_32F; case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
case cv::DESCRIPTOR_MLDB: return 64;
case cv::DESCRIPTOR_MLDB_UPRIGHT:
return CV_8U; case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
// We use the full length binary descriptor -> 486 bits
if (descriptor_size == 0)
{
int t = (6 + 36 + 120) * descriptor_channels;
return (int)ceil(t / 8.);
}
else
{
// We use the random bit selection length binary descriptor
return (int)ceil(descriptor_size / 8.);
}
default: default:
return -1; return -1;
}
} }
}
// returns the default norm type // returns the descriptor type
int AKAZE::defaultNorm() const int descriptorType() const
{
switch (descriptor)
{ {
case cv::DESCRIPTOR_KAZE: switch (descriptor)
case cv::DESCRIPTOR_KAZE_UPRIGHT: {
return cv::NORM_L2; case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return CV_32F;
case cv::DESCRIPTOR_MLDB: case DESCRIPTOR_MLDB:
case cv::DESCRIPTOR_MLDB_UPRIGHT: case DESCRIPTOR_MLDB_UPRIGHT:
return cv::NORM_HAMMING; return CV_8U;
default: default:
return -1; return -1;
}
} }
}
void AKAZE::operator()(InputArray image, InputArray mask, // returns the default norm type
std::vector<KeyPoint>& keypoints, int defaultNorm() const
OutputArray descriptors, {
bool useProvidedKeypoints) const switch (descriptor)
{ {
cv::Mat img = image.getMat(); case DESCRIPTOR_KAZE:
if (img.type() != CV_8UC1) case DESCRIPTOR_KAZE_UPRIGHT:
cvtColor(image, img, COLOR_BGR2GRAY); return NORM_L2;
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
AKAZEOptions options;
options.descriptor = descriptor;
options.descriptor_channels = descriptor_channels;
options.descriptor_size = descriptor_size;
options.img_width = img.cols;
options.img_height = img.rows;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
AKAZEFeatures impl(options); case DESCRIPTOR_MLDB:
impl.Create_Nonlinear_Scale_Space(img1_32); case DESCRIPTOR_MLDB_UPRIGHT:
return NORM_HAMMING;
if (!useProvidedKeypoints) default:
{ return -1;
impl.Feature_Detection(keypoints); }
} }
if (!mask.empty()) void detectAndCompute(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints)
{ {
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); Mat img = image.getMat();
} if (img.type() != CV_8UC1)
cvtColor(image, img, COLOR_BGR2GRAY);
impl.Compute_Descriptors(keypoints, desc);
Mat img1_32;
CV_Assert((!desc.rows || desc.cols == descriptorSize())); img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
} AKAZEOptions options;
options.descriptor = descriptor;
void AKAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const options.descriptor_channels = descriptor_channels;
{ options.descriptor_size = descriptor_size;
cv::Mat img = image.getMat(); options.img_width = img.cols;
if (img.type() != CV_8UC1) options.img_height = img.rows;
cvtColor(image, img, COLOR_BGR2GRAY); options.dthreshold = threshold;
options.omax = octaves;
Mat img1_32; options.nsublevels = sublevels;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); options.diffusivity = diffusivity;
AKAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
if (!useProvidedKeypoints)
{
impl.Feature_Detection(keypoints);
}
AKAZEOptions options; if (!mask.empty())
options.descriptor = descriptor; {
options.descriptor_channels = descriptor_channels; KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
options.descriptor_size = descriptor_size; }
options.img_width = img.cols;
options.img_height = img.rows;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
AKAZEFeatures impl(options); if( descriptors.needed() )
impl.Create_Nonlinear_Scale_Space(img1_32); {
impl.Feature_Detection(keypoints); Mat& desc = descriptors.getMatRef();
impl.Compute_Descriptors(keypoints, desc);
if (!mask.empty()) CV_Assert((!desc.rows || desc.cols == descriptorSize()));
{ CV_Assert((!desc.rows || (desc.type() == descriptorType())));
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); }
} }
}
void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const int descriptor;
int descriptor_channels;
int descriptor_size;
float threshold;
int octaves;
int sublevels;
int diffusivity;
};
Ptr<AKAZE> AKAZE::create(int descriptor_type,
int descriptor_size, int descriptor_channels,
float threshold, int octaves,
int sublevels, int diffusivity)
{ {
cv::Mat img = image.getMat(); return makePtr<AKAZE_Impl>(descriptor_type, descriptor_size, descriptor_channels,
if (img.type() != CV_8UC1) threshold, octaves, sublevels, diffusivity);
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
cv::Mat& desc = descriptors.getMatRef();
AKAZEOptions options;
options.descriptor = descriptor;
options.descriptor_channels = descriptor_channels;
options.descriptor_size = descriptor_size;
options.img_width = img.cols;
options.img_height = img.rows;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
AKAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
impl.Compute_Descriptors(keypoints, desc);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
} }
} }
...@@ -75,11 +75,10 @@ protected: ...@@ -75,11 +75,10 @@ protected:
double confidence; double confidence;
}; };
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const; virtual void detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() );
virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const; virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
Params params; Params params;
AlgorithmInfo* info() const;
}; };
/* /*
...@@ -173,22 +172,22 @@ void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const ...@@ -173,22 +172,22 @@ void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const
fs << "maxConvexity" << maxConvexity; fs << "maxConvexity" << maxConvexity;
} }
SimpleBlobDetector::SimpleBlobDetector(const SimpleBlobDetector::Params &parameters) : SimpleBlobDetectorImpl::SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters) :
params(parameters) params(parameters)
{ {
} }
void SimpleBlobDetector::read( const cv::FileNode& fn ) void SimpleBlobDetectorImpl::read( const cv::FileNode& fn )
{ {
params.read(fn); params.read(fn);
} }
void SimpleBlobDetector::write( cv::FileStorage& fs ) const void SimpleBlobDetectorImpl::write( cv::FileStorage& fs ) const
{ {
params.write(fs); params.write(fs);
} }
void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
{ {
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat(); Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
(void)image; (void)image;
...@@ -302,7 +301,7 @@ void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, s ...@@ -302,7 +301,7 @@ void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, s
#endif #endif
} }
void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray) const void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray)
{ {
//TODO: support mask //TODO: support mask
keypoints.clear(); keypoints.clear();
...@@ -365,3 +364,10 @@ void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>& ...@@ -365,3 +364,10 @@ void SimpleBlobDetector::detectImpl(InputArray image, std::vector<cv::KeyPoint>&
keypoints.push_back(kpt); keypoints.push_back(kpt);
} }
} }
Ptr<SimpleBlobDetector> SimpleBlobDetector::create(const SimpleBlobDetector::Params& params)
{
return makePtr<SimpleBlobDetectorImpl>(params);
}
}
...@@ -42,9 +42,7 @@ ...@@ -42,9 +42,7 @@
the IEEE International Conference on Computer Vision (ICCV2011). the IEEE International Conference on Computer Vision (ICCV2011).
*/ */
#include <opencv2/features2d.hpp> #include "precomp.hpp"
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <fstream> #include <fstream>
#include <stdlib.h> #include <stdlib.h>
...@@ -53,7 +51,6 @@ ...@@ -53,7 +51,6 @@
namespace cv namespace cv
{ {
class BRISK_Impl : public BRISK class BRISK_Impl : public BRISK
{ {
public: public:
...@@ -62,17 +59,36 @@ public: ...@@ -62,17 +59,36 @@ public:
explicit BRISK_Impl(const std::vector<float> &radiusList, const std::vector<int> &numberList, explicit BRISK_Impl(const std::vector<float> &radiusList, const std::vector<int> &numberList,
float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>()); float dMax=5.85f, float dMin=8.2f, const std::vector<int> indexChange=std::vector<int>());
virtual ~BRISK_Impl();
int descriptorSize() const
{
return strings_;
}
int descriptorType() const
{
return CV_8U;
}
int defaultNorm() const
{
return NORM_HAMMING;
}
// call this to generate the kernel: // call this to generate the kernel:
// circle of radius r (pixels), with n points; // circle of radius r (pixels), with n points;
// short pairings with dMax, long pairings with dMin // short pairings with dMax, long pairings with dMin
void generateKernel(std::vector<float> &radiusList, void generateKernel(const std::vector<float> &radiusList,
std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f, const std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
std::vector<int> indexChange=std::vector<int>()); const std::vector<int> &indexChange=std::vector<int>());
protected: void detectAndCompute( InputArray image, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints );
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const; protected:
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const; void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints, void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
...@@ -256,16 +272,16 @@ protected: ...@@ -256,16 +272,16 @@ protected:
static const float basicSize_; static const float basicSize_;
}; };
const float BRISK::basicSize_ = 12.0f; const float BRISK_Impl::basicSize_ = 12.0f;
const unsigned int BRISK::scales_ = 64; const unsigned int BRISK_Impl::scales_ = 64;
const float BRISK::scalerange_ = 30.f; // 40->4 Octaves - else, this needs to be adjusted... const float BRISK_Impl::scalerange_ = 30.f; // 40->4 Octaves - else, this needs to be adjusted...
const unsigned int BRISK::n_rot_ = 1024; // discretization of the rotation look-up const unsigned int BRISK_Impl::n_rot_ = 1024; // discretization of the rotation look-up
const float BriskScaleSpace::safetyFactor_ = 1.0f; const float BriskScaleSpace::safetyFactor_ = 1.0f;
const float BriskScaleSpace::basicSize_ = 12.0f; const float BriskScaleSpace::basicSize_ = 12.0f;
// constructors // constructors
BRISK::BRISK(int thresh, int octaves_in, float patternScale) BRISK_Impl::BRISK_Impl(int thresh, int octaves_in, float patternScale)
{ {
threshold = thresh; threshold = thresh;
octaves = octaves_in; octaves = octaves_in;
...@@ -291,10 +307,12 @@ BRISK::BRISK(int thresh, int octaves_in, float patternScale) ...@@ -291,10 +307,12 @@ BRISK::BRISK(int thresh, int octaves_in, float patternScale)
nList[4] = 20; nList[4] = 20;
generateKernel(rList, nList, (float)(5.85 * patternScale), (float)(8.2 * patternScale)); generateKernel(rList, nList, (float)(5.85 * patternScale), (float)(8.2 * patternScale));
} }
BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float dMax, float dMin,
std::vector<int> indexChange) BRISK_Impl::BRISK_Impl(const std::vector<float> &radiusList,
const std::vector<int> &numberList,
float dMax, float dMin,
const std::vector<int> indexChange)
{ {
generateKernel(radiusList, numberList, dMax, dMin, indexChange); generateKernel(radiusList, numberList, dMax, dMin, indexChange);
threshold = 20; threshold = 20;
...@@ -302,10 +320,12 @@ BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float ...@@ -302,10 +320,12 @@ BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float
} }
void void
BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberList, float dMax, BRISK_Impl::generateKernel(const std::vector<float> &radiusList,
float dMin, std::vector<int> indexChange) const std::vector<int> &numberList,
float dMax, float dMin,
const std::vector<int>& _indexChange)
{ {
std::vector<int> indexChange = _indexChange;
dMax_ = dMax; dMax_ = dMax;
dMin_ = dMin; dMin_ = dMin;
...@@ -427,7 +447,7 @@ BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberLi ...@@ -427,7 +447,7 @@ BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberLi
// simple alternative: // simple alternative:
inline int inline int
BRISK::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const float key_x, BRISK_Impl::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, const float key_x,
const float key_y, const unsigned int scale, const unsigned int rot, const float key_y, const unsigned int scale, const unsigned int rot,
const unsigned int point) const const unsigned int point) const
{ {
...@@ -594,8 +614,8 @@ RoiPredicate(const float minX, const float minY, const float maxX, const float m ...@@ -594,8 +614,8 @@ RoiPredicate(const float minX, const float minY, const float maxX, const float m
// computes the descriptor // computes the descriptor
void void
BRISK::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints, BRISK_Impl::detectAndCompute( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
OutputArray _descriptors, bool useProvidedKeypoints) const OutputArray _descriptors, bool useProvidedKeypoints)
{ {
bool doOrientation=true; bool doOrientation=true;
if (useProvidedKeypoints) if (useProvidedKeypoints)
...@@ -609,7 +629,7 @@ BRISK::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& k ...@@ -609,7 +629,7 @@ BRISK::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& k
} }
void void
BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints, BRISK_Impl::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
OutputArray _descriptors, bool doDescriptors, bool doOrientation, OutputArray _descriptors, bool doDescriptors, bool doOrientation,
bool useProvidedKeypoints) const bool useProvidedKeypoints) const
{ {
...@@ -775,25 +795,8 @@ BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, s ...@@ -775,25 +795,8 @@ BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, s
delete[] _values; delete[] _values;
} }
int
BRISK::descriptorSize() const
{
return strings_;
}
int BRISK_Impl::~BRISK_Impl()
BRISK::descriptorType() const
{
return CV_8U;
}
int
BRISK::defaultNorm() const
{
return NORM_HAMMING;
}
BRISK::~BRISK()
{ {
delete[] patternPoints_; delete[] patternPoints_;
delete[] shortPairs_; delete[] shortPairs_;
...@@ -803,14 +806,7 @@ BRISK::~BRISK() ...@@ -803,14 +806,7 @@ BRISK::~BRISK()
} }
void void
BRISK::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const BRISK_Impl::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints) const
{
computeKeypointsNoOrientation(image, mask, keypoints);
computeDescriptorsAndOrOrientation(image, mask, keypoints, cv::noArray(), false, true, true);
}
void
BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints) const
{ {
Mat image = _image.getMat(), mask = _mask.getMat(); Mat image = _image.getMat(), mask = _mask.getMat();
if( image.type() != CV_8UC1 ) if( image.type() != CV_8UC1 )
...@@ -821,20 +817,7 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v ...@@ -821,20 +817,7 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v
briskScaleSpace.getKeypoints(threshold, keypoints); briskScaleSpace.getKeypoints(threshold, keypoints);
// remove invalid points // remove invalid points
removeInvalidPoints(mask, keypoints); KeyPointsFilter::runByPixelsMask(keypoints, mask);
}
void
BRISK::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
(*this)(image.getMat(), mask.getMat(), keypoints);
}
void
BRISK::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
} }
// construct telling the octaves number: // construct telling the octaves number:
...@@ -2084,7 +2067,7 @@ BriskLayer::BriskLayer(const cv::Mat& img_in, float scale_in, float offset_in) ...@@ -2084,7 +2067,7 @@ BriskLayer::BriskLayer(const cv::Mat& img_in, float scale_in, float offset_in)
scale_ = scale_in; scale_ = scale_in;
offset_ = offset_in; offset_ = offset_in;
// create an agast detector // create an agast detector
fast_9_16_ = makePtr<FastFeatureDetector>(1, true, FastFeatureDetector::TYPE_9_16); fast_9_16_ = FastFeatureDetector::create(1, true, FastFeatureDetector::TYPE_9_16);
makeOffsets(pixel_5_8_, (int)img_.step, 8); makeOffsets(pixel_5_8_, (int)img_.step, 8);
makeOffsets(pixel_9_16_, (int)img_.step, 16); makeOffsets(pixel_9_16_, (int)img_.step, 16);
} }
...@@ -2106,7 +2089,7 @@ BriskLayer::BriskLayer(const BriskLayer& layer, int mode) ...@@ -2106,7 +2089,7 @@ BriskLayer::BriskLayer(const BriskLayer& layer, int mode)
offset_ = 0.5f * scale_ - 0.5f; offset_ = 0.5f * scale_ - 0.5f;
} }
scores_ = cv::Mat::zeros(img_.rows, img_.cols, CV_8U); scores_ = cv::Mat::zeros(img_.rows, img_.cols, CV_8U);
fast_9_16_ = makePtr<FastFeatureDetector>(1, false, FastFeatureDetector::TYPE_9_16); fast_9_16_ = FastFeatureDetector::create(1, false, FastFeatureDetector::TYPE_9_16);
makeOffsets(pixel_5_8_, (int)img_.step, 8); makeOffsets(pixel_5_8_, (int)img_.step, 8);
makeOffsets(pixel_9_16_, (int)img_.step, 16); makeOffsets(pixel_9_16_, (int)img_.step, 16);
} }
...@@ -2318,4 +2301,16 @@ BriskLayer::twothirdsample(const cv::Mat& srcimg, cv::Mat& dstimg) ...@@ -2318,4 +2301,16 @@ BriskLayer::twothirdsample(const cv::Mat& srcimg, cv::Mat& dstimg)
resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA); resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA);
} }
Ptr<BRISK> BRISK::create(int thresh, int octaves, float patternScale)
{
return makePtr<BRISK_Impl>(thresh, octaves, patternScale);
}
// custom setup
Ptr<BRISK> BRISK::create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
float dMax, float dMin, const std::vector<int>& indexChange)
{
return makePtr<BRISK_Impl>(radiusList, numberList, dMax, dMin, indexChange);
}
} }
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
using std::vector;
Feature2D::~Feature2D() {}
/*
* Detect keypoints in an image.
* image The image.
* keypoints The detected keypoints.
* mask Mask specifying where to look for keypoints (optional). Must be a char
* matrix with non-zero values in the region of interest.
*/
void Feature2D::detect( InputArray image,
std::vector<KeyPoint>& keypoints,
InputArray mask )
{
detectAndCompute(image, mask, keypoints, noArray(), false);
}
void Feature2D::detect( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays _masks )
{
vector<Mat> images, masks;
_images.getMatVector(images);
size_t i, nimages = images.size();
if( !_masks.empty() )
{
_masks.getMatVector(masks);
CV_Assert(masks.size() == nimages);
}
keypoints.resize(nimages);
for( i = 0; i < nimages; i++ )
{
detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] );
}
}
/*
* Compute the descriptors for a set of keypoints in an image.
* image The image.
* keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
* descriptors Computed descriptors. Row i is the descriptor for keypoint i.
*/
void Feature2D::compute( InputArray image,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors )
{
detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
void Feature2D::compute( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays _descriptors )
{
if( !_descriptors.needed() )
return;
vector<Mat> images;
_images.getMatVector(images);
size_t i, nimages = images.size();
CV_Assert( keypoints.size() == nimages );
CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );
vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
descriptors.resize(nimages);
for( i = 0; i < nimages; i++ )
{
compute(images[i], keypoints[i], descriptors[i]);
}
}
/* Detects keypoints and computes the descriptors */
void Feature2D::detectAndCompute( InputArray, InputArray,
std::vector<KeyPoint>&,
OutputArray,
bool )
{
CV_Error(Error::StsNotImplemented, "");
}
int Feature2D::descriptorSize() const
{
return 0;
}
int Feature2D::descriptorType() const
{
return CV_32F;
}
int Feature2D::defaultNorm() const
{
int tp = descriptorType();
return tp == CV_8U ? NORM_HAMMING : NORM_L2;
}
// Return true if detector object is empty
bool Feature2D::empty() const
{
return true;
}
}
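
For reference, a short usage sketch (not part of the commit) of the new multi-image overloads implemented above; ORB is used as a concrete Feature2D, the file names are placeholders, and ORB::create() is assumed to provide default parameters as in the released 3.x API:

    #include <opencv2/features2d.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <vector>

    int main()
    {
        // Placeholder inputs: a small batch of grayscale images
        std::vector<cv::Mat> images;
        images.push_back(cv::imread("img1.png", cv::IMREAD_GRAYSCALE));
        images.push_back(cv::imread("img2.png", cv::IMREAD_GRAYSCALE));

        cv::Ptr<cv::Feature2D> orb = cv::ORB::create();

        // New detect(InputArrayOfArrays, ...) overload: one keypoint vector per image
        std::vector<std::vector<cv::KeyPoint> > keypoints;
        orb->detect(images, keypoints);

        // Matching batch compute(): descriptors must be a std::vector<cv::Mat>,
        // since Feature2D::compute above asserts kind() == _InputArray::STD_VECTOR_MAT
        std::vector<cv::Mat> descriptors;
        orb->compute(images, keypoints, descriptors);
        return 0;
    }
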
...@@ -140,5 +140,11 @@ namespace cv ...@@ -140,5 +140,11 @@ namespace cv
int diffusivity; int diffusivity;
}; };
Ptr<KAZE> KAZE::create(bool extended, bool upright,
float threshold,
int octaves, int sublevels,
int diffusivity)
{
return makePtr<KAZE_Impl>(extended, upright, threshold, octaves, sublevels, diffusivity);
}
} }
...@@ -22,12 +22,12 @@ struct AKAZEOptions { ...@@ -22,12 +22,12 @@ struct AKAZEOptions {
, soffset(1.6f) , soffset(1.6f)
, derivative_factor(1.5f) , derivative_factor(1.5f)
, sderivatives(1.0) , sderivatives(1.0)
, diffusivity(cv::DIFF_PM_G2) , diffusivity(KAZE::DIFF_PM_G2)
, dthreshold(0.001f) , dthreshold(0.001f)
, min_dthreshold(0.00001f) , min_dthreshold(0.00001f)
, descriptor(cv::DESCRIPTOR_MLDB) , descriptor(AKAZE::DESCRIPTOR_MLDB)
, descriptor_size(0) , descriptor_size(0)
, descriptor_channels(3) , descriptor_channels(3)
, descriptor_pattern_size(10) , descriptor_pattern_size(10)
......
...@@ -11,10 +11,12 @@ ...@@ -11,10 +11,12 @@
/* ************************************************************************* */ /* ************************************************************************* */
// Includes // Includes
#include "../precomp.hpp"
#include "AKAZEConfig.h" #include "AKAZEConfig.h"
#include "TEvolution.h" #include "TEvolution.h"
namespace cv
{
/* ************************************************************************* */ /* ************************************************************************* */
// AKAZE Class Declaration // AKAZE Class Declaration
class AKAZEFeatures { class AKAZEFeatures {
...@@ -22,7 +24,7 @@ class AKAZEFeatures { ...@@ -22,7 +24,7 @@ class AKAZEFeatures {
private: private:
AKAZEOptions options_; ///< Configuration options for AKAZE AKAZEOptions options_; ///< Configuration options for AKAZE
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
/// FED parameters /// FED parameters
int ncycles_; ///< Number of cycles int ncycles_; ///< Number of cycles
...@@ -59,4 +61,6 @@ public: ...@@ -59,4 +61,6 @@ public:
void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons,
int nbits, int pattern_size, int nchannels); int nbits, int pattern_size, int nchannels);
}
#endif #endif
...@@ -22,8 +22,8 @@ namespace cv ...@@ -22,8 +22,8 @@ namespace cv
/* ************************************************************************* */ /* ************************************************************************* */
// KAZE Class Declaration // KAZE Class Declaration
class KAZEFeatures { class KAZEFeatures
{
private: private:
/// Parameters of the Nonlinear diffusion class /// Parameters of the Nonlinear diffusion class
......
...@@ -8,10 +8,13 @@ ...@@ -8,10 +8,13 @@
#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__ #ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__
#define __OPENCV_FEATURES_2D_TEVOLUTION_H__ #define __OPENCV_FEATURES_2D_TEVOLUTION_H__
namespace cv
{
/* ************************************************************************* */ /* ************************************************************************* */
/// KAZE/A-KAZE nonlinear diffusion filtering evolution /// KAZE/A-KAZE nonlinear diffusion filtering evolution
struct TEvolution { struct TEvolution
{
TEvolution() { TEvolution() {
etime = 0.0f; etime = 0.0f;
esigma = 0.0f; esigma = 0.0f;
...@@ -20,11 +23,11 @@ struct TEvolution { ...@@ -20,11 +23,11 @@ struct TEvolution {
sigma_size = 0; sigma_size = 0;
} }
cv::Mat Lx, Ly; ///< First order spatial derivatives Mat Lx, Ly; ///< First order spatial derivatives
cv::Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
cv::Mat Lt; ///< Evolution image Mat Lt; ///< Evolution image
cv::Mat Lsmooth; ///< Smoothed image Mat Lsmooth; ///< Smoothed image
cv::Mat Ldet; ///< Detector response Mat Ldet; ///< Detector response
float etime; ///< Evolution time float etime; ///< Evolution time
float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2 float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
int octave; ///< Image octave int octave; ///< Image octave
...@@ -32,4 +35,6 @@ struct TEvolution { ...@@ -32,4 +35,6 @@ struct TEvolution {
int sigma_size; ///< Integer esigma. For computing the feature detector responses int sigma_size; ///< Integer esigma. For computing the feature detector responses
}; };
}
#endif #endif
...@@ -11,43 +11,37 @@ ...@@ -11,43 +11,37 @@
#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__ #ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__ #define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
/* ************************************************************************* */
// Includes
#include "../precomp.hpp"
/* ************************************************************************* */ /* ************************************************************************* */
// Declaration of functions // Declaration of functions
namespace cv { namespace cv
namespace details { {
namespace kaze {
// Gaussian 2D convolution
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
// Gaussian 2D convolution // Diffusivity functions
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma); void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
// Diffusivity functions float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); // Image derivatives
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Image derivatives // Nonlinear diffusion filtering scalar step
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale); void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Nonlinear diffusion filtering scalar step // For non-maxima suppresion
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// For non-maxima suppresion // Image downsampling
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); void halfsample_image(const cv::Mat& src, cv::Mat& dst);
// Image downsampling
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
}
}
} }
#endif #endif
...@@ -664,19 +664,11 @@ public: ...@@ -664,19 +664,11 @@ public:
int defaultNorm() const; int defaultNorm() const;
// Compute the ORB_Impl features and descriptors on an image // Compute the ORB_Impl features and descriptors on an image
void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const; void detectAndCompute( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
OutputArray descriptors, bool useProvidedKeypoints=false );
// Compute the ORB_Impl features and descriptors on an image
void operator()( InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
OutputArray descriptors, bool useProvidedKeypoints=false ) const;
AlgorithmInfo* info() const;
protected: protected:
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
int nfeatures; int nfeatures;
double scaleFactor; double scaleFactor;
int nlevels; int nlevels;
...@@ -703,17 +695,6 @@ int ORB_Impl::defaultNorm() const ...@@ -703,17 +695,6 @@ int ORB_Impl::defaultNorm() const
return NORM_HAMMING; return NORM_HAMMING;
} }
/** Compute the ORB_Impl features and descriptors on an image
* @param img the image to compute the features and descriptors on
* @param mask the mask to apply
* @param keypoints the resulting keypoints
*/
void ORB_Impl::operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const
{
(*this)(image, mask, keypoints, noArray(), false);
}
static void uploadORBKeypoints(const std::vector<KeyPoint>& src, std::vector<Vec3i>& buf, OutputArray dst) static void uploadORBKeypoints(const std::vector<KeyPoint>& src, std::vector<Vec3i>& buf, OutputArray dst)
{ {
size_t i, n = src.size(); size_t i, n = src.size();
...@@ -813,8 +794,10 @@ static void computeKeyPoints(const Mat& imagePyramid, ...@@ -813,8 +794,10 @@ static void computeKeyPoints(const Mat& imagePyramid,
Mat mask = maskPyramid.empty() ? Mat() : maskPyramid(layerInfo[level]); Mat mask = maskPyramid.empty() ? Mat() : maskPyramid(layerInfo[level]);
// Detect FAST features, 20 is a good threshold // Detect FAST features, 20 is a good threshold
FastFeatureDetector fd(fastThreshold, true); {
fd.detect(img, keypoints, mask); Ptr<FastFeatureDetector> fd = FastFeatureDetector::create(fastThreshold, true);
fd->detect(img, keypoints, mask);
}
// Remove keypoints very close to the border // Remove keypoints very close to the border
KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold); KeyPointsFilter::runByImageBorder(keypoints, img.size(), edgeThreshold);
...@@ -928,8 +911,9 @@ static void computeKeyPoints(const Mat& imagePyramid, ...@@ -928,8 +911,9 @@ static void computeKeyPoints(const Mat& imagePyramid,
* @param do_keypoints if true, the keypoints are computed, otherwise used as an input * @param do_keypoints if true, the keypoints are computed, otherwise used as an input
* @param do_descriptors if true, also computes the descriptors * @param do_descriptors if true, also computes the descriptors
*/ */
void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints, void ORB_Impl::detectAndCompute( InputArray _image, InputArray _mask,
OutputArray _descriptors, bool useProvidedKeypoints ) const std::vector<KeyPoint>& keypoints,
OutputArray _descriptors, bool useProvidedKeypoints )
{ {
CV_Assert(patchSize >= 2); CV_Assert(patchSize >= 2);
...@@ -1159,15 +1143,11 @@ void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector<KeyP ...@@ -1159,15 +1143,11 @@ void ORB_Impl::operator()( InputArray _image, InputArray _mask, std::vector<KeyP
} }
} }
void ORB_Impl::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const Ptr<ORB> ORB::create(int nfeatures, float scaleFactor, int nlevels, int edgeThreshold,
int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold)
{ {
(*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false); return makePtr<ORB_Impl>(nfeatures, scaleFactor, nlevels, edgeThreshold,
firstLevel, WTA_K, scoreType, patchSize, fastThreshold);
} }
void ORB_Impl::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}
} }
...@@ -314,31 +314,34 @@ private: ...@@ -314,31 +314,34 @@ private:
TEST( Features2d_DescriptorExtractor_BRISK, regression ) TEST( Features2d_DescriptorExtractor_BRISK, regression )
{ {
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk", (CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f, CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk",
DescriptorExtractor::create("BRISK") ); (CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
BRISK::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_DescriptorExtractor_ORB, regression ) TEST( Features2d_DescriptorExtractor_ORB, regression )
{ {
// TODO adjust the parameters below // TODO adjust the parameters below
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f, CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb",
DescriptorExtractor::create("ORB") ); (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
ORB::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_DescriptorExtractor_KAZE, regression ) TEST( Features2d_DescriptorExtractor_KAZE, regression )
{ {
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f, CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,
DescriptorExtractor::create("KAZE"), KAZE::create(),
L2<float>(), FeatureDetector::create("KAZE")); L2<float>() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_DescriptorExtractor_AKAZE, regression ) TEST( Features2d_DescriptorExtractor_AKAZE, regression )
{ {
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f, CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze",
DescriptorExtractor::create("AKAZE"), (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
Hamming(), FeatureDetector::create("AKAZE")); AKAZE::create(),
Hamming(), AKAZE::create());
test.safe_run(); test.safe_run();
} }
...@@ -249,48 +249,48 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ ) ...@@ -249,48 +249,48 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ )
TEST( Features2d_Detector_BRISK, regression ) TEST( Features2d_Detector_BRISK, regression )
{ {
CV_FeatureDetectorTest test( "detector-brisk", FeatureDetector::create("BRISK") ); CV_FeatureDetectorTest test( "detector-brisk", BRISK::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_FAST, regression ) TEST( Features2d_Detector_FAST, regression )
{ {
CV_FeatureDetectorTest test( "detector-fast", FeatureDetector::create("FAST") ); CV_FeatureDetectorTest test( "detector-fast", FastFeatureDetector::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_GFTT, regression ) TEST( Features2d_Detector_GFTT, regression )
{ {
CV_FeatureDetectorTest test( "detector-gftt", FeatureDetector::create("GFTT") ); CV_FeatureDetectorTest test( "detector-gftt", GFTTDetector::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_Harris, regression ) TEST( Features2d_Detector_Harris, regression )
{ {
CV_FeatureDetectorTest test( "detector-harris", FeatureDetector::create("HARRIS") ); CV_FeatureDetectorTest test( "detector-harris", GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_MSER, DISABLED_regression ) TEST( Features2d_Detector_MSER, DISABLED_regression )
{ {
CV_FeatureDetectorTest test( "detector-mser", FeatureDetector::create("MSER") ); CV_FeatureDetectorTest test( "detector-mser", MSER::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_ORB, regression ) TEST( Features2d_Detector_ORB, regression )
{ {
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") ); CV_FeatureDetectorTest test( "detector-orb", ORB::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_KAZE, regression ) TEST( Features2d_Detector_KAZE, regression )
{ {
CV_FeatureDetectorTest test( "detector-kaze", FeatureDetector::create("KAZE") ); CV_FeatureDetectorTest test( "detector-kaze", KAZE::create() );
test.safe_run(); test.safe_run();
} }
TEST( Features2d_Detector_AKAZE, regression ) TEST( Features2d_Detector_AKAZE, regression )
{ {
CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") ); CV_FeatureDetectorTest test( "detector-akaze", AKAZE::create() );
test.safe_run(); test.safe_run();
} }
...@@ -61,7 +61,6 @@ public: ...@@ -61,7 +61,6 @@ public:
protected: protected:
virtual void run(int) virtual void run(int)
{ {
cv::initModule_features2d();
CV_Assert(detector); CV_Assert(detector);
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME; string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
...@@ -121,51 +120,51 @@ protected: ...@@ -121,51 +120,51 @@ protected:
TEST(Features2d_Detector_Keypoints_BRISK, validation) TEST(Features2d_Detector_Keypoints_BRISK, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK")); CV_FeatureDetectorKeypointsTest test(BRISK::create());
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_FAST, validation) TEST(Features2d_Detector_Keypoints_FAST, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.FAST")); CV_FeatureDetectorKeypointsTest test(FastFeatureDetector::create());
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_HARRIS, validation) TEST(Features2d_Detector_Keypoints_HARRIS, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.HARRIS")); CV_FeatureDetectorKeypointsTest test(GFTTDetector::create(1000, 0.01, 1, 3, true, 0.04));
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_GFTT, validation) TEST(Features2d_Detector_Keypoints_GFTT, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.GFTT")); CV_FeatureDetectorKeypointsTest test(GFTTDetector::create());
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_MSER, validation) TEST(Features2d_Detector_Keypoints_MSER, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.MSER")); CV_FeatureDetectorKeypointsTest test(MSER::create());
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_ORB, validation) TEST(Features2d_Detector_Keypoints_ORB, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB")); CV_FeatureDetectorKeypointsTest test(ORB::create());
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_KAZE, validation) TEST(Features2d_Detector_Keypoints_KAZE, validation)
{ {
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE")); CV_FeatureDetectorKeypointsTest test(KAZE::create());
test.safe_run(); test.safe_run();
} }
TEST(Features2d_Detector_Keypoints_AKAZE, validation) TEST(Features2d_Detector_Keypoints_AKAZE, validation)
{ {
CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_KAZE))); CV_FeatureDetectorKeypointsTest test_kaze(AKAZE::create(AKAZE::DESCRIPTOR_KAZE));
test_kaze.safe_run(); test_kaze.safe_run();
CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr<FeatureDetector>(new cv::AKAZE(cv::DESCRIPTOR_MLDB))); CV_FeatureDetectorKeypointsTest test_mldb(AKAZE::create(AKAZE::DESCRIPTOR_MLDB));
test_mldb.safe_run(); test_mldb.safe_run();
} }
...@@ -43,6 +43,8 @@ ...@@ -43,6 +43,8 @@
#include "test_precomp.hpp" #include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#if 0
#include <vector> #include <vector>
#include <string> #include <string>
using namespace std; using namespace std;
...@@ -205,3 +207,5 @@ void CV_MserTest::run(int) ...@@ -205,3 +207,5 @@ void CV_MserTest::run(int)
} }
TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); } TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }
#endif
...@@ -47,10 +47,10 @@ using namespace cv; ...@@ -47,10 +47,10 @@ using namespace cv;
TEST(Features2D_ORB, _1996) TEST(Features2D_ORB, _1996)
{ {
Ptr<FeatureDetector> fd = FeatureDetector::create("ORB"); Ptr<FeatureDetector> fd = ORB::create();
fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible fd->set("nFeatures", 10000);//setting a higher maximum to make effect of threshold visible
fd->set("fastThreshold", 20);//more features than the default fd->set("fastThreshold", 20);//more features than the default
Ptr<DescriptorExtractor> de = DescriptorExtractor::create("ORB"); Ptr<DescriptorExtractor> de = fd;
Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png"); Mat image = imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.png");
ASSERT_FALSE(image.empty()); ASSERT_FALSE(image.empty());
......
...@@ -367,7 +367,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features) ...@@ -367,7 +367,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
else else
{ {
UMat descriptors; UMat descriptors;
(*surf)(gray_image, Mat(), features.keypoints, descriptors); surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
features.descriptors = descriptors.reshape(1, (int)features.keypoints.size()); features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
} }
} }
...@@ -375,7 +375,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features) ...@@ -375,7 +375,7 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels) OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
{ {
grid_size = _grid_size; grid_size = _grid_size;
orb = makePtr<ORB>(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels); orb = ORB::create(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
} }
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features) void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
...@@ -395,7 +395,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features) ...@@ -395,7 +395,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
} }
if (grid_size.area() == 1) if (grid_size.area() == 1)
(*orb)(gray_image, Mat(), features.keypoints, features.descriptors); orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors);
else else
{ {
features.keypoints.clear(); features.keypoints.clear();
...@@ -425,7 +425,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features) ...@@ -425,7 +425,7 @@ void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
// << " gray_image_part.dims=" << gray_image_part.dims << ", " // << " gray_image_part.dims=" << gray_image_part.dims << ", "
// << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n"); // << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");
(*orb)(gray_image_part, UMat(), points, descriptors); orb->detectAndCompute(gray_image_part, UMat(), points, descriptors);
features.keypoints.reserve(features.keypoints.size() + points.size()); features.keypoints.reserve(features.keypoints.size() + points.size());
for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp) for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
......
...@@ -671,7 +671,7 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok) ...@@ -671,7 +671,7 @@ Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator) KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator) : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{ {
setDetector(makePtr<GoodFeaturesToTrackDetector>()); setDetector(GFTTDetector::create());
setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>()); setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
setOutlierRejector(makePtr<NullOutlierRejector>()); setOutlierRejector(makePtr<NullOutlierRejector>());
} }
......