Commit 1a617614 authored by cbalint13's avatar cbalint13

Merge branch 'master' of https://github.com/Itseez/opencv_contrib into daisy

parents 1a9eee39 7b6fe5cd
...@@ -351,7 +351,7 @@ void RetinaOCLImpl::setupIPLMagnoChannel(const bool normaliseOutput, const float ...@@ -351,7 +351,7 @@ void RetinaOCLImpl::setupIPLMagnoChannel(const bool normaliseOutput, const float
_retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k; _retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k;
} }
void RetinaOCLImpl::run(const InputArray input) void RetinaOCLImpl::run(InputArray input)
{ {
oclMat &inputMatToConvert = getOclMatRef(input); oclMat &inputMatToConvert = getOclMatRef(input);
bool colorMode = convertToColorPlanes(inputMatToConvert, _inputBuffer); bool colorMode = convertToColorPlanes(inputMatToConvert, _inputBuffer);
......
...@@ -78,7 +78,7 @@ class CV_EXPORTS Saliency : public virtual Algorithm ...@@ -78,7 +78,7 @@ class CV_EXPORTS Saliency : public virtual Algorithm
* \param saliencyMap The computed saliency map. * \param saliencyMap The computed saliency map.
* \return true if the saliency map is computed, false otherwise * \return true if the saliency map is computed, false otherwise
*/ */
bool computeSaliency( const InputArray image, OutputArray saliencyMap ); bool computeSaliency( InputArray image, OutputArray saliencyMap );
/** /**
* \brief Get the name of the specific saliency type * \brief Get the name of the specific saliency type
...@@ -88,7 +88,7 @@ class CV_EXPORTS Saliency : public virtual Algorithm ...@@ -88,7 +88,7 @@ class CV_EXPORTS Saliency : public virtual Algorithm
protected: protected:
virtual bool computeSaliencyImpl( const InputArray image, OutputArray saliencyMap ) = 0; virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap ) = 0;
String className; String className;
}; };
...@@ -114,7 +114,7 @@ class CV_EXPORTS StaticSaliency : public virtual Saliency ...@@ -114,7 +114,7 @@ class CV_EXPORTS StaticSaliency : public virtual Saliency
*/ */
bool computeBinaryMap( const Mat& saliencyMap, Mat& binaryMap ); bool computeBinaryMap( const Mat& saliencyMap, Mat& binaryMap );
protected: protected:
virtual bool computeSaliencyImpl( const InputArray image, OutputArray saliencyMap )=0; virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap )=0;
}; };
...@@ -123,7 +123,7 @@ class CV_EXPORTS MotionSaliency : public virtual Saliency ...@@ -123,7 +123,7 @@ class CV_EXPORTS MotionSaliency : public virtual Saliency
{ {
protected: protected:
virtual bool computeSaliencyImpl( const InputArray image, OutputArray saliencyMap )=0; virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap )=0;
}; };
...@@ -132,7 +132,7 @@ class CV_EXPORTS Objectness : public virtual Saliency ...@@ -132,7 +132,7 @@ class CV_EXPORTS Objectness : public virtual Saliency
{ {
protected: protected:
virtual bool computeSaliencyImpl( const InputArray image, OutputArray saliencyMap )=0; virtual bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap )=0;
}; };
......
...@@ -94,7 +94,7 @@ public: ...@@ -94,7 +94,7 @@ public:
} }
protected: protected:
bool computeSaliencyImpl( const InputArray image, OutputArray saliencyMap ); bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap );
int resImWidth; int resImWidth;
int resImHeight; int resImHeight;
...@@ -154,7 +154,7 @@ protected: ...@@ -154,7 +154,7 @@ protected:
The saliency map is given by a single *Mat* (one for each frame of an hypothetical video The saliency map is given by a single *Mat* (one for each frame of an hypothetical video
stream). stream).
*/ */
bool computeSaliencyImpl( const InputArray image, OutputArray saliencyMap ); bool computeSaliencyImpl( InputArray image, OutputArray saliencyMap );
private: private:
...@@ -268,7 +268,7 @@ protected: ...@@ -268,7 +268,7 @@ protected:
specialized algorithm, the objectnessBoundingBox is a *vector\<Vec4i\>*. Each bounding box is specialized algorithm, the objectnessBoundingBox is a *vector\<Vec4i\>*. Each bounding box is
represented by a *Vec4i* for (minX, minY, maxX, maxY). represented by a *Vec4i* for (minX, minY, maxX, maxY).
*/ */
bool computeSaliencyImpl( const InputArray image, OutputArray objectnessBoundingBox ); bool computeSaliencyImpl( InputArray image, OutputArray objectnessBoundingBox );
private: private:
......
...@@ -460,7 +460,7 @@ void ObjectnessBING::write() const ...@@ -460,7 +460,7 @@ void ObjectnessBING::write() const
} }
bool ObjectnessBING::computeSaliencyImpl( const InputArray image, OutputArray objectnessBoundingBox ) bool ObjectnessBING::computeSaliencyImpl( InputArray image, OutputArray objectnessBoundingBox )
{ {
ValStructVec<float, Vec4i> finalBoxes; ValStructVec<float, Vec4i> finalBoxes;
getObjBndBoxesForSingleImage( image.getMat(), finalBoxes, 250 ); getObjBndBoxesForSingleImage( image.getMat(), finalBoxes, 250 );
......
...@@ -501,7 +501,7 @@ bool MotionSaliencyBinWangApr2014::templateReplacement( const Mat& finalBFMask, ...@@ -501,7 +501,7 @@ bool MotionSaliencyBinWangApr2014::templateReplacement( const Mat& finalBFMask,
return true; return true;
} }
bool MotionSaliencyBinWangApr2014::computeSaliencyImpl( const InputArray image, OutputArray saliencyMap ) bool MotionSaliencyBinWangApr2014::computeSaliencyImpl( InputArray image, OutputArray saliencyMap )
{ {
Mat highResBFMask; Mat highResBFMask;
Mat lowResBFMask; Mat lowResBFMask;
......
...@@ -62,7 +62,7 @@ Ptr<Saliency> Saliency::create( const String& saliencyType ) ...@@ -62,7 +62,7 @@ Ptr<Saliency> Saliency::create( const String& saliencyType )
return Ptr<Saliency>(); return Ptr<Saliency>();
} }
bool Saliency::computeSaliency( const InputArray image, OutputArray saliencyMap ) bool Saliency::computeSaliency( InputArray image, OutputArray saliencyMap )
{ {
if( image.empty() ) if( image.empty() )
return false; return false;
......
...@@ -73,7 +73,7 @@ void StaticSaliencySpectralResidual::write( cv::FileStorage& /*fs*/) const ...@@ -73,7 +73,7 @@ void StaticSaliencySpectralResidual::write( cv::FileStorage& /*fs*/) const
//params.write( fs ); //params.write( fs );
} }
bool StaticSaliencySpectralResidual::computeSaliencyImpl( const InputArray image, OutputArray saliencyMap ) bool StaticSaliencySpectralResidual::computeSaliencyImpl( InputArray image, OutputArray saliencyMap )
{ {
Mat grayTemp, grayDown; Mat grayTemp, grayDown;
std::vector<Mat> mv; std::vector<Mat> mv;
......
...@@ -76,7 +76,7 @@ They are competitive alternatives to existing keypoints in particular for embedd ...@@ -76,7 +76,7 @@ They are competitive alternatives to existing keypoints in particular for embedd
- An example on how to use the FREAK descriptor can be found at - An example on how to use the FREAK descriptor can be found at
opencv_source_code/samples/cpp/freak_demo.cpp opencv_source_code/samples/cpp/freak_demo.cpp
*/ */
class CV_EXPORTS FREAK : public Feature2D class CV_EXPORTS_W FREAK : public Feature2D
{ {
public: public:
...@@ -92,7 +92,7 @@ public: ...@@ -92,7 +92,7 @@ public:
@param nOctaves Number of octaves covered by the detected keypoints. @param nOctaves Number of octaves covered by the detected keypoints.
@param selectedPairs (Optional) user defined selected pairs indexes, @param selectedPairs (Optional) user defined selected pairs indexes,
*/ */
static Ptr<FREAK> create(bool orientationNormalized = true, CV_WRAP static Ptr<FREAK> create(bool orientationNormalized = true,
bool scaleNormalized = true, bool scaleNormalized = true,
float patternScale = 22.0f, float patternScale = 22.0f,
int nOctaves = 4, int nOctaves = 4,
...@@ -102,11 +102,11 @@ public: ...@@ -102,11 +102,11 @@ public:
/** @brief The class implements the keypoint detector introduced by @cite Agrawal08, synonym of StarDetector. : /** @brief The class implements the keypoint detector introduced by @cite Agrawal08, synonym of StarDetector. :
*/ */
class CV_EXPORTS StarDetector : public FeatureDetector class CV_EXPORTS_W StarDetector : public Feature2D
{ {
public: public:
//! the full constructor //! the full constructor
static Ptr<StarDetector> create(int maxSize=45, int responseThreshold=30, CV_WRAP static Ptr<StarDetector> create(int maxSize=45, int responseThreshold=30,
int lineThresholdProjected=10, int lineThresholdProjected=10,
int lineThresholdBinarized=8, int lineThresholdBinarized=8,
int suppressNonmaxSize=5); int suppressNonmaxSize=5);
...@@ -123,10 +123,10 @@ public: ...@@ -123,10 +123,10 @@ public:
opencv_source_code/samples/cpp/brief_match_test.cpp opencv_source_code/samples/cpp/brief_match_test.cpp
*/ */
class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor class CV_EXPORTS_W BriefDescriptorExtractor : public Feature2D
{ {
public: public:
static Ptr<BriefDescriptorExtractor> create( int bytes = 32 ); CV_WRAP static Ptr<BriefDescriptorExtractor> create( int bytes = 32 );
}; };
/** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID /** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID
...@@ -134,14 +134,42 @@ public: ...@@ -134,14 +134,42 @@ public:
An image descriptor that can be computed very fast, while being An image descriptor that can be computed very fast, while being
about as robust as, for example, SURF or BRIEF. about as robust as, for example, SURF or BRIEF.
*/ */
class CV_EXPORTS LUCID : public DescriptorExtractor class CV_EXPORTS_W LUCID : public Feature2D
{ {
public: public:
/** /**
* @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth * @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth
* @param blur_kernel kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth * @param blur_kernel kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth
*/ */
static Ptr<LUCID> create(const int lucid_kernel, const int blur_kernel); CV_WRAP static Ptr<LUCID> create(const int lucid_kernel, const int blur_kernel);
};
/*
* LATCH Descriptor
*/
/** latch Class for computing the LATCH descriptor.
If you find this code useful, please add a reference to the following paper in your work:
Gil Levi and Tal Hassner, "LATCH: Learned Arrangements of Three Patch Codes", arXiv preprint arXiv:1501.03719, 15 Jan. 2015
LATCH is a binary descriptor based on learned comparisons of triplets of image patches.
* bytes is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1
* rotationInvariance - whether or not the descriptor should compansate for orientation changes.
* half_ssd_size - the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7x
then the half_ssd_size should be (7-1)/2 = 3.
Note: the descriptor can be coupled with any keypoint extractor. The only demand is that if you use set rotationInvariance = True then
you will have to use an extractor which estimates the patch orientation (in degrees). Examples for such extractors are ORB and SIFT.
Note: a complete example can be found under /samples/cpp/tutorial_code/xfeatures2D/latch_match.cpp
*/
class CV_EXPORTS LATCH : public DescriptorExtractor
{
public:
static Ptr<LATCH> create(int bytes = 32, bool rotationInvariance = true, int half_ssd_size=3);
}; };
/** @brief Class implementing DAISY descriptor, described in @cite Tola10 /** @brief Class implementing DAISY descriptor, described in @cite Tola10
......
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
typedef perf::TestBaseWithParam<std::string> latch;
#define LATCH_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(latch, extract, testing::Values(LATCH_IMAGES))
{
    // Load the parametrized test image as grayscale and fail fast if missing.
    const string imagePath = getDataPath(GetParam());
    Mat image = imread(imagePath, IMREAD_GRAYSCALE);
    ASSERT_FALSE(image.empty()) << "Unable to load source image " << imagePath;

    Mat emptyMask;
    declare.in(image).time(90);

    // Keypoints come from SURF and are computed once, outside the timed cycle;
    // only LATCH descriptor extraction is benchmarked below.
    Ptr<SURF> surfDetector = SURF::create();
    vector<KeyPoint> keypoints;
    surfDetector->detect(image, keypoints, emptyMask);

    Ptr<LATCH> latchExtractor = LATCH::create();
    vector<uchar> latchDescriptors;
    TEST_CYCLE() latchExtractor->compute(image, keypoints, latchDescriptors);

    // Binary descriptors should be reproducible; tolerance is effectively exact.
    SANITY_CHECK(latchDescriptors, 1e-4);
}
This diff is collapsed.
...@@ -1039,6 +1039,13 @@ TEST( Features2d_DescriptorExtractor_LUCID, regression ) ...@@ -1039,6 +1039,13 @@ TEST( Features2d_DescriptorExtractor_LUCID, regression )
test.safe_run(); test.safe_run();
} }
TEST( Features2d_DescriptorExtractor_LATCH, regression )
{
    // Regression test: LATCH is a binary descriptor, so distances are measured
    // with the Hamming norm; max allowed distance to the reference is 1.
    CV_DescriptorExtractorTest<Hamming> test( "descriptor-latch", 1, LATCH::create() );
    test.safe_run();
}
/*#if CV_SSE2 /*#if CV_SSE2
...@@ -1247,3 +1254,31 @@ TEST(DISABLED_Features2d_SURF_using_mask, regression) ...@@ -1247,3 +1254,31 @@ TEST(DISABLED_Features2d_SURF_using_mask, regression)
FeatureDetectorUsingMaskTest test(SURF::create()); FeatureDetectorUsingMaskTest test(SURF::create());
test.safe_run(); test.safe_run();
} }
TEST( XFeatures2d_DescriptorExtractor, batch )
{
    // Exercise the batch (vector-of-images) detect/compute overloads of SIFT on
    // the 6-image "graf" dataset: each image must yield one keypoint set and one
    // descriptor matrix, each with a reasonable number of features.
    string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");
    vector<Mat> imgs, descriptors;
    vector<vector<KeyPoint> > keypoints;
    const int n = 6;
    Ptr<SIFT> sift = SIFT::create();

    for( int i = 0; i < n; i++ )
    {
        string imgname = format("%s/img%d.png", path.c_str(), i+1);
        // Use the named constant instead of the magic number 0 (== IMREAD_GRAYSCALE).
        Mat img = imread(imgname, IMREAD_GRAYSCALE);
        // Fail early with a useful message rather than pushing an empty Mat and
        // getting an obscure failure from detect/compute later.
        ASSERT_FALSE(img.empty()) << "Unable to load image " << imgname;
        imgs.push_back(img);
    }

    sift->detect(imgs, keypoints);
    sift->compute(imgs, keypoints, descriptors);

    // One result entry per input image.
    ASSERT_EQ((int)keypoints.size(), n);
    ASSERT_EQ((int)descriptors.size(), n);

    for( int i = 0; i < n; i++ )
    {
        // The graf images are richly textured; well over 100 features expected.
        EXPECT_GT((int)keypoints[i].size(), 100);
        EXPECT_GT(descriptors[i].rows, 100);
    }
}
...@@ -651,15 +651,16 @@ TEST(Features2d_RotationInvariance_Descriptor_SIFT, regression) ...@@ -651,15 +651,16 @@ TEST(Features2d_RotationInvariance_Descriptor_SIFT, regression)
test.safe_run(); test.safe_run();
} }
TEST(Features2d_RotationInvariance_Descriptor_DAISY, regression) TEST(Features2d_RotationInvariance_Descriptor_LATCH, regression)
{ {
DescriptorRotationInvarianceTest test(BRISK::create(), DescriptorRotationInvarianceTest test(SIFT::create(),
DAISY::create(15, 3, 8, 8, DAISY::NRM_NONE, noArray(), true, true), LATCH::create(),
NORM_L1, NORM_HAMMING,
0.79f); 0.9999f);
test.safe_run(); test.safe_run();
} }
/* /*
* Detector's scale invariance check * Detector's scale invariance check
*/ */
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment