Commit 5505aa28 authored by Vadim Pisarevsky

Merge pull request #3078 from vpisarev:refactor_features2d_take3

parents 51195645 1629d509
......@@ -33,7 +33,7 @@ if(BUILD_DOCS AND HAVE_SPHINX)
endif()
endforeach()
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching nonfree contrib legacy)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching)
list(REMOVE_ITEM BASE_MODULES ${FIXED_ORDER_MODULES})
......
......@@ -23,7 +23,7 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
......@@ -32,9 +32,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
......@@ -50,25 +51,19 @@ This tutorial code's is shown lines below. You can also download it from `here <
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create();
detector->setMinHessian(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
detector->detectAndCompute( img_1, keypoints_1, descriptors_1 );
detector->detectAndCompute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
//-- Step 2: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_L2);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
......
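For anyone porting code across the hunks above: the 2.4-style ``SurfFeatureDetector`` + ``SurfDescriptorExtractor`` pair collapses into a single ``cv::xfeatures2d::SURF`` object. A condensed sketch of the new flow, assuming an input image ``img`` and an OpenCV build with the opencv_contrib ``xfeatures2d`` module (``setMinHessian`` is the setter used in this patch):

.. code-block:: cpp

    #include "opencv2/features2d.hpp"
    #include "opencv2/xfeatures2d.hpp"
    using namespace cv;
    using namespace cv::xfeatures2d;

    // One object now both detects keypoints and computes descriptors.
    Ptr<SURF> detector = SURF::create();
    detector->setMinHessian(400);
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
    detector->detectAndCompute(img, noArray(), keypoints, descriptors);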
......@@ -22,7 +22,7 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
......@@ -30,11 +30,11 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
......@@ -53,12 +53,12 @@ This tutorial code's is shown lines below. You can also download it from `here <
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
detector->detect( img_1, keypoints_1 );
detector->detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
......
......@@ -19,10 +19,116 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
/**
* @file SURF_FlannMatcher
* @brief SURF detector + descriptor + FLANN Matcher
* @author A. Huaman
*/
#include <stdio.h>
#include <iostream>
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
.. literalinclude:: ../../../../samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp
:language: cpp
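The min/max-distance filter above is a simple heuristic; a common alternative, not part of this tutorial, is Lowe's ratio test on the two nearest neighbours returned by ``knnMatch``. A sketch, assuming ``descriptors_1`` and ``descriptors_2`` as in the listing:

.. code-block:: cpp

    FlannBasedMatcher matcher;
    std::vector< std::vector<DMatch> > knn_matches;
    matcher.knnMatch( descriptors_1, descriptors_2, knn_matches, 2 );

    std::vector< DMatch > good_matches;
    for( size_t i = 0; i < knn_matches.size(); i++ )
    {
        // Keep a match only if it clearly beats the runner-up.
        if( knn_matches[i].size() == 2 &&
            knn_matches[i][0].distance < 0.7f * knn_matches[i][1].distance )
            good_matches.push_back( knn_matches[i][0] );
    }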
Explanation
============
......
......@@ -20,7 +20,7 @@ Theory
Code
====
This tutorial code is shown in the lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_
This tutorial code is shown in the lines below.
.. code-block:: cpp
......@@ -30,9 +30,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
......
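Only the include/namespace changes of this tutorial are shown above. For context, the homography step the tutorial builds toward typically looks like this (a sketch; ``keypoints_object``, ``keypoints_scene`` and ``good_matches`` are the usual names from the full sample, not part of this diff):

.. code-block:: cpp

    std::vector<Point2f> obj, scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        // Collect the matched point locations from both images.
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    Mat H = findHomography( obj, scene, RANSAC );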
......@@ -3050,13 +3050,13 @@ The class provides the following features for all derived classes:
Here is an example of SIFT use in your application via the Algorithm interface: ::
#include "opencv2/opencv.hpp"
#include "opencv2/nonfree.hpp"
#include "opencv2/xfeatures2d.hpp"
...
using namespace cv::xfeatures2d;
initModule_nonfree(); // to load SURF/SIFT etc.
...
Ptr<Feature2D> sift = Algorithm::create<Feature2D>("Feature2D.SIFT");
Ptr<Feature2D> sift = SIFT::create();
FileStorage fs("sift_params.xml", FileStorage::READ);
if( fs.isOpened() ) // if we have file with parameters, read them
......@@ -3066,7 +3066,7 @@ Here is example of SIFT use in your application via Algorithm interface: ::
}
else // else modify the parameters and store them; user can later edit the file to use different parameters
{
sift->set("contrastThreshold", 0.01f); // lower the contrast threshold, compared to the default value
sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
{
WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
......@@ -3076,7 +3076,7 @@ Here is example of SIFT use in your application via Algorithm interface: ::
Mat image = imread("myimage.png", 0), descriptors;
vector<KeyPoint> keypoints;
(*sift)(image, noArray(), keypoints, descriptors);
sift->detectAndCompute(image, noArray(), keypoints, descriptors);
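Putting the edits above together, the post-refactor snippet reads roughly as follows (a condensed sketch; the ``FileStorage`` parameter I/O is elided): ::

    #include "opencv2/opencv.hpp"
    #include "opencv2/xfeatures2d.hpp"
    using namespace cv::xfeatures2d;

    Ptr<SIFT> sift = SIFT::create();
    sift->setContrastThreshold(0.01f); // lower the contrast threshold

    Mat image = imread("myimage.png", 0), descriptors;
    std::vector<KeyPoint> keypoints;
    sift->detectAndCompute(image, noArray(), keypoints, descriptors);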
Algorithm::name
---------------
......
......@@ -84,38 +84,5 @@ Creates a descriptor extractor by name.
The current implementation supports the following types of descriptor extractors:
* ``"SIFT"`` -- :ocv:class:`SIFT`
* ``"SURF"`` -- :ocv:class:`SURF`
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
* ``"BRISK"`` -- :ocv:class:`BRISK`
* ``"ORB"`` -- :ocv:class:`ORB`
* ``"FREAK"`` -- :ocv:class:`FREAK`
A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
:ocv:class:`OpponentColorDescriptorExtractor` ) + descriptor extractor name (see above),
for example: ``"OpponentSIFT"`` .
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor : public DescriptorExtractor
Class adapting a descriptor extractor to compute descriptors in the Opponent Color Space
(refer to Van de Sande et al., CGIV 2008 *Color Descriptors for Object Category Recognition*).
The input RGB image is transformed into the Opponent Color Space. Then, an unadapted descriptor extractor
(set in the constructor) computes descriptors on each of the three channels and concatenates
them into a single color descriptor. ::
class OpponentColorDescriptorExtractor : public DescriptorExtractor
{
public:
OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
virtual int defaultNorm() const;
protected:
...
};
* ``"ORB"`` -- :ocv:class:`ORB`
\ No newline at end of file
......@@ -36,38 +36,6 @@ Detects corners using the FAST algorithm by [Rosten06]_.
.. [Rosten06] E. Rosten. Machine Learning for High-speed Corner Detection, 2006.
BriefDescriptorExtractor
------------------------
.. ocv:class:: BriefDescriptorExtractor : public DescriptorExtractor
Class for computing BRIEF descriptors, described in the paper by M. Calonder, V. Lepetit,
C. Strecha, and P. Fua, *BRIEF: Binary Robust Independent Elementary Features*,
11th European Conference on Computer Vision (ECCV), Heraklion, Crete, LNCS, Springer, September 2010. ::
class BriefDescriptorExtractor : public DescriptorExtractor
{
public:
static const int PATCH_SIZE = 48;
static const int KERNEL_SIZE = 9;
// bytes is the length of the descriptor in bytes. It can be 16, 32 or 64.
BriefDescriptorExtractor( int bytes = 32 );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
virtual int defaultNorm() const;
protected:
...
};
.. note::
* A complete BRIEF extractor sample can be found at opencv_source_code/samples/cpp/brief_match_test.cpp
MSER
----
.. ocv:class:: MSER : public FeatureDetector
......@@ -213,43 +181,6 @@ Finds keypoints in an image and computes their descriptors
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
FREAK
-----
.. ocv:class:: FREAK : public DescriptorExtractor
Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in [AOV12]_. The algorithm proposes a novel keypoint descriptor inspired by the human visual system, and more precisely the retina, coined Fast Retina Keypoint (FREAK). A cascade of binary strings is computed by efficiently comparing image intensities over a retinal sampling pattern. FREAKs are in general faster to compute, with a lower memory load, and more robust than SIFT, SURF or BRISK. They are competitive alternatives to existing keypoints, in particular for embedded applications.
.. [AOV12] A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012. CVPR 2012 Open Source Award Winner.
.. note::
* An example on how to use the FREAK descriptor can be found at opencv_source_code/samples/cpp/freak_demo.cpp
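For reference, the legacy usage pattern of this class before it moved to ``xfeatures2d`` (a sketch, assuming ``keypoints`` were produced by some detector): ::

    FREAK extractor; // defaults: orientation + scale normalization, patternScale 22
    Mat descriptors;
    extractor.compute( image, keypoints, descriptors );
    // FREAK descriptors are binary strings; match them with NORM_HAMMING.
    BFMatcher matcher( NORM_HAMMING );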
FREAK::FREAK
------------
The FREAK constructor
.. ocv:function:: FREAK::FREAK( bool orientationNormalized=true, bool scaleNormalized=true, float patternScale=22.0f, int nOctaves=4, const vector<int>& selectedPairs=vector<int>() )
:param orientationNormalized: Enable orientation normalization.
:param scaleNormalized: Enable scale normalization.
:param patternScale: Scaling of the description pattern.
:param nOctaves: Number of octaves covered by the detected keypoints.
:param selectedPairs: (Optional) user defined selected pairs indexes,
FREAK::selectPairs
------------------
Select the 512 best description pair indexes from an input (grayscale) image set. FREAK is available with a set of pairs learned off-line. Researchers can run a training process to learn their own set of pairs. For more details read section 4.2 in: A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012.
We notice that for keypoint matching applications, image content has little effect on the selected pairs unless it is very specific; what does matter is the detector type (blobs, corners, ...) and the options used (scale/rotation invariance, ...). Reduce corrThresh if not enough pairs are selected (43 points --> 903 possible pairs).
.. ocv:function:: vector<int> FREAK::selectPairs(const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, const double corrThresh = 0.7, bool verbose = true)
:param images: Grayscale image input set.
:param keypoints: Set of detected keypoints.
:param corrThresh: Correlation threshold.
:param verbose: Prints pair selection information.
KAZE
----
.. ocv:class:: KAZE : public Feature2D
......@@ -312,3 +243,10 @@ The AKAZE constructor
:param octaves: Maximum octave evolution of the image
:param sublevels: Default number of sublevels per scale level
:param diffusivity: Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or DIFF_CHARBONNIER
SIFT
----
.. ocv:class:: SIFT : public Feature2D
The SIFT algorithm has been moved to the opencv_contrib/xfeatures2d module.
\ No newline at end of file
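A minimal sketch of using SIFT after the move (assuming an OpenCV build with opencv_contrib available): ::

    #include "opencv2/xfeatures2d.hpp"

    cv::Ptr<cv::xfeatures2d::SIFT> sift = cv::xfeatures2d::SIFT::create();
    sift->detectAndCompute(image, cv::noArray(), keypoints, descriptors);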
......@@ -11,6 +11,5 @@ features2d. 2D Features Framework
common_interfaces_of_feature_detectors
common_interfaces_of_descriptor_extractors
common_interfaces_of_descriptor_matchers
common_interfaces_of_generic_descriptor_matchers
drawing_function_of_keypoints_and_matches
object_categorization
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <algorithm>
#include <vector>
#include <iostream>
#include <iomanip>
using namespace cv;
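// smoothedSum: box-filtered (smoothed) intensity around pt offset by (x, y),
// evaluated in O(1) from the integral image 'sum': the sum over a
// KERNEL_SIZE x KERNEL_SIZE window is S(br) - S(bl) - S(tr) + S(tl).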
inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
{
static const int HALF_KERNEL = BriefDescriptorExtractor::KERNEL_SIZE / 2;
int img_y = (int)(pt.pt.y + 0.5) + y;
int img_x = (int)(pt.pt.x + 0.5) + x;
return sum.at<int>(img_y + HALF_KERNEL + 1, img_x + HALF_KERNEL + 1)
- sum.at<int>(img_y + HALF_KERNEL + 1, img_x - HALF_KERNEL)
- sum.at<int>(img_y - HALF_KERNEL, img_x + HALF_KERNEL + 1)
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}
static void pixelTests16(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
const KeyPoint& pt = keypoints[i];
#include "generated_16.i"
}
}
static void pixelTests32(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
const KeyPoint& pt = keypoints[i];
#include "generated_32.i"
}
}
static void pixelTests64(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
const KeyPoint& pt = keypoints[i];
#include "generated_64.i"
}
}
namespace cv
{
BriefDescriptorExtractor::BriefDescriptorExtractor(int bytes) :
bytes_(bytes), test_fn_(NULL)
{
switch (bytes)
{
case 16:
test_fn_ = pixelTests16;
break;
case 32:
test_fn_ = pixelTests32;
break;
case 64:
test_fn_ = pixelTests64;
break;
default:
CV_Error(Error::StsBadArg, "bytes must be 16, 32, or 64");
}
}
int BriefDescriptorExtractor::descriptorSize() const
{
return bytes_;
}
int BriefDescriptorExtractor::descriptorType() const
{
return CV_8UC1;
}
int BriefDescriptorExtractor::defaultNorm() const
{
return NORM_HAMMING;
}
void BriefDescriptorExtractor::read( const FileNode& fn)
{
int dSize = fn["descriptorSize"];
switch (dSize)
{
case 16:
test_fn_ = pixelTests16;
break;
case 32:
test_fn_ = pixelTests32;
break;
case 64:
test_fn_ = pixelTests64;
break;
default:
CV_Error(Error::StsBadArg, "descriptorSize must be 16, 32, or 64");
}
bytes_ = dSize;
}
void BriefDescriptorExtractor::write( FileStorage& fs) const
{
fs << "descriptorSize" << bytes_;
}
void BriefDescriptorExtractor::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
// Construct integral image for fast smoothing (box filter)
Mat sum;
Mat grayImage = image.getMat();
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
///TODO allow the user to pass in a precomputed integral image
//if(image.type() == CV_32S)
// sum = image;
//else
integral( grayImage, sum, CV_32S);
//Remove keypoints very close to the border
KeyPointsFilter::runByImageBorder(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);
descriptors.create((int)keypoints.size(), bytes_, CV_8U);
descriptors.setTo(Scalar::all(0));
test_fn_(sum, keypoints, descriptors);
}
} // namespace cv
......@@ -98,13 +98,6 @@ void DescriptorExtractor::removeBorderKeypoints( std::vector<KeyPoint>& keypoint
Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExtractorType)
{
if( descriptorExtractorType.find("Opponent") == 0 )
{
size_t pos = String("Opponent").size();
String type = descriptorExtractorType.substr(pos);
return makePtr<OpponentColorDescriptorExtractor>(DescriptorExtractor::create(type));
}
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}
......@@ -114,151 +107,4 @@ CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<
DescriptorExtractor::compute(image, keypoints, descriptors);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/****************************************************************************************\
* OpponentColorDescriptorExtractor *
\****************************************************************************************/
OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& _descriptorExtractor ) :
descriptorExtractor(_descriptorExtractor)
{
CV_Assert( descriptorExtractor );
}
static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, std::vector<Mat>& opponentChannels )
{
if( bgrImage.type() != CV_8UC3 )
CV_Error( Error::StsBadArg, "input image must be a BGR image of type CV_8UC3" );
// Prepare opponent color space storage matrices.
opponentChannels.resize( 3 );
opponentChannels[0] = cv::Mat(bgrImage.size(), CV_8UC1); // R-G RED-GREEN
opponentChannels[1] = cv::Mat(bgrImage.size(), CV_8UC1); // R+G-2B YELLOW-BLUE
opponentChannels[2] = cv::Mat(bgrImage.size(), CV_8UC1); // R+G+B
for(int y = 0; y < bgrImage.rows; ++y)
for(int x = 0; x < bgrImage.cols; ++x)
{
Vec3b v = bgrImage.at<Vec3b>(y, x);
uchar& b = v[0];
uchar& g = v[1];
uchar& r = v[2];
opponentChannels[0].at<uchar>(y, x) = saturate_cast<uchar>(0.5f * (255 + g - r)); // (R - G)/sqrt(2), but converted to the destination data type
opponentChannels[1].at<uchar>(y, x) = saturate_cast<uchar>(0.25f * (510 + r + g - 2*b)); // (R + G - 2B)/sqrt(6), but converted to the destination data type
opponentChannels[2].at<uchar>(y, x) = saturate_cast<uchar>(1.f/3.f * (r + g + b)); // (R + G + B)/sqrt(3), but converted to the destination data type
}
}
struct KP_LessThan
{
KP_LessThan(const std::vector<KeyPoint>& _kp) : kp(&_kp) {}
bool operator()(int i, int j) const
{
return (*kp)[i].class_id < (*kp)[j].class_id;
}
const std::vector<KeyPoint>* kp;
};
void OpponentColorDescriptorExtractor::computeImpl( InputArray _bgrImage, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
Mat bgrImage = _bgrImage.getMat();
std::vector<Mat> opponentChannels;
convertBGRImageToOpponentColorSpace( bgrImage, opponentChannels );
const int N = 3; // channels count
std::vector<KeyPoint> channelKeypoints[N];
Mat channelDescriptors[N];
std::vector<int> idxs[N];
// Compute descriptors three times, once for each Opponent channel to concatenate into a single color descriptor
int maxKeypointsCount = 0;
for( int ci = 0; ci < N; ci++ )
{
channelKeypoints[ci].insert( channelKeypoints[ci].begin(), keypoints.begin(), keypoints.end() );
// Use class_id member to get indices into initial keypoints vector
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
channelKeypoints[ci][ki].class_id = (int)ki;
descriptorExtractor->compute( opponentChannels[ci], channelKeypoints[ci], channelDescriptors[ci] );
idxs[ci].resize( channelKeypoints[ci].size() );
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
{
idxs[ci][ki] = (int)ki;
}
std::sort( idxs[ci].begin(), idxs[ci].end(), KP_LessThan(channelKeypoints[ci]) );
maxKeypointsCount = std::max( maxKeypointsCount, (int)channelKeypoints[ci].size());
}
std::vector<KeyPoint> outKeypoints;
outKeypoints.reserve( keypoints.size() );
int dSize = descriptorExtractor->descriptorSize();
Mat mergedDescriptors( maxKeypointsCount, 3*dSize, descriptorExtractor->descriptorType() );
int mergedCount = 0;
// cp - current channel position
size_t cp[] = {0, 0, 0};
while( cp[0] < channelKeypoints[0].size() &&
cp[1] < channelKeypoints[1].size() &&
cp[2] < channelKeypoints[2].size() )
{
const int maxInitIdx = std::max( 0, std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
channelKeypoints[2][idxs[2][cp[2]]].class_id ) ) );
while( channelKeypoints[0][idxs[0][cp[0]]].class_id < maxInitIdx && cp[0] < channelKeypoints[0].size() ) { cp[0]++; }
while( channelKeypoints[1][idxs[1][cp[1]]].class_id < maxInitIdx && cp[1] < channelKeypoints[1].size() ) { cp[1]++; }
while( channelKeypoints[2][idxs[2][cp[2]]].class_id < maxInitIdx && cp[2] < channelKeypoints[2].size() ) { cp[2]++; }
if( cp[0] >= channelKeypoints[0].size() || cp[1] >= channelKeypoints[1].size() || cp[2] >= channelKeypoints[2].size() )
break;
if( channelKeypoints[0][idxs[0][cp[0]]].class_id == maxInitIdx &&
channelKeypoints[1][idxs[1][cp[1]]].class_id == maxInitIdx &&
channelKeypoints[2][idxs[2][cp[2]]].class_id == maxInitIdx )
{
outKeypoints.push_back( keypoints[maxInitIdx] );
// merge descriptors
for( int ci = 0; ci < N; ci++ )
{
Mat dst = mergedDescriptors(Range(mergedCount, mergedCount+1), Range(ci*dSize, (ci+1)*dSize));
channelDescriptors[ci].row( idxs[ci][cp[ci]] ).copyTo( dst );
cp[ci]++;
}
mergedCount++;
}
}
mergedDescriptors.rowRange(0, mergedCount).copyTo( descriptors );
std::swap( outKeypoints, keypoints );
}
void OpponentColorDescriptorExtractor::read( const FileNode& fn )
{
descriptorExtractor->read(fn);
}
void OpponentColorDescriptorExtractor::write( FileStorage& fs ) const
{
descriptorExtractor->write(fs);
}
int OpponentColorDescriptorExtractor::descriptorSize() const
{
return 3*descriptorExtractor->descriptorSize();
}
int OpponentColorDescriptorExtractor::descriptorType() const
{
return descriptorExtractor->descriptorType();
}
int OpponentColorDescriptorExtractor::defaultNorm() const
{
return descriptorExtractor->defaultNorm();
}
bool OpponentColorDescriptorExtractor::empty() const
{
return !descriptorExtractor || descriptorExtractor->empty();
}
}
......@@ -106,24 +106,6 @@ void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector<KeyPoint
Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
{
if( detectorType.find("Grid") == 0 )
{
return makePtr<GridAdaptedFeatureDetector>(FeatureDetector::create(
detectorType.substr(strlen("Grid"))));
}
if( detectorType.find("Pyramid") == 0 )
{
return makePtr<PyramidAdaptedFeatureDetector>(FeatureDetector::create(
detectorType.substr(strlen("Pyramid"))));
}
if( detectorType.find("Dynamic") == 0 )
{
return makePtr<DynamicAdaptedFeatureDetector>(AdjusterAdapter::create(
detectorType.substr(strlen("Dynamic"))));
}
if( detectorType.compare( "HARRIS" ) == 0 )
{
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
......@@ -176,212 +158,4 @@ void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoin
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
* DenseFeatureDetector
*/
DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
float _featureScaleMul, int _initXyStep,
int _initImgBound, bool _varyXyStepWithScale,
bool _varyImgBoundWithScale ) :
initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
{}
void DenseFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
float curScale = static_cast<float>(initFeatureScale);
int curStep = initXyStep;
int curBound = initImgBound;
for( int curLevel = 0; curLevel < featureScaleLevels; curLevel++ )
{
for( int x = curBound; x < image.cols - curBound; x += curStep )
{
for( int y = curBound; y < image.rows - curBound; y += curStep )
{
keypoints.push_back( KeyPoint(static_cast<float>(x), static_cast<float>(y), curScale) );
}
}
curScale = static_cast<float>(curScale * featureScaleMul);
if( varyXyStepWithScale ) curStep = static_cast<int>( curStep * featureScaleMul + 0.5f );
if( varyImgBoundWithScale ) curBound = static_cast<int>( curBound * featureScaleMul + 0.5f );
}
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
/*
* GridAdaptedFeatureDetector
*/
GridAdaptedFeatureDetector::GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& _detector,
int _maxTotalKeypoints, int _gridRows, int _gridCols )
: detector(_detector), maxTotalKeypoints(_maxTotalKeypoints), gridRows(_gridRows), gridCols(_gridCols)
{}
bool GridAdaptedFeatureDetector::empty() const
{
return !detector || detector->empty();
}
struct ResponseComparator
{
bool operator() (const KeyPoint& a, const KeyPoint& b)
{
return std::abs(a.response) > std::abs(b.response);
}
};
static void keepStrongest( int N, std::vector<KeyPoint>& keypoints )
{
if( (int)keypoints.size() > N )
{
std::vector<KeyPoint>::iterator nth = keypoints.begin() + N;
std::nth_element( keypoints.begin(), nth, keypoints.end(), ResponseComparator() );
keypoints.erase( nth, keypoints.end() );
}
}
namespace {
class GridAdaptedFeatureDetectorInvoker : public ParallelLoopBody
{
private:
int gridRows_, gridCols_;
int maxPerCell_;
std::vector<KeyPoint>& keypoints_;
const Mat& image_;
const Mat& mask_;
const Ptr<FeatureDetector>& detector_;
Mutex* kptLock_;
GridAdaptedFeatureDetectorInvoker& operator=(const GridAdaptedFeatureDetectorInvoker&); // to quiet MSVC
public:
GridAdaptedFeatureDetectorInvoker(const Ptr<FeatureDetector>& detector, const Mat& image, const Mat& mask,
std::vector<KeyPoint>& keypoints, int maxPerCell, int gridRows, int gridCols,
cv::Mutex* kptLock)
: gridRows_(gridRows), gridCols_(gridCols), maxPerCell_(maxPerCell),
keypoints_(keypoints), image_(image), mask_(mask), detector_(detector),
kptLock_(kptLock)
{
}
void operator() (const Range& range) const
{
for (int i = range.start; i < range.end; ++i)
{
int celly = i / gridCols_;
int cellx = i - celly * gridCols_;
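// Integer partition of the image into gridRows_ x gridCols_ cells: cell
// (cellx, celly) covers rows [celly*rows/gridRows_, (celly+1)*rows/gridRows_)
// and the analogous column span, so the cells tile the image without gaps.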
Range row_range((celly*image_.rows)/gridRows_, ((celly+1)*image_.rows)/gridRows_);
Range col_range((cellx*image_.cols)/gridCols_, ((cellx+1)*image_.cols)/gridCols_);
Mat sub_image = image_(row_range, col_range);
Mat sub_mask;
if (!mask_.empty()) sub_mask = mask_(row_range, col_range);
std::vector<KeyPoint> sub_keypoints;
sub_keypoints.reserve(maxPerCell_);
detector_->detect( sub_image, sub_keypoints, sub_mask );
keepStrongest( maxPerCell_, sub_keypoints );
std::vector<cv::KeyPoint>::iterator it = sub_keypoints.begin(),
end = sub_keypoints.end();
for( ; it != end; ++it )
{
it->pt.x += col_range.start;
it->pt.y += row_range.start;
}
cv::AutoLock join_keypoints(*kptLock_);
keypoints_.insert( keypoints_.end(), sub_keypoints.begin(), sub_keypoints.end() );
}
}
};
} // namespace
void GridAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
if (_image.empty() || maxTotalKeypoints < gridRows * gridCols)
{
keypoints.clear();
return;
}
keypoints.reserve(maxTotalKeypoints);
int maxPerCell = maxTotalKeypoints / (gridRows * gridCols);
Mat image = _image.getMat(), mask = _mask.getMat();
cv::Mutex kptLock;
cv::parallel_for_(cv::Range(0, gridRows * gridCols),
GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols, &kptLock));
}
/*
* PyramidAdaptedFeatureDetector
*/
PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& _detector, int _maxLevel )
: detector(_detector), maxLevel(_maxLevel)
{}
bool PyramidAdaptedFeatureDetector::empty() const
{
return !detector || detector->empty();
}
void PyramidAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
Mat src = image;
Mat src_mask = mask;
Mat dilated_mask;
if( !mask.empty() )
{
dilate( mask, dilated_mask, Mat() );
Mat mask255( mask.size(), CV_8UC1, Scalar(0) );
mask255.setTo( Scalar(255), dilated_mask != 0 );
dilated_mask = mask255;
}
for( int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2 )
{
// Detect on current level of the pyramid
std::vector<KeyPoint> new_pts;
detector->detect( src, new_pts, src_mask );
std::vector<KeyPoint>::iterator it = new_pts.begin(),
end = new_pts.end();
for( ; it != end; ++it)
{
it->pt.x *= multiplier;
it->pt.y *= multiplier;
it->size *= multiplier;
it->octave = l;
}
keypoints.insert( keypoints.end(), new_pts.begin(), new_pts.end() );
// Downsample
if( l < maxLevel )
{
Mat dst;
pyrDown( src, dst );
src = dst;
if( !mask.empty() )
resize( dilated_mask, src_mask, src.size(), 0, 0, INTER_AREA );
}
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
}
......@@ -44,181 +44,4 @@
namespace cv
{
DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector(const Ptr<AdjusterAdapter>& a,
int min_features, int max_features, int max_iters ) :
escape_iters_(max_iters), min_features_(min_features), max_features_(max_features), adjuster_(a)
{}
bool DynamicAdaptedFeatureDetector::empty() const
{
return !adjuster_ || adjuster_->empty();
}
void DynamicAdaptedFeatureDetector::detectImpl(InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
//for oscillation testing
bool down = false;
bool up = false;
//flag for whether the correct threshold has been reached
bool thresh_good = false;
Ptr<AdjusterAdapter> adjuster = adjuster_->clone();
//break if the desired number hasn't been reached.
int iter_count = escape_iters_;
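// Adjustment loop: detect, then nudge the threshold via tooFew()/tooMany()
// until the keypoint count lands in [min_features_, max_features_], the
// threshold oscillates (both 'down' and 'up' set), the adjuster leaves its
// useful range, or the iteration budget runs out.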
while( iter_count > 0 && !(down && up) && !thresh_good && adjuster->good() )
{
keypoints.clear();
//the adjuster takes care of calling the detector with updated parameters
adjuster->detect(image, keypoints,mask);
if( int(keypoints.size()) < min_features_ )
{
down = true;
adjuster->tooFew(min_features_, (int)keypoints.size());
}
else if( int(keypoints.size()) > max_features_ )
{
up = true;
adjuster->tooMany(max_features_, (int)keypoints.size());
}
else
thresh_good = true;
iter_count--;
}
}
FastAdjuster::FastAdjuster( int init_thresh, bool nonmax, int min_thresh, int max_thresh ) :
thresh_(init_thresh), nonmax_(nonmax), init_thresh_(init_thresh),
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void FastAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
FastFeatureDetector(thresh_, nonmax_).detect(image, keypoints, mask);
}
void FastAdjuster::tooFew(int, int)
{
//fast is easy to adjust
thresh_--;
}
void FastAdjuster::tooMany(int, int)
{
//fast is easy to adjust
thresh_++;
}
//return whether or not the threshold is beyond
//a useful point
bool FastAdjuster::good() const
{
return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
}
Ptr<AdjusterAdapter> FastAdjuster::clone() const
{
Ptr<AdjusterAdapter> cloned_obj(new FastAdjuster( init_thresh_, nonmax_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
StarAdjuster::StarAdjuster(double initial_thresh, double min_thresh, double max_thresh) :
thresh_(initial_thresh), init_thresh_(initial_thresh),
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void StarAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
StarFeatureDetector detector_tmp(16, cvRound(thresh_), 10, 8, 3);
detector_tmp.detect(image, keypoints, mask);
}
void StarAdjuster::tooFew(int, int)
{
thresh_ *= 0.9;
if (thresh_ < 1.1)
thresh_ = 1.1;
}
void StarAdjuster::tooMany(int, int)
{
thresh_ *= 1.1;
}
bool StarAdjuster::good() const
{
return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
}
Ptr<AdjusterAdapter> StarAdjuster::clone() const
{
Ptr<AdjusterAdapter> cloned_obj(new StarAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
SurfAdjuster::SurfAdjuster( double initial_thresh, double min_thresh, double max_thresh ) :
thresh_(initial_thresh), init_thresh_(initial_thresh),
min_thresh_(min_thresh), max_thresh_(max_thresh)
{}
void SurfAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
Ptr<FeatureDetector> surf = FeatureDetector::create("SURF");
surf->set("hessianThreshold", thresh_);
surf->detect(image, keypoints, mask);
}
void SurfAdjuster::tooFew(int, int)
{
thresh_ *= 0.9;
if (thresh_ < 1.1)
thresh_ = 1.1;
}
void SurfAdjuster::tooMany(int, int)
{
thresh_ *= 1.1;
}
//return whether or not the threshold is beyond
//a useful point
bool SurfAdjuster::good() const
{
return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
}
Ptr<AdjusterAdapter> SurfAdjuster::clone() const
{
Ptr<AdjusterAdapter> cloned_obj(new SurfAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
Ptr<AdjusterAdapter> AdjusterAdapter::create( const String& detectorType )
{
Ptr<AdjusterAdapter> adapter;
if( !detectorType.compare( "FAST" ) )
{
adapter = makePtr<FastAdjuster>();
}
else if( !detectorType.compare( "STAR" ) )
{
adapter = makePtr<StarAdjuster>();
}
else if( !detectorType.compare( "SURF" ) )
{
adapter = makePtr<SurfAdjuster>();
}
return adapter;
}
}
......@@ -556,56 +556,3 @@ int cv::getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float
return nearestPointIndex;
}
void cv::evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2,
std::vector<KeyPoint>& keypoints1, std::vector<KeyPoint>& keypoints2,
std::vector<std::vector<DMatch> >* _matches1to2, std::vector<std::vector<uchar> >* _correctMatches1to2Mask,
std::vector<Point2f>& recallPrecisionCurve,
const Ptr<GenericDescriptorMatcher>& _dmatcher )
{
Ptr<GenericDescriptorMatcher> dmatcher = _dmatcher;
dmatcher->clear();
std::vector<std::vector<DMatch> > *matches1to2, buf1;
matches1to2 = _matches1to2 != 0 ? _matches1to2 : &buf1;
std::vector<std::vector<uchar> > *correctMatches1to2Mask, buf2;
correctMatches1to2Mask = _correctMatches1to2Mask != 0 ? _correctMatches1to2Mask : &buf2;
if( keypoints1.empty() )
CV_Error( Error::StsBadArg, "keypoints1 must not be empty" );
if( matches1to2->empty() && !dmatcher )
CV_Error( Error::StsBadArg, "dmatch must not be empty when matches1to2 is empty" );
bool computeKeypoints2ByPrj = keypoints2.empty();
if( computeKeypoints2ByPrj )
{
CV_Error(Error::StsNotImplemented, "");
// TODO: add computing keypoints2 from keypoints1 using H1to2
}
if( matches1to2->empty() || computeKeypoints2ByPrj )
{
dmatcher->clear();
dmatcher->radiusMatch( img1, keypoints1, img2, keypoints2, *matches1to2, std::numeric_limits<float>::max() );
}
float repeatability;
int correspCount;
Mat thresholdedOverlapMask; // thresholded allOverlapErrors
calculateRepeatability( img1, img2, H1to2, keypoints1, keypoints2, repeatability, correspCount, &thresholdedOverlapMask );
correctMatches1to2Mask->resize(matches1to2->size());
for( size_t i = 0; i < matches1to2->size(); i++ )
{
(*correctMatches1to2Mask)[i].resize((*matches1to2)[i].size());
for( size_t j = 0;j < (*matches1to2)[i].size(); j++ )
{
int indexQuery = (*matches1to2)[i][j].queryIdx;
int indexTrain = (*matches1to2)[i][j].trainIdx;
(*correctMatches1to2Mask)[i][j] = thresholdedOverlapMask.at<uchar>( indexQuery, indexTrain );
}
}
computeRecallPrecisionCurve( *matches1to2, *correctMatches1to2Mask, recallPrecisionCurve );
}
......@@ -62,24 +62,11 @@ CV_INIT_ALGORITHM(BRISK, "Feature2D.BRISK",
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
obj.info()->addParam(obj, "bytes", obj.bytes_))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
obj.info()->addParam(obj, "threshold", obj.threshold);
obj.info()->addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
obj.info()->addParam(obj, "type", obj.type))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(StarDetector, "Feature2D.STAR",
obj.info()->addParam(obj, "maxSize", obj.maxSize);
obj.info()->addParam(obj, "responseThreshold", obj.responseThreshold);
obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
......@@ -108,14 +95,6 @@ CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FREAK, "Feature2D.FREAK",
obj.info()->addParam(obj, "orientationNormalized", obj.orientationNormalized);
obj.info()->addParam(obj, "scaleNormalized", obj.scaleNormalized);
obj.info()->addParam(obj, "patternScale", obj.patternScale);
obj.info()->addParam(obj, "nbOctave", obj.nOctaves))
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
......@@ -181,23 +160,6 @@ CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
obj.info()->addParam(obj, "initFeatureScale", obj.initFeatureScale);
obj.info()->addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
obj.info()->addParam(obj, "featureScaleMul", obj.featureScaleMul);
obj.info()->addParam(obj, "initXyStep", obj.initXyStep);
obj.info()->addParam(obj, "initImgBound", obj.initImgBound);
obj.info()->addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale))
CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid",
obj.info()->addParam<FeatureDetector>(obj, "detector", obj.detector, false, 0, 0); // Extra params added to avoid VS2013 fatal error in opencv2/core.hpp (decl. of addParam)
obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints);
obj.info()->addParam(obj, "gridRows", obj.gridRows);
obj.info()->addParam(obj, "gridCols", obj.gridCols))
////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BFMatcher, "DescriptorMatcher.BFMatcher",
obj.info()->addParam(obj, "normType", obj.normType);
obj.info()->addParam(obj, "crossCheck", obj.crossCheck))
......@@ -209,19 +171,14 @@ CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",)
bool cv::initModule_features2d(void)
{
bool all = true;
all &= !BriefDescriptorExtractor_info_auto.name().empty();
all &= !BRISK_info_auto.name().empty();
all &= !FastFeatureDetector_info_auto.name().empty();
all &= !StarDetector_info_auto.name().empty();
all &= !MSER_info_auto.name().empty();
all &= !FREAK_info_auto.name().empty();
all &= !ORB_info_auto.name().empty();
all &= !GFTTDetector_info_auto.name().empty();
all &= !KAZE_info_auto.name().empty();
all &= !AKAZE_info_auto.name().empty();
all &= !HarrisDetector_info_auto.name().empty();
all &= !DenseFeatureDetector_info_auto.name().empty();
all &= !GridAdaptedFeatureDetector_info_auto.name().empty();
all &= !HarrisDetector_info_auto.name().empty();
all &= !BFMatcher_info_auto.name().empty();
all &= !FlannBasedMatcher_info_auto.name().empty();
......
// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 16'
#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
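// Each descriptor byte packs eight pairwise intensity tests; a bit is set
// when the first (smoothed) sample point is darker than the second.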
desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
#undef SMOOTHED
......@@ -327,28 +327,6 @@ TEST( Features2d_DescriptorExtractor_ORB, regression )
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_FREAK, regression )
{
// TODO adjust the parameters below
CV_DescriptorExtractorTest<Hamming> test( "descriptor-freak", (CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
DescriptorExtractor::create("FREAK") );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_BRIEF, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brief", 1,
DescriptorExtractor::create("BRIEF") );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_OpponentBRIEF, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-opponent-brief", 1,
DescriptorExtractor::create("OpponentBRIEF") );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_KAZE, regression )
{
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,
......
......@@ -277,12 +277,6 @@ TEST( Features2d_Detector_MSER, DISABLED_regression )
test.safe_run();
}
TEST( Features2d_Detector_STAR, regression )
{
CV_FeatureDetectorTest test( "detector-star", FeatureDetector::create("STAR") );
test.safe_run();
}
TEST( Features2d_Detector_ORB, regression )
{
CV_FeatureDetectorTest test( "detector-orb", FeatureDetector::create("ORB") );
......@@ -300,15 +294,3 @@ TEST( Features2d_Detector_AKAZE, regression )
CV_FeatureDetectorTest test( "detector-akaze", FeatureDetector::create("AKAZE") );
test.safe_run();
}
TEST( Features2d_Detector_GridFAST, regression )
{
CV_FeatureDetectorTest test( "detector-grid-fast", FeatureDetector::create("GridFAST") );
test.safe_run();
}
TEST( Features2d_Detector_PyramidFAST, regression )
{
CV_FeatureDetectorTest test( "detector-pyramid-fast", FeatureDetector::create("PyramidFAST") );
test.safe_run();
}
......@@ -155,18 +155,6 @@ TEST(Features2d_Detector_Keypoints_ORB, validation)
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_Star, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.STAR"));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_Dense, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.Dense"));
test.safe_run();
}
TEST(Features2d_Detector_Keypoints_KAZE, validation)
{
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.KAZE"));
......
......@@ -2,7 +2,7 @@
from __future__ import print_function
import os, sys, re, string, fnmatch
allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "videostab", "softcascade", "superres"]
verbose = False
show_warnings = True
show_errors = True
......@@ -12,7 +12,6 @@ params_blacklist = {
"fromarray" : ("object", "allowND"), # python only function
"reprojectImageTo3D" : ("ddepth"), # python only argument
"composeRT" : ("d*d*"), # wildchards in parameter names are not supported by this parser
"CvSVM::train_auto" : ("\\*Grid"), # wildchards in parameter names are not supported by this parser
"error" : "args", # parameter of supporting macro
"getConvertElem" : ("from", "cn", "to", "beta", "alpha"), # arguments of returned functions
"gpu::swapChannels" : ("dstOrder") # parameter is not parsed correctly by the hdr_parser
......
......@@ -390,124 +390,6 @@ private:
Ptr<DescriptorExtractor> wrapped;
};
class CV_EXPORTS_AS(GenericDescriptorMatcher) javaGenericDescriptorMatcher
{
public:
CV_WRAP void add( const std::vector<Mat>& images,
std::vector<std::vector<KeyPoint> >& keypoints )
{ return wrapped->add(images, keypoints); }
CV_WRAP const std::vector<Mat>& getTrainImages() const
{ return wrapped->getTrainImages(); }
CV_WRAP const std::vector<std::vector<KeyPoint> >& getTrainKeypoints() const
{ return wrapped->getTrainKeypoints(); }
CV_WRAP void clear()
{ return wrapped->clear(); }
CV_WRAP bool isMaskSupported()
{ return wrapped->isMaskSupported(); }
CV_WRAP void train()
{ return wrapped->train(); }
CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints ) const
{ return wrapped->classify(queryImage, queryKeypoints, trainImage, trainKeypoints); }
CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints )
{ return wrapped->classify(queryImage, queryKeypoints); }
CV_WRAP void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<DMatch>& matches, const Mat& mask=Mat() ) const
{ return wrapped->match(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, mask); }
CV_WRAP void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
const Mat& mask=Mat(), bool compactResult=false ) const
{ return wrapped->knnMatch(queryImage, queryKeypoints, trainImage, trainKeypoints,
matches, k, mask, compactResult); }
CV_WRAP void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
const Mat& mask=Mat(), bool compactResult=false ) const
{ return wrapped->radiusMatch(queryImage, queryKeypoints, trainImage, trainKeypoints,
matches, maxDistance, mask, compactResult); }
CV_WRAP void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<DMatch>& matches, const std::vector<Mat>& masks=std::vector<Mat>() )
{ return wrapped->match(queryImage, queryKeypoints, matches, masks); }
CV_WRAP void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
{ return wrapped->knnMatch(queryImage, queryKeypoints, matches, k, masks, compactResult); }
CV_WRAP void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
{ return wrapped->radiusMatch(queryImage, queryKeypoints, matches, maxDistance, masks, compactResult); }
CV_WRAP bool empty() const
{ return wrapped->empty(); }
enum
{
ONEWAY = 1,
FERN = 2
};
CV_WRAP_AS(clone) javaGenericDescriptorMatcher* jclone( bool emptyTrainData=false ) const
{
return new javaGenericDescriptorMatcher(wrapped->clone(emptyTrainData));
}
//supported: OneWay, Fern
//unsupported: Vector
CV_WRAP static javaGenericDescriptorMatcher* create( int matcherType )
{
String name;
switch(matcherType)
{
case ONEWAY:
name = "ONEWAY";
break;
case FERN:
name = "FERN";
break;
default:
CV_Error( Error::StsBadArg, "Specified generic descriptor matcher type is not supported." );
break;
}
return new javaGenericDescriptorMatcher(GenericDescriptorMatcher::create(name));
}
CV_WRAP void write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
wrapped->write(fs);
}
CV_WRAP void read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
wrapped->read(fs.root());
}
private:
javaGenericDescriptorMatcher(Ptr<GenericDescriptorMatcher> _wrapped) : wrapped(_wrapped)
{}
Ptr<GenericDescriptorMatcher> wrapped;
};
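The hunk above removes the Java wrapper for the deprecated ``GenericDescriptorMatcher`` (the ONEWAY/FERN factory). A minimal sketch of the descriptor-based path that replaces this workflow in features2d; ``ORB`` stands in for any ``Feature2D`` here and is an illustrative choice, not part of the patch:

.. code-block:: cpp

   // Sketch only: detect, extract and match in the surviving API.
   #include "opencv2/core.hpp"
   #include "opencv2/features2d.hpp"

   using namespace cv;

   void matchWithDescriptors( const Mat& img_1, const Mat& img_2,
                              std::vector<DMatch>& matches )
   {
     Ptr<ORB> detector = ORB::create();
     std::vector<KeyPoint> keypoints_1, keypoints_2;
     Mat descriptors_1, descriptors_2;
     detector->detectAndCompute( img_1, noArray(), keypoints_1, descriptors_1 );
     detector->detectAndCompute( img_2, noArray(), keypoints_2, descriptors_2 );

     // the matcher factory replaces the removed ONEWAY/FERN create()
     Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( "BruteForce-Hamming" );
     matcher->match( descriptors_1, descriptors_2, matches );
   }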
#if 0
//DO NOT REMOVE! The block is required for sources parser
enum
......
......@@ -2,10 +2,6 @@
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
# include "opencv2/nonfree.hpp"
#endif
#ifdef HAVE_OPENCV_FEATURES2D
# include "opencv2/features2d.hpp"
#endif
......@@ -28,9 +24,6 @@ JNI_OnLoad(JavaVM* vm, void* )
return -1;
bool init = true;
#ifdef HAVE_OPENCV_NONFREE
init &= cv::initModule_nonfree();
#endif
#ifdef HAVE_OPENCV_FEATURES2D
init &= cv::initModule_features2d();
#endif
......
if(BUILD_ANDROID_PACKAGE)
ocv_module_disable(nonfree)
endif()
set(the_description "Functionality with possible limitations on the use")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wshadow)
ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_cudaarithm)
********************************
nonfree. Non-free functionality
********************************
The module contains algorithms that may be patented in some countries or have other limitations on their use.
.. toctree::
:maxdepth: 2
feature_detection
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_NONFREE_HPP__
#define __OPENCV_NONFREE_HPP__
#include "opencv2/nonfree/features2d.hpp"
namespace cv
{
CV_EXPORTS bool initModule_nonfree();
}
#endif
/* End of file. */
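For context, ``initModule_nonfree()`` was the hook that registered SURF and SIFT with the 2.4-era ``Algorithm`` registry. A hedged sketch of the old pattern this header used to enable; names follow the 2.4 factory API that this refactoring retires:

.. code-block:: cpp

   // Old 2.4-style registration and string-based creation; shown for
   // context only, removed along with this header.
   #include "opencv2/nonfree.hpp"
   #include "opencv2/features2d.hpp"

   void oldFactoryPattern()
   {
     cv::initModule_nonfree();  // registers "Feature2D.SURF", "Feature2D.SIFT"
     cv::Ptr<cv::Feature2D> surf =
         cv::Algorithm::create<cv::Feature2D>( "Feature2D.SURF" );
     surf->set( "hessianThreshold", 400 );
   }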
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_NONFREE_CUDA_HPP__
#define __OPENCV_NONFREE_CUDA_HPP__
#include "opencv2/core/cuda.hpp"
namespace cv { namespace cuda {
class CV_EXPORTS SURF_CUDA
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_CUDA();
//! the full constructor taking all the necessary parameters
explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! returns the default norm type
int defaultNorm() const;
//! upload host keypoints to device memory
void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors);
//! finds the keypoints using the fast Hessian detector used in SURF
//! supports CV_8UC1 images
//! keypoints will have nFeatures cols and ROWS_COUNT (7) rows
//! keypoints.ptr<float>(X_ROW)[i] will contain the x coordinate of the i-th feature
//! keypoints.ptr<float>(Y_ROW)[i] will contain the y coordinate of the i-th feature
//! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain the laplacian sign of the i-th feature
//! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain the octave of the i-th feature
//! keypoints.ptr<float>(SIZE_ROW)[i] will contain the size of the i-th feature
//! keypoints.ptr<float>(ANGLE_ROW)[i] will contain the orientation of the i-th feature
//! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain the response of the i-th feature
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
//! finds the keypoints and computes their descriptors.
//! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
bool useProvidedKeypoints = false);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
}} // namespace cv { namespace cuda {
#endif // __OPENCV_NONFREE_CUDA_HPP__
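A minimal usage sketch for the ``SURF_CUDA`` interface declared above; the image path and threshold value are placeholders:

.. code-block:: cpp

   #include "opencv2/core/cuda.hpp"
   #include "opencv2/imgcodecs.hpp"
   #include "opencv2/nonfree/cuda.hpp"

   int main()
   {
     cv::Mat img = cv::imread( "scene.jpg", cv::IMREAD_GRAYSCALE );  // CV_8UC1 input
     if( img.empty() ) return -1;

     cv::cuda::GpuMat img_gpu( img );      // upload to device memory
     cv::cuda::SURF_CUDA surf( 400.0 );    // hessianThreshold

     cv::cuda::GpuMat keypoints_gpu, descriptors_gpu;
     surf( img_gpu, cv::cuda::GpuMat(), keypoints_gpu, descriptors_gpu );

     std::vector<cv::KeyPoint> keypoints;
     surf.downloadKeypoints( keypoints_gpu, keypoints );

     std::vector<float> descriptors;
     surf.downloadDescriptors( descriptors_gpu, descriptors );

     surf.releaseMemory();
     return 0;
   }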
#include "perf_precomp.hpp"
#include "opencv2/ts/cuda_perf.hpp"
static const char * impls[] = {
#ifdef HAVE_CUDA
"cuda",
#endif
"plain"
};
CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo())
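A hedged sketch of the kind of test this main would drive, using the usual ``ts`` perf macros; the data path, the 2.4-style ``cv::SURF`` functor and the ``SANITY_CHECK_NOTHING`` call are assumptions for illustration, not the module's actual tests:

.. code-block:: cpp

   #include "perf_precomp.hpp"

   using namespace perf;

   PERF_TEST(nonfree, surf_detect)
   {
     cv::Mat img = cv::imread( getDataPath("cv/shared/lena.png"), cv::IMREAD_GRAYSCALE );
     ASSERT_FALSE( img.empty() );

     cv::SURF surf( 400 );  // hessian threshold
     std::vector<cv::KeyPoint> keypoints;

     TEST_CYCLE() surf( img, cv::noArray(), keypoints );

     SANITY_CHECK_NOTHING();
   }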
#include "test_precomp.hpp"
CV_TEST_MAIN("cv")
......@@ -20,6 +20,7 @@ ocv_list_filterout(candidate_deps "^opencv_matlab$")
ocv_list_filterout(candidate_deps "^opencv_tracking$")
ocv_list_filterout(candidate_deps "^opencv_optflow$")
ocv_list_filterout(candidate_deps "^opencv_bgsegm$")
ocv_list_filterout(candidate_deps "^opencv_xfeatures2d$")
ocv_add_module(${MODULE_NAME} BINDINGS OPTIONAL ${candidate_deps})
......
set(the_description "Images stitching")
ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect
OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_nonfree)
OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d)
......@@ -88,7 +88,7 @@ SURF features finder. ::
/* hidden */
};
.. seealso:: :ocv:class:`detail::FeaturesFinder`, :ocv:class:`SURF`
.. seealso:: :ocv:class:`detail::FeaturesFinder`, ``SURF``
detail::OrbFeaturesFinder
-------------------------
......
......@@ -5,7 +5,7 @@
SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d
opencv_objdetect opencv_photo opencv_features2d opencv_calib3d
opencv_stitching opencv_videostab opencv_shape)
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
......