Commit f9eaef9f authored by Alexander Alekhin

Merge pull request #1744 from cv3d:chunks/enum_interface

parents 9735ec66 72e55f5c
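The pattern repeated throughout this merge: anonymous `enum` blocks are either promoted to named enum types (where the values select a behavior, so callers and the generated wrappers get real type checking) or replaced by `static const int` members (where the values are just row indices or sizes that never needed a type of their own). A minimal sketch of both outcomes, using a hypothetical `Detector` class rather than any real OpenCV one:

```cpp
// Hypothetical illustration of the refactoring pattern in this merge.
class Detector
{
public:
    // Before: enum { FAST_SCORE = 0, HARRIS_SCORE = 1 }; and setScoreType(int).
    // After: a named enum, so an arbitrary int no longer converts implicitly.
    enum ScoreType
    {
        FAST_SCORE = 0,
        HARRIS_SCORE = 1
    };
    void setScoreType(ScoreType type) { score_type_ = type; }
    ScoreType getScoreType() const { return score_type_; }

    // Pure integral constants (row indices, buffer sizes) go the other way:
    // plain static const int members instead of enumerators.
    static const int LOCATION_ROW = 0;
    static const int RESPONSE_ROW = 1;

private:
    ScoreType score_type_ = HARRIS_SCORE;
};
```

The typed setter rejects a call like `setScoreType(3)` at compile time, which is what the signature changes below rely on.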
@@ -426,14 +426,10 @@ public:
class CV_EXPORTS_W FastFeatureDetector : public Feature2DAsync
{
public:
-enum
-{
-LOCATION_ROW = 0,
-RESPONSE_ROW,
-ROWS_COUNT,
-FEATURE_SIZE = 7
-};
+static const int LOCATION_ROW = 0;
+static const int RESPONSE_ROW = 1;
+static const int ROWS_COUNT = 2;
+static const int FEATURE_SIZE = 7;
CV_WRAP static Ptr<cuda::FastFeatureDetector> create(int threshold=10,
bool nonmaxSuppression=true,
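The row constants above index the raw keypoint table that the asynchronous detector writes into a `GpuMat`; a hedged sketch of reading it back, assuming a CUDA-enabled build (`detectAsync` and `convert` are the documented `Feature2DAsync` entry points):

```cpp
#include <vector>
#include "opencv2/cudafeatures2d.hpp"

// Sketch: consuming the FastFeatureDetector row constants (CUDA build assumed).
void readFastKeypoints(const cv::cuda::GpuMat& d_image)
{
    cv::Ptr<cv::cuda::FastFeatureDetector> fast =
        cv::cuda::FastFeatureDetector::create(/*threshold=*/10);

    cv::cuda::GpuMat d_keypoints;
    fast->detectAsync(d_image, d_keypoints);

    cv::Mat table;
    d_keypoints.download(table);  // one row per attribute, one column per keypoint
    cv::Mat locations = table.row(cv::cuda::FastFeatureDetector::LOCATION_ROW);
    cv::Mat responses = table.row(cv::cuda::FastFeatureDetector::RESPONSE_ROW);

    // The usual path avoids the raw layout entirely:
    std::vector<cv::KeyPoint> keypoints;
    fast->convert(d_keypoints, keypoints);
}
```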
@@ -456,16 +452,13 @@ public:
class CV_EXPORTS_W ORB : public Feature2DAsync
{
public:
-enum
-{
-X_ROW = 0,
-Y_ROW,
-RESPONSE_ROW,
-ANGLE_ROW,
-OCTAVE_ROW,
-SIZE_ROW,
-ROWS_COUNT
-};
+static const int X_ROW = 0;
+static const int Y_ROW = 1;
+static const int RESPONSE_ROW = 2;
+static const int ANGLE_ROW = 3;
+static const int OCTAVE_ROW = 4;
+static const int SIZE_ROW = 5;
+static const int ROWS_COUNT = 6;
CV_WRAP static Ptr<cuda::ORB> create(int nfeatures=500,
float scaleFactor=1.2f,
......
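`cuda::ORB` keeps the same kind of row layout; a companion sketch (again assuming a CUDA build) that passes the now-typed score selector through `create` and lets `convert` unpack the `X_ROW`/`Y_ROW`/... table:

```cpp
#include <vector>
#include "opencv2/cudafeatures2d.hpp"

// Sketch: cuda::ORB with the typed score selector (CUDA build assumed).
void detectOrb(const cv::cuda::GpuMat& d_image)
{
    cv::Ptr<cv::cuda::ORB> orb = cv::cuda::ORB::create(
        /*nfeatures=*/500, /*scaleFactor=*/1.2f, /*nlevels=*/8,
        /*edgeThreshold=*/31, /*firstLevel=*/0, /*WTA_K=*/2,
        cv::ORB::HARRIS_SCORE);  // a cv::ORB::ScoreType value, not a bare int

    cv::cuda::GpuMat d_keypoints, d_descriptors;
    orb->detectAndComputeAsync(d_image, cv::noArray(), d_keypoints, d_descriptors);

    std::vector<cv::KeyPoint> keypoints;
    orb->convert(d_keypoints, keypoints);  // unpacks the row-indexed table
}
```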
@@ -172,9 +172,7 @@ namespace
IMPLEMENT_PARAM_CLASS(ORB_BlurForDescriptor, bool)
}
-CV_ENUM(ORB_ScoreType, cv::ORB::HARRIS_SCORE, cv::ORB::FAST_SCORE)
-PARAM_TEST_CASE(ORB, cv::cuda::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, ORB_LevelsCount, ORB_EdgeThreshold, ORB_firstLevel, ORB_WTA_K, ORB_ScoreType, ORB_PatchSize, ORB_BlurForDescriptor)
+PARAM_TEST_CASE(ORB, cv::cuda::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, ORB_LevelsCount, ORB_EdgeThreshold, ORB_firstLevel, ORB_WTA_K, cv::ORB::ScoreType, ORB_PatchSize, ORB_BlurForDescriptor)
{
cv::cuda::DeviceInfo devInfo;
int nFeatures;
@@ -183,7 +181,7 @@ PARAM_TEST_CASE(ORB, cv::cuda::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, O
int edgeThreshold;
int firstLevel;
int WTA_K;
-int scoreType;
+cv::ORB::ScoreType scoreType;
int patchSize;
bool blurForDescriptor;
@@ -260,7 +258,7 @@ INSTANTIATE_TEST_CASE_P(CUDA_Features2D, ORB, testing::Combine(
testing::Values(ORB_EdgeThreshold(31)),
testing::Values(ORB_firstLevel(0)),
testing::Values(ORB_WTA_K(2), ORB_WTA_K(3), ORB_WTA_K(4)),
-testing::Values(ORB_ScoreType(cv::ORB::HARRIS_SCORE)),
+testing::Values(cv::ORB::HARRIS_SCORE),
testing::Values(ORB_PatchSize(31), ORB_PatchSize(29)),
testing::Values(ORB_BlurForDescriptor(false), ORB_BlurForDescriptor(true))));
......
@@ -48,6 +48,7 @@
#endif
#include "opencv2/core/cuda.hpp"
+#include "opencv2/objdetect.hpp"
/**
@addtogroup cuda
@@ -78,12 +79,6 @@ namespace cv { namespace cuda {
class CV_EXPORTS_W HOG : public Algorithm
{
public:
-enum
-{
-DESCR_FORMAT_ROW_BY_ROW,
-DESCR_FORMAT_COL_BY_COL
-};
/** @brief Creates the HOG descriptor and detector.
@param win_size Detection window size. Align to block size and block stride.
@@ -138,8 +133,8 @@ public:
//! Descriptor storage format:
//! - **DESCR_FORMAT_ROW_BY_ROW** - Row-major order.
//! - **DESCR_FORMAT_COL_BY_COL** - Column-major order.
-CV_WRAP virtual void setDescriptorFormat(int descr_format) = 0;
-CV_WRAP virtual int getDescriptorFormat() const = 0;
+CV_WRAP virtual void setDescriptorFormat(HOGDescriptor::DescriptorStorageFormat descr_format) = 0;
+CV_WRAP virtual HOGDescriptor::DescriptorStorageFormat getDescriptorFormat() const = 0;
/** @brief Returns the number of coefficients required for the classification.
*/
......
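The `opencv2/objdetect.hpp` include added above is what makes this compile: the storage-format enum now lives on `cv::HOGDescriptor` rather than as an anonymous enum inside `cuda::HOG`. A minimal usage sketch (CUDA build assumed):

```cpp
#include "opencv2/cudaobjdetect.hpp"

// Sketch: selecting the descriptor layout with the typed enum.
void computeHogDescriptors(const cv::cuda::GpuMat& d_image)
{
    cv::Ptr<cv::cuda::HOG> hog = cv::cuda::HOG::create();
    hog->setDescriptorFormat(cv::HOGDescriptor::DESCR_FORMAT_ROW_BY_ROW);

    cv::cuda::GpuMat descriptors;
    hog->compute(d_image, descriptors);
}
```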
@@ -170,8 +170,8 @@ namespace
virtual void setGroupThreshold(int group_threshold) { group_threshold_ = group_threshold; }
virtual int getGroupThreshold() const { return group_threshold_; }
-virtual void setDescriptorFormat(int descr_format) { descr_format_ = descr_format; }
-virtual int getDescriptorFormat() const { return descr_format_; }
+virtual void setDescriptorFormat(HOGDescriptor::DescriptorStorageFormat descr_format) { descr_format_ = descr_format; }
+virtual HOGDescriptor::DescriptorStorageFormat getDescriptorFormat() const { return descr_format_; }
virtual size_t getDescriptorSize() const;
@@ -208,7 +208,7 @@ namespace
Size win_stride_;
double scale0_;
int group_threshold_;
-int descr_format_;
+HOGDescriptor::DescriptorStorageFormat descr_format_;
Size cells_per_block_;
private:
@@ -240,7 +240,7 @@ namespace
win_stride_(block_stride),
scale0_(1.05),
group_threshold_(2),
-descr_format_(DESCR_FORMAT_COL_BY_COL),
+descr_format_(HOGDescriptor::DESCR_FORMAT_COL_BY_COL),
cells_per_block_(block_size.width / cell_size.width, block_size.height / cell_size.height)
{
CV_Assert((win_size.width - block_size.width ) % block_stride.width == 0 &&
@@ -502,7 +502,7 @@ namespace
switch (descr_format_)
{
-case DESCR_FORMAT_ROW_BY_ROW:
+case HOGDescriptor::DESCR_FORMAT_ROW_BY_ROW:
hog::extract_descrs_by_rows(win_size_.height, win_size_.width,
block_stride_.height, block_stride_.width,
win_stride_.height, win_stride_.width,
@@ -512,7 +512,7 @@ namespace
descriptors,
StreamAccessor::getStream(stream));
break;
-case DESCR_FORMAT_COL_BY_COL:
+case HOGDescriptor::DESCR_FORMAT_COL_BY_COL:
hog::extract_descrs_by_cols(win_size_.height, win_size_.width,
block_stride_.height, block_stride_.width,
win_stride_.height, win_stride_.width,
......
@@ -187,10 +187,10 @@ CUDA_TEST_P(HOG, GetDescriptors)
hog->setWinStride(Size(64, 128));
-hog->setDescriptorFormat(cv::cuda::HOG::DESCR_FORMAT_ROW_BY_ROW);
+hog->setDescriptorFormat(HOGDescriptor::DESCR_FORMAT_ROW_BY_ROW);
hog->compute(d_img, descriptors);
-hog->setDescriptorFormat(cv::cuda::HOG::DESCR_FORMAT_COL_BY_COL);
+hog->setDescriptorFormat(HOGDescriptor::DESCR_FORMAT_COL_BY_COL);
hog->compute(d_img, descriptors_by_cols);
// Check size of the result train table
......
@@ -137,17 +137,18 @@ class CvParams
class CvFeatureParams : public CvParams
{
public:
-enum
+enum FeatureType
{
HAAR = 0,
LBP = 1,
HOG = 2
};
CvFeatureParams();
virtual void init( const CvFeatureParams& fp );
virtual void write( FileStorage &fs ) const CV_OVERRIDE;
virtual bool read( const FileNode &node ) CV_OVERRIDE;
-static Ptr<CvFeatureParams> create( int featureType );
+static Ptr<CvFeatureParams> create(CvFeatureParams::FeatureType featureType);
int maxCatCount; // 0 in case of numerical features
int featSize; // 1 in case of simple features (HAAR, LBP) and N_BINS(9)*N_CELLS(4) in case of Dalal's HOG features
int numFeatures;
@@ -163,7 +164,7 @@ class CvFeatureEvaluator
virtual void setImage( const Mat& img, uchar clsLabel, int idx );
virtual void writeFeatures( FileStorage &fs, const Mat& featureMap ) const = 0;
virtual float operator()( int featureIdx, int sampleIdx ) = 0;
-static Ptr<CvFeatureEvaluator> create( int type );
+static Ptr<CvFeatureEvaluator> create(CvFeatureParams::FeatureType type);
int getNumFeatures() const
{
......
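Both factories now take the named `FeatureType`, so a bare integer no longer converts implicitly. A hedged sketch, assuming the module-private header that declares these classes is on the include path:

```cpp
// Sketch: CvFeatureEvaluator::create(2) would no longer compile;
// the named enumerator is the only well-typed argument.
cv::Ptr<CvFeatureEvaluator> makeHaarEvaluator()
{
    return CvFeatureEvaluator::create(CvFeatureParams::HAAR);
}
```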
@@ -100,7 +100,7 @@ bool CvFeatureParams::read( const FileNode &node )
return ( maxCatCount >= 0 && featSize >= 1 );
}
-Ptr<CvFeatureParams> CvFeatureParams::create( int featureType )
+Ptr<CvFeatureParams> CvFeatureParams::create(FeatureType featureType)
{
return featureType == HAAR ? Ptr<CvFeatureParams>( new CvHaarFeatureParams ) : featureType == LBP ? Ptr<CvFeatureParams>( new CvLBPFeatureParams ) :
featureType == HOG ? Ptr<CvFeatureParams>( new CvHOGFeatureParams ) : Ptr<CvFeatureParams>();
@@ -128,7 +128,7 @@ void CvFeatureEvaluator::setImage( const Mat &img, uchar clsLabel, int idx )
cls.ptr<float>( idx )[0] = clsLabel;
}
-Ptr<CvFeatureEvaluator> CvFeatureEvaluator::create( int type )
+Ptr<CvFeatureEvaluator> CvFeatureEvaluator::create(CvFeatureParams::FeatureType type)
{
return type == CvFeatureParams::HAAR ? Ptr<CvFeatureEvaluator>( new CvHaarEvaluator ) :
type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>( new CvLBPEvaluator ) :
......
@@ -84,10 +84,9 @@ class CV_EXPORTS_W FREAK : public Feature2D
{
public:
-enum
-{
-NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45
-};
+static const int NB_SCALES = 64;
+static const int NB_PAIRS = 512;
+static const int NB_ORIENPAIRS = 45;
/**
@param orientationNormalized Enable orientation normalization.
@@ -197,12 +196,12 @@ DAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 bu
class CV_EXPORTS_W DAISY : public Feature2D
{
public:
-enum
+enum NormalizationType
{
NRM_NONE = 100, NRM_PARTIAL = 101, NRM_FULL = 102, NRM_SIFT = 103,
};
CV_WRAP static Ptr<DAISY> create( float radius = 15, int q_radius = 3, int q_theta = 8,
-int q_hist = 8, int norm = DAISY::NRM_NONE, InputArray H = noArray(),
+int q_hist = 8, DAISY::NormalizationType norm = DAISY::NRM_NONE, InputArray H = noArray(),
bool interpolation = true, bool use_orientation = false );
/** @overload
@@ -961,7 +960,7 @@ FastFeatureDetector::TYPE_5_8
Detects corners using the FAST algorithm by @cite Rosten06 .
*/
CV_EXPORTS void FASTForPointSet( InputArray image, CV_IN_OUT std::vector<KeyPoint>& keypoints,
-int threshold, bool nonmaxSuppression=true, int type=FastFeatureDetector::TYPE_9_16);
+int threshold, bool nonmaxSuppression=true, FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16);
//! @}
......
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d.hpp"
using cv::xfeatures2d::DAISY;
typedef DAISY::NormalizationType DAISY_NormalizationType;
CV_PY_FROM_ENUM(DAISY::NormalizationType);
CV_PY_TO_ENUM(DAISY::NormalizationType);
#endif
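With those converters registered, Python sees `NormalizationType` as a proper enum; on the C++ side the typed parameter is passed straight to `create`, as in this sketch:

```cpp
#include "opencv2/xfeatures2d.hpp"

// Sketch: DAISY::create with the typed normalization selector.
cv::Ptr<cv::xfeatures2d::DAISY> makeDaisy()
{
    using cv::xfeatures2d::DAISY;
    return DAISY::create(/*radius=*/15, /*q_radius=*/3, /*q_theta=*/8,
                         /*q_hist=*/8, DAISY::NRM_SIFT);  // was `int norm`
}
```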
@@ -98,7 +98,7 @@ public:
* @param use_orientation sample patterns using keypoints orientation, disabled by default.
*/
explicit DAISY_Impl(float radius=15, int q_radius=3, int q_theta=8, int q_hist=8,
-int norm = DAISY::NRM_NONE, InputArray H = noArray(),
+DAISY::NormalizationType norm = DAISY::NRM_NONE, InputArray H = noArray(),
bool interpolation = true, bool use_orientation = false);
virtual ~DAISY_Impl() CV_OVERRIDE;
@@ -189,7 +189,7 @@ protected:
// holds the type of the normalization to apply; equals to NRM_PARTIAL by
// default. change the value using set_normalization() function.
-int m_nrm_type;
+DAISY::NormalizationType m_nrm_type;
// the size of the descriptor vector
int m_descriptor_size;
@@ -561,7 +561,7 @@ static void normalize_full( float* desc, const int _descriptor_size )
}
}
-static void normalize_descriptor( float* desc, const int nrm_type, const int _grid_point_number,
+static void normalize_descriptor( float* desc, const DAISY::NormalizationType nrm_type, const int _grid_point_number,
const int _hist_th_q_no, const int _descriptor_size )
{
if( nrm_type == DAISY::NRM_NONE ) return;
@@ -888,7 +888,7 @@ static void get_descriptor( const double y, const double x, const int orientatio
const std::vector<Mat>* m_smoothed_gradient_layers, const Mat* m_oriented_grid_points,
const double* m_orientation_shift_table, const int m_th_q_no, const int m_hist_th_q_no,
const int m_grid_point_number, const int m_descriptor_size, const bool m_enable_interpolation,
-const int m_nrm_type )
+const DAISY::NormalizationType m_nrm_type)
{
get_unnormalized_descriptor( y, x, orientation, descriptor, m_smoothed_gradient_layers,
m_oriented_grid_points, m_orientation_shift_table, m_th_q_no, m_enable_interpolation );
@@ -912,7 +912,7 @@ static bool get_descriptor_h( const double y, const double x, const int orientat
const std::vector<Mat>* m_smoothed_gradient_layers, const Mat& m_cube_sigmas,
const Mat* m_grid_points, const double* m_orientation_shift_table, const int m_th_q_no,
const int m_hist_th_q_no, const int m_grid_point_number, const int m_descriptor_size,
-const bool m_enable_interpolation, const int m_nrm_type )
+const bool m_enable_interpolation, const DAISY::NormalizationType m_nrm_type)
{
bool rval =
@@ -1054,7 +1054,7 @@ inline void DAISY_Impl::compute_descriptors( Mat* m_dense_descriptors )
struct NormalizeDescriptorsInvoker : ParallelLoopBody
{
-NormalizeDescriptorsInvoker( Mat* _descriptors, int _nrm_type, int _grid_point_number,
+NormalizeDescriptorsInvoker( Mat* _descriptors, DAISY::NormalizationType _nrm_type, int _grid_point_number,
int _hist_th_q_no, int _descriptor_size )
{
descriptors = _descriptors;
@@ -1074,7 +1074,7 @@ struct NormalizeDescriptorsInvoker : ParallelLoopBody
}
Mat *descriptors;
-int nrm_type;
+DAISY::NormalizationType nrm_type;
int grid_point_number;
int hist_th_q_no;
int descriptor_size;
@@ -1590,7 +1590,7 @@ void DAISY_Impl::compute( InputArray _image, OutputArray _descriptors )
// constructor
DAISY_Impl::DAISY_Impl( float _radius, int _q_radius, int _q_theta, int _q_hist,
-int _norm, InputArray _H, bool _interpolation, bool _use_orientation )
+DAISY::NormalizationType _norm, InputArray _H, bool _interpolation, bool _use_orientation )
: m_rad(_radius), m_rad_q_no(_q_radius), m_th_q_no(_q_theta), m_hist_th_q_no(_q_hist),
m_nrm_type(_norm), m_enable_interpolation(_interpolation), m_use_orientation(_use_orientation)
{
@@ -1612,7 +1612,7 @@ DAISY_Impl::~DAISY_Impl()
}
Ptr<DAISY> DAISY::create( float radius, int q_radius, int q_theta, int q_hist,
-int norm, InputArray H, bool interpolation, bool use_orientation)
+DAISY::NormalizationType norm, InputArray H, bool interpolation, bool use_orientation)
{
return makePtr<DAISY_Impl>(radius, q_radius, q_theta, q_hist, norm, H, interpolation, use_orientation);
}
......
@@ -456,7 +456,7 @@ namespace {
namespace cv {
namespace xfeatures2d {
-void FASTForPointSet(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression, int type)
+void FASTForPointSet(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression, FastFeatureDetector::DetectorType type)
{
if (keypoints.empty()) {
FAST(_img, keypoints, threshold, nonmax_suppression, type);
......
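At the call site the typed signature reads the same as before, since `TYPE_9_16` is now an enumerator of `FastFeatureDetector::DetectorType`; a short sketch:

```cpp
#include <vector>
#include "opencv2/xfeatures2d.hpp"

// Sketch: recomputing FAST scores for an existing point set.
void rescoreKeypoints(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints)
{
    cv::xfeatures2d::FASTForPointSet(image, keypoints, /*threshold=*/10,
                                     /*nonmaxSuppression=*/true,
                                     cv::FastFeatureDetector::TYPE_9_16);
}
```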
@@ -138,14 +138,10 @@ protected:
OrientationPair orientationPairs[NB_ORIENPAIRS];
};
static const double FREAK_LOG2 = 0.693147180559945;
static const int FREAK_NB_ORIENTATION = 256;
static const int FREAK_NB_POINTS = 43;
static const int FREAK_SMALLEST_KP_SIZE = 7; // smallest size of keypoints
-static const int FREAK_NB_SCALES = FREAK::NB_SCALES;
-static const int FREAK_NB_PAIRS = FREAK::NB_PAIRS;
-static const int FREAK_NB_ORIENPAIRS = FREAK::NB_ORIENPAIRS;
// default pairs
static const int FREAK_DEF_PAIRS[FREAK_Impl::NB_PAIRS] =
@@ -209,8 +205,8 @@ void FREAK_Impl::buildPattern()
nOctaves0 = nOctaves;
patternScale0 = patternScale;
-patternLookup.resize(FREAK_NB_SCALES*FREAK_NB_ORIENTATION*FREAK_NB_POINTS);
-double scaleStep = std::pow(2.0, (double)(nOctaves)/FREAK_NB_SCALES ); // 2 ^ ( (nOctaves-1) /nbScales)
+patternLookup.resize(FREAK::NB_SCALES*FREAK_NB_ORIENTATION*FREAK_NB_POINTS);
+double scaleStep = std::pow(2.0, (double)(nOctaves)/FREAK::NB_SCALES ); // 2 ^ ( (nOctaves-1) /nbScales)
double scalingFactor, alpha, beta, theta = 0;
// pattern definition, radius normalized to 1.0 (outer point position+sigma=1.0)
@@ -226,7 +222,7 @@ void FREAK_Impl::buildPattern()
radius[6]/2.0, radius[6]/2.0
};
// fill the lookup table
-for( int scaleIdx=0; scaleIdx < FREAK_NB_SCALES; ++scaleIdx )
+for( int scaleIdx=0; scaleIdx < FREAK::NB_SCALES; ++scaleIdx )
{
patternSizes[scaleIdx] = 0; // proper initialization
scalingFactor = std::pow(scaleStep,scaleIdx); //scale of the pattern, scaleStep ^ scaleIdx
@@ -282,7 +278,7 @@ void FREAK_Impl::buildPattern()
orientationPairs[39].i=30; orientationPairs[39].j=33; orientationPairs[40].i=31; orientationPairs[40].j=34; orientationPairs[41].i=32; orientationPairs[41].j=35;
orientationPairs[42].i=36; orientationPairs[42].j=39; orientationPairs[43].i=37; orientationPairs[43].j=40; orientationPairs[44].i=38; orientationPairs[44].j=41;
-for( unsigned m = FREAK_NB_ORIENPAIRS; m--; )
+for( unsigned m = FREAK::NB_ORIENPAIRS; m--; )
{
const float dx = patternLookup[orientationPairs[m].i].x-patternLookup[orientationPairs[m].j].x;
const float dy = patternLookup[orientationPairs[m].i].y-patternLookup[orientationPairs[m].j].y;
@@ -305,9 +301,9 @@ void FREAK_Impl::buildPattern()
// Input vector provided
if( !selectedPairs0.empty() )
{
-if( (int)selectedPairs0.size() == FREAK_NB_PAIRS )
+if( (int)selectedPairs0.size() == FREAK::NB_PAIRS )
{
-for( int i = 0; i < FREAK_NB_PAIRS; ++i )
+for( int i = 0; i < FREAK::NB_PAIRS; ++i )
descriptionPairs[i] = allPairs[selectedPairs0.at(i)];
}
else
@@ -317,7 +313,7 @@ void FREAK_Impl::buildPattern()
}
else // default selected pairs
{
-for( int i = 0; i < FREAK_NB_PAIRS; ++i )
+for( int i = 0; i < FREAK::NB_PAIRS; ++i )
descriptionPairs[i] = allPairs[FREAK_DEF_PAIRS[i]];
}
}
@@ -370,11 +366,11 @@ void FREAK_Impl::compute( InputArray _image, std::vector<KeyPoint>& keypoints, O
template <typename srcMatType>
void FREAK_Impl::extractDescriptor(srcMatType *pointsValue, void ** ptr)
{
-std::bitset<FREAK_NB_PAIRS>** ptrScalar = (std::bitset<FREAK_NB_PAIRS>**) ptr;
+std::bitset<FREAK::NB_PAIRS>** ptrScalar = (std::bitset<FREAK::NB_PAIRS>**) ptr;
// extracting descriptor preserving the order of SSE version
int cnt = 0;
-for( int n = 7; n < FREAK_NB_PAIRS; n += 128)
+for( int n = 7; n < FREAK::NB_PAIRS; n += 128)
{
for( int m = 8; m--; )
{
@@ -396,7 +392,7 @@ void FREAK_Impl::extractDescriptor(uchar *pointsValue, void ** ptr)
// note that comparisons order is modified in each block (but first 128 comparisons remain globally the same-->does not affect the 128,384 bits segmanted matching strategy)
int cnt = 0;
-for( int n = FREAK_NB_PAIRS/128; n-- ; )
+for( int n = FREAK::NB_PAIRS/128; n-- ; )
{
__m128i result128 = _mm_setzero_si128();
for( int m = 128/16; m--; cnt += 16 )
@@ -457,7 +453,7 @@ void FREAK_Impl::computeDescriptors( InputArray _image, std::vector<KeyPoint>& k
std::vector<int> kpScaleIdx(keypoints.size()); // used to save pattern scale index corresponding to each keypoints
const std::vector<int>::iterator ScaleIdxBegin = kpScaleIdx.begin(); // used in std::vector erase function
const std::vector<cv::KeyPoint>::iterator kpBegin = keypoints.begin(); // used in std::vector erase function
-const float sizeCst = static_cast<float>(FREAK_NB_SCALES/(FREAK_LOG2* nOctaves));
+const float sizeCst = static_cast<float>(FREAK::NB_SCALES/(FREAK_LOG2* nOctaves));
srcMatType pointsValue[FREAK_NB_POINTS];
int thetaIdx = 0;
int direction0;
@@ -470,8 +466,8 @@ void FREAK_Impl::computeDescriptors( InputArray _image, std::vector<KeyPoint>& k
{
//Is k non-zero? If so, decrement it and continue"
kpScaleIdx[k] = std::max( (int)(std::log(keypoints[k].size/FREAK_SMALLEST_KP_SIZE)*sizeCst+0.5) ,0);
-if( kpScaleIdx[k] >= FREAK_NB_SCALES )
-kpScaleIdx[k] = FREAK_NB_SCALES-1;
+if( kpScaleIdx[k] >= FREAK::NB_SCALES )
+kpScaleIdx[k] = FREAK::NB_SCALES-1;
if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] || //check if the description at this specific position and scale fits inside the image
keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
@@ -490,9 +486,9 @@ void FREAK_Impl::computeDescriptors( InputArray _image, std::vector<KeyPoint>& k
for( size_t k = keypoints.size(); k--; )
{
kpScaleIdx[k] = scIdx; // equivalent to the formule when the scale is normalized with a constant size of keypoints[k].size=3*SMALLEST_KP_SIZE
-if( kpScaleIdx[k] >= FREAK_NB_SCALES )
+if( kpScaleIdx[k] >= FREAK::NB_SCALES )
{
-kpScaleIdx[k] = FREAK_NB_SCALES-1;
+kpScaleIdx[k] = FREAK::NB_SCALES-1;
}
if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] ||
keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
@@ -510,7 +506,7 @@ void FREAK_Impl::computeDescriptors( InputArray _image, std::vector<KeyPoint>& k
if( !extAll )
{
// extract the best comparisons only
-_descriptors.create((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
+_descriptors.create((int)keypoints.size(), FREAK::NB_PAIRS/8, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
@@ -773,9 +769,9 @@ std::vector<int> FREAK_Impl::selectPairs(const std::vector<Mat>& images
}
std::vector<int> idxBestPairs;
-if( (int)bestPairs.size() >= FREAK_NB_PAIRS )
+if( (int)bestPairs.size() >= FREAK::NB_PAIRS )
{
-for( int i = 0; i < FREAK_NB_PAIRS; ++i )
+for( int i = 0; i < FREAK::NB_PAIRS; ++i )
idxBestPairs.push_back(bestPairs[i].idx);
}
else
@@ -827,7 +823,7 @@ FREAK_Impl::~FREAK_Impl()
int FREAK_Impl::descriptorSize() const
{
-return FREAK_NB_PAIRS / 8; // descriptor length in bytes
+return FREAK::NB_PAIRS / 8; // descriptor length in bytes
}
int FREAK_Impl::descriptorType() const
......
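With the `FREAK_NB_*` aliases gone, the public class constants state the descriptor geometry directly; `descriptorSize()` above, for instance, is `NB_PAIRS / 8` bytes. A one-line check of that arithmetic:

```cpp
#include "opencv2/xfeatures2d.hpp"

// FREAK packs NB_PAIRS = 512 binary comparisons at one bit each: 64 bytes.
static_assert(cv::xfeatures2d::FREAK::NB_PAIRS / 8 == 64,
              "FREAK descriptor is 64 bytes");
```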