Commit f97e38d8 authored by Ievgen Khvedchenia

Fix casting from/to int/float that caused a lot of compiler warnings.

parent 61f79c26
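The diff below applies one recurring pattern: make double-to-int and double-to-float conversions explicit, and use single-precision literals in float expressions. A minimal sketch of the kind of change involved (illustrative only, assuming the usual <cmath> functions; not lifted verbatim from the diff):

    #include <cmath>

    // Before: ceil() returns double, so returning it as int and mixing double
    // literals into float math draws implicit-conversion warnings.
    // After: the narrowing is spelled out and the literals are single precision.
    static int descriptor_bytes(int descriptor_channels) {
      int t = (6 + 36 + 120) * descriptor_channels;
      return (int)std::ceil(t / 8.);      // was: return ceil(t / 8.);
    }

    static float evolution_time(float esigma) {
      return 0.5f * (esigma * esigma);    // was: 0.5*(esigma*esigma)
    }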
@@ -30,12 +30,12 @@ namespace cv
if (descriptor_size == 0) if (descriptor_size == 0)
{ {
int t = (6 + 36 + 120) * descriptor_channels; int t = (6 + 36 + 120) * descriptor_channels;
return ceil(t / 8.); return (int)ceil(t / 8.);
} }
else else
{ {
// We use the random bit selection length binary descriptor // We use the random bit selection length binary descriptor
return ceil(descriptor_size / 8.); return (int)ceil(descriptor_size / 8.);
} }
} }
} }
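(For the full-length binary descriptor referred to later in this diff, descriptor_channels = 3 gives (6 + 36 + 120) * 3 = 486 bits, so the function returns (int)ceil(486 / 8.) = 61 bytes.)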
@@ -47,12 +47,12 @@ AKAZEFeatures::~AKAZEFeatures(void) {
*/ */
void AKAZEFeatures::Allocate_Memory_Evolution(void) { void AKAZEFeatures::Allocate_Memory_Evolution(void) {
float rfactor = 0.0; float rfactor = 0.0f;
int level_height = 0, level_width = 0; int level_height = 0, level_width = 0;
// Allocate the dimension of the matrices for the evolution // Allocate the dimension of the matrices for the evolution
for (int i = 0; i <= options_.omax - 1; i++) { for (int i = 0; i <= options_.omax - 1; i++) {
rfactor = 1.0 / pow(2.f, i); rfactor = 1.0f / pow(2.f, i);
level_height = (int)(options_.img_height*rfactor); level_height = (int)(options_.img_height*rfactor);
level_width = (int)(options_.img_width*rfactor); level_width = (int)(options_.img_width*rfactor);
@@ -75,7 +75,7 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) {
step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F); step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F);
step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i); step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i);
step.sigma_size = fRound(step.esigma); step.sigma_size = fRound(step.esigma);
step.etime = 0.5*(step.esigma*step.esigma); step.etime = 0.5f*(step.esigma*step.esigma);
step.octave = i; step.octave = i;
step.sublevel = j; step.sublevel = j;
evolution_.push_back(step); evolution_.push_back(step);
@@ -86,9 +86,9 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) {
for (size_t i = 1; i < evolution_.size(); i++) { for (size_t i = 1; i < evolution_.size(); i++) {
int naux = 0; int naux = 0;
vector<float> tau; vector<float> tau;
float ttime = 0.0; float ttime = 0.0f;
ttime = evolution_[i].etime - evolution_[i - 1].etime; ttime = evolution_[i].etime - evolution_[i - 1].etime;
naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_, tau); naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau);
nsteps_.push_back(naux); nsteps_.push_back(naux);
tsteps_.push_back(tau); tsteps_.push_back(tau);
ncycles_++; ncycles_++;
@@ -103,7 +103,7 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) {
*/ */
int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
double t1 = 0.0, t2 = 0.0; //double t1 = 0.0, t2 = 0.0;
if (evolution_.size() == 0) { if (evolution_.size() == 0) {
cerr << "Error generating the nonlinear scale space!!" << endl; cerr << "Error generating the nonlinear scale space!!" << endl;
@@ -111,7 +111,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
return -1; return -1;
} }
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
// Copy the original image to the first level of the evolution // Copy the original image to the first level of the evolution
img.copyTo(evolution_[0].Lt); img.copyTo(evolution_[0].Lt);
@@ -120,23 +120,23 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
// First compute the kcontrast factor // First compute the kcontrast factor
options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile,
1.0, options_.kcontrast_nbins, 0, 0); 1.0f, options_.kcontrast_nbins, 0, 0);
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency();
// Now generate the rest of evolution levels // Now generate the rest of evolution levels
for (size_t i = 1; i < evolution_.size(); i++) { for (size_t i = 1; i < evolution_.size(); i++) {
if (evolution_[i].octave > evolution_[i - 1].octave) { if (evolution_[i].octave > evolution_[i - 1].octave) {
halfsample_image(evolution_[i - 1].Lt, evolution_[i].Lt); halfsample_image(evolution_[i - 1].Lt, evolution_[i].Lt);
options_.kcontrast = options_.kcontrast*0.75; options_.kcontrast = options_.kcontrast*0.75f;
} }
else { else {
evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); evolution_[i - 1].Lt.copyTo(evolution_[i].Lt);
} }
gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0); gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0f);
// Compute the Gaussian derivatives Lx and Ly // Compute the Gaussian derivatives Lx and Ly
image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0);
@@ -167,8 +167,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
} }
} }
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency();
return 0; return 0;
} }
@@ -180,9 +180,9 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
*/ */
void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) { void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
double t1 = 0.0, t2 = 0.0; //double t1 = 0.0, t2 = 0.0;
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
kpts.clear(); kpts.clear();
@@ -190,8 +190,8 @@ void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
Find_Scale_Space_Extrema(kpts); Find_Scale_Space_Extrema(kpts);
Do_Subpixel_Refinement(kpts); Do_Subpixel_Refinement(kpts);
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency();
} }
/* ************************************************************************* */ /* ************************************************************************* */
@@ -200,7 +200,7 @@ class MultiscaleDerivativesInvoker : public cv::ParallelLoopBody
{ {
public: public:
explicit MultiscaleDerivativesInvoker(std::vector<TEvolution>& ev, const AKAZEOptions& opt) explicit MultiscaleDerivativesInvoker(std::vector<TEvolution>& ev, const AKAZEOptions& opt)
: evolution_(ev) : evolution_(&ev)
, options_(opt) , options_(opt)
{ {
} }
@@ -208,27 +208,29 @@ public:
void operator()(const cv::Range& range) const void operator()(const cv::Range& range) const
{ {
std::vector<TEvolution>& evolution = *evolution_;
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
float ratio = pow(2.f, (float)evolution_[i].octave); float ratio = pow(2.f, (float)evolution[i].octave);
int sigma_size_ = fRound(evolution_[i].esigma * options_.derivative_factor / ratio); int sigma_size_ = fRound(evolution[i].esigma * options_.derivative_factor / ratio);
compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, sigma_size_);
compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, sigma_size_);
compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, sigma_size_);
compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, sigma_size_);
compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, sigma_size_);
evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); evolution[i].Lx = evolution[i].Lx*((sigma_size_));
evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); evolution[i].Ly = evolution[i].Ly*((sigma_size_));
evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); evolution[i].Lxx = evolution[i].Lxx*((sigma_size_)*(sigma_size_));
evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); evolution[i].Lxy = evolution[i].Lxy*((sigma_size_)*(sigma_size_));
evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); evolution[i].Lyy = evolution[i].Lyy*((sigma_size_)*(sigma_size_));
} }
} }
private: private:
std::vector<TEvolution> & evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions options_; AKAZEOptions options_;
}; };
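Besides the cast fixes, this hunk also changes the invoker's member from a reference to a pointer and takes a local reference inside operator(). Presumably (an assumption, not stated in the commit message) this keeps the cv::ParallelLoopBody functor copy-assignable: a class with a reference member has no usable assignment operator, which some compilers report as a warning. A minimal sketch of the pattern, assuming the file's existing includes and the TEvolution type:

    class ExampleInvoker : public cv::ParallelLoopBody {
    public:
      explicit ExampleInvoker(std::vector<TEvolution>& ev) : evolution_(&ev) {}

      void operator()(const cv::Range& range) const {
        std::vector<TEvolution>& evolution = *evolution_;   // local alias, as in the diff
        for (int i = range.start; i < range.end; i++) {
          // ... work on evolution[i] ...
        }
      }

    private:
      std::vector<TEvolution>* evolution_;   // pointer member keeps the functor assignable
    };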
@@ -237,9 +239,9 @@ private:
*/ */
void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { void AKAZEFeatures::Compute_Multiscale_Derivatives(void) {
double t1 = 0.0, t2 = 0.0; //double t1 = 0.0, t2 = 0.0;
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
cv::parallel_for_(cv::Range(0, evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_)); cv::parallel_for_(cv::Range(0, evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_));
/* /*
@@ -261,8 +263,8 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) {
evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_));
} }
*/ */
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency();
} }
/* ************************************************************************* */ /* ************************************************************************* */
@@ -276,9 +278,10 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) {
Compute_Multiscale_Derivatives(); Compute_Multiscale_Derivatives();
for (size_t i = 0; i < evolution_.size(); i++) { for (size_t i = 0; i < evolution_.size(); i++) {
if (options_.verbosity == true) {
cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; //if (options_.verbosity == true) {
} // cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl;
//}
for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) { for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) {
for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) { for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) {
@@ -298,7 +301,7 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) {
*/ */
void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) { void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
double t1 = 0.0, t2 = 0.0; //double t1 = 0.0, t2 = 0.0;
float value = 0.0; float value = 0.0;
float dist = 0.0, ratio = 0.0, smax = 0.0; float dist = 0.0, ratio = 0.0, smax = 0.0;
int npoints = 0, id_repeated = 0; int npoints = 0, id_repeated = 0;
@@ -310,13 +313,13 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
// Set maximum size // Set maximum size
if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF || if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF ||
options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) { options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) {
smax = 10.0*sqrtf(2.0); smax = 10.0f*sqrtf(2.0f);
} }
else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) { else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) {
smax = 12.0*sqrtf(2.0); smax = 12.0f*sqrtf(2.0f);
} }
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
for (size_t i = 0; i < evolution_.size(); i++) { for (size_t i = 0; i < evolution_.size(); i++) {
for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) { for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) {
@@ -344,8 +347,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
point.class_id = i; point.class_id = i;
ratio = pow(2.f, point.octave); ratio = pow(2.f, point.octave);
sigma_size_ = fRound(point.size / ratio); sigma_size_ = fRound(point.size / ratio);
point.pt.x = jx; point.pt.x = static_cast<float>(jx);
point.pt.y = ix; point.pt.y = static_cast<float>(ix);
// Compare response with the same and lower scale // Compare response with the same and lower scale
for (size_t ik = 0; ik < kpts_aux.size(); ik++) { for (size_t ik = 0; ik < kpts_aux.size(); ik++) {
@@ -422,8 +425,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
kpts.push_back(point); kpts.push_back(point);
} }
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency();
} }
/* ************************************************************************* */ /* ************************************************************************* */
@@ -433,7 +436,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
*/ */
void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) { void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
double t1 = 0.0, t2 = 0.0; //double t1 = 0.0, t2 = 0.0;
float Dx = 0.0, Dy = 0.0, ratio = 0.0; float Dx = 0.0, Dy = 0.0, ratio = 0.0;
float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0;
int x = 0, y = 0; int x = 0, y = 0;
@@ -441,7 +444,7 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); cv::Mat b = cv::Mat::zeros(2, 1, CV_32F);
cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F);
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
for (size_t i = 0; i < kpts.size(); i++) { for (size_t i = 0; i < kpts.size(); i++) {
ratio = pow(2.f, kpts[i].octave); ratio = pow(2.f, kpts[i].octave);
@@ -449,23 +452,23 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
y = fRound(kpts[i].pt.y / ratio); y = fRound(kpts[i].pt.y / ratio);
// Compute the gradient // Compute the gradient
Dx = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x + 1) Dx = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x + 1)
- *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x - 1)); - *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x - 1));
Dy = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x) Dy = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x)
- *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x)); - *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x));
// Compute the Hessian // Compute the Hessian
Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x + 1) Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x + 1)
+ *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x - 1) + *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x - 1)
- 2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x))); - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x)));
Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x) Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x)
+ *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x) + *(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x)
- 2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x))); - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y)+x)));
Dxy = (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x + 1) Dxy = (0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x + 1)
+ (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x - 1))) + (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x - 1)))
- (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x + 1) - (0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y - 1) + x + 1)
+ (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x - 1))); + (*(evolution_[kpts[i].class_id].Ldet.ptr<float>(y + 1) + x - 1)));
// Solve the linear system // Solve the linear system
@@ -477,15 +480,15 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
cv::solve(A, b, dst, DECOMP_LU); cv::solve(A, b, dst, DECOMP_LU);
if (fabs(*(dst.ptr<float>(0))) <= 1.0 && fabs(*(dst.ptr<float>(1))) <= 1.0) { if (fabs(*(dst.ptr<float>(0))) <= 1.0f && fabs(*(dst.ptr<float>(1))) <= 1.0f) {
kpts[i].pt.x = x + (*(dst.ptr<float>(0))); kpts[i].pt.x = x + (*(dst.ptr<float>(0)));
kpts[i].pt.y = y + (*(dst.ptr<float>(1))); kpts[i].pt.y = y + (*(dst.ptr<float>(1)));
kpts[i].pt.x *= powf(2.f, evolution_[kpts[i].class_id].octave); kpts[i].pt.x *= powf(2.f, (float)evolution_[kpts[i].class_id].octave);
kpts[i].pt.y *= powf(2.f, evolution_[kpts[i].class_id].octave); kpts[i].pt.y *= powf(2.f, (float)evolution_[kpts[i].class_id].octave);
kpts[i].angle = 0.0; kpts[i].angle = 0.0;
// In OpenCV the size of a keypoint its the diameter // In OpenCV the size of a keypoint its the diameter
kpts[i].size *= 2.0; kpts[i].size *= 2.0f;
} }
// Delete the point since its not stable // Delete the point since its not stable
else { else {
@@ -494,8 +497,8 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
} }
} }
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency();
} }
/* ************************************************************************* */ /* ************************************************************************* */
@@ -554,10 +557,10 @@ class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody
{ {
public: public:
SURF_Descriptor_Upright_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options) SURF_Descriptor_Upright_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
{ {
} }
@@ -565,27 +568,27 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
Get_SURF_Descriptor_Upright_64(keypoints_[i], descriptors_.ptr<float>(i)); Get_SURF_Descriptor_Upright_64((*keypoints_)[i], descriptors_->ptr<float>(i));
} }
} }
void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
}; };
class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody
{ {
public: public:
SURF_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options) SURF_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
{ {
} }
@@ -593,28 +596,28 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_);
Get_SURF_Descriptor_64(keypoints_[i], descriptors_.ptr<float>(i)); Get_SURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i));
} }
} }
void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
}; };
class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody
{ {
public: public:
MSURF_Upright_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options) MSURF_Upright_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
{ {
} }
@@ -622,27 +625,27 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
Get_MSURF_Upright_Descriptor_64(keypoints_[i], descriptors_.ptr<float>(i)); Get_MSURF_Upright_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i));
} }
} }
void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
}; };
class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody
{ {
public: public:
MSURF_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options) MSURF_Descriptor_64_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
{ {
} }
@@ -650,28 +653,28 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_);
Get_MSURF_Descriptor_64(keypoints_[i], descriptors_.ptr<float>(i)); Get_MSURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i));
} }
} }
void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
}; };
class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody
{ {
public: public:
Upright_MLDB_Full_Descriptor_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options) Upright_MLDB_Full_Descriptor_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
{ {
} }
@@ -679,17 +682,17 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
Get_Upright_MLDB_Full_Descriptor(keypoints_[i], descriptors_.ptr<unsigned char>(i)); Get_Upright_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr<unsigned char>(i));
} }
} }
void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
}; };
class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody
@@ -701,10 +704,10 @@ public:
AKAZEOptions& options, AKAZEOptions& options,
cv::Mat descriptorSamples, cv::Mat descriptorSamples,
cv::Mat descriptorBits) cv::Mat descriptorBits)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
, descriptorSamples_(descriptorSamples) , descriptorSamples_(descriptorSamples)
, descriptorBits_(descriptorBits) , descriptorBits_(descriptorBits)
{ {
@@ -714,17 +717,17 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
Get_Upright_MLDB_Descriptor_Subset(keypoints_[i], descriptors_.ptr<unsigned char>(i)); Get_Upright_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr<unsigned char>(i));
} }
} }
void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
cv::Mat descriptorBits_; cv::Mat descriptorBits_;
@@ -734,10 +737,10 @@ class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody
{ {
public: public:
MLDB_Full_Descriptor_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options) MLDB_Full_Descriptor_Invoker(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc, std::vector<TEvolution>& evolution, AKAZEOptions& options)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
{ {
} }
@@ -745,18 +748,18 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_);
Get_MLDB_Full_Descriptor(keypoints_[i], descriptors_.ptr<unsigned char>(i)); Get_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr<unsigned char>(i));
} }
} }
void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
}; };
class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody
@@ -768,10 +771,10 @@ public:
AKAZEOptions& options, AKAZEOptions& options,
cv::Mat descriptorSamples, cv::Mat descriptorSamples,
cv::Mat descriptorBits) cv::Mat descriptorBits)
: keypoints_(kpts) : keypoints_(&kpts)
, descriptors_(desc) , descriptors_(&desc)
, evolution_(evolution) , evolution_(&evolution)
, options_(options) , options_(&options)
, descriptorSamples_(descriptorSamples) , descriptorSamples_(descriptorSamples)
, descriptorBits_(descriptorBits) , descriptorBits_(descriptorBits)
{ {
@@ -781,18 +784,18 @@ public:
{ {
for (int i = range.start; i < range.end; i++) for (int i = range.start; i < range.end; i++)
{ {
AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_);
Get_MLDB_Descriptor_Subset(keypoints_[i], descriptors_.ptr<unsigned char>(i)); Get_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr<unsigned char>(i));
} }
} }
void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const;
private: private:
std::vector<cv::KeyPoint>& keypoints_; std::vector<cv::KeyPoint>* keypoints_;
cv::Mat& descriptors_; cv::Mat* descriptors_;
std::vector<TEvolution>& evolution_; std::vector<TEvolution>* evolution_;
AKAZEOptions& options_; AKAZEOptions* options_;
cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
cv::Mat descriptorBits_; cv::Mat descriptorBits_;
@@ -805,9 +808,9 @@ private:
*/ */
void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc) { void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc) {
double t1 = 0.0, t2 = 0.0; //double t1 = 0.0, t2 = 0.0;
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
// Allocate memory for the matrix with the descriptors // Allocate memory for the matrix with the descriptors
if (options_.descriptor < MLDB_UPRIGHT) { if (options_.descriptor < MLDB_UPRIGHT) {
@@ -817,11 +820,11 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
// We use the full length binary descriptor -> 486 bits // We use the full length binary descriptor -> 486 bits
if (options_.descriptor_size == 0) { if (options_.descriptor_size == 0) {
int t = (6 + 36 + 120)*options_.descriptor_channels; int t = (6 + 36 + 120)*options_.descriptor_channels;
desc = cv::Mat::zeros(kpts.size(), ceil(t / 8.), CV_8UC1); desc = cv::Mat::zeros(kpts.size(), (int)ceil(t / 8.), CV_8UC1);
} }
else { else {
// We use the random bit selection length binary descriptor // We use the random bit selection length binary descriptor
desc = cv::Mat::zeros(kpts.size(), ceil(options_.descriptor_size / 8.), CV_8UC1); desc = cv::Mat::zeros(kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1);
} }
} }
@@ -898,8 +901,8 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
break; break;
} }
t2 = cv::getTickCount(); //t2 = cv::getTickCount();
timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency(); //timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency();
} }
/* ************************************************************************* */ /* ************************************************************************* */
@@ -922,7 +925,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto
// Get the information from the keypoint // Get the information from the keypoint
level = kpt.class_id; level = kpt.class_id;
ratio = (float)(1 << evolution_[level].octave); ratio = (float)(1 << evolution_[level].octave);
s = fRound(0.5*kpt.size / ratio); s = fRound(0.5f*kpt.size / ratio);
xf = kpt.pt.x / ratio; xf = kpt.pt.x / ratio;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
@@ -944,8 +947,8 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto
} }
// Loop slides pi/3 window around feature point // Loop slides pi/3 window around feature point
for (ang1 = 0; ang1 < 2.0*CV_PI; ang1 += 0.15f) { for (ang1 = 0; ang1 < (float)(2.0 * CV_PI); ang1 += 0.15f) {
ang2 = (ang1 + CV_PI / 3.0f > 2.0*CV_PI ? ang1 - 5.0f*CV_PI / 3.0f : ang1 + CV_PI / 3.0f); ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0));
sumX = sumY = 0.f; sumX = sumY = 0.f;
for (size_t k = 0; k < Ang.size(); ++k) { for (size_t k = 0; k < Ang.size(); ++k) {
@@ -958,7 +961,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto
sumY += resY[k]; sumY += resY[k];
} }
else if (ang2 < ang1 && else if (ang2 < ang1 &&
((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0f*CV_PI))) {
sumX += resX[k]; sumX += resX[k];
sumY += resY[k]; sumY += resY[k];
} }
@@ -991,6 +994,8 @@ void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv
int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
int scale = 0, dsize = 0, level = 0; int scale = 0, dsize = 0, level = 0;
const std::vector<TEvolution>& evolution = *evolution_;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 64; dsize = 64;
sample_step = 5; sample_step = 5;
@@ -998,7 +1003,7 @@ void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv
// Get the information from the keypoint // Get the information from the keypoint
ratio = (float)(1 << kpt.octave); ratio = (float)(1 << kpt.octave);
scale = fRound(0.5*kpt.size / ratio); scale = fRound(0.5f*kpt.size / ratio);
level = kpt.class_id; level = kpt.class_id;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
xf = kpt.pt.x / ratio; xf = kpt.pt.x / ratio;
@@ -1014,26 +1019,26 @@ void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv
sample_y = yf + l*scale; sample_y = yf + l*scale;
sample_x = xf + k*scale; sample_x = xf + k*scale;
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
fx = sample_x - x1; fx = sample_x - x1;
fy = sample_y - y1; fy = sample_y - y1;
res1 = *(evolution_[level].Lx.ptr<float>(y1)+x1); res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Sum the derivatives to the cumulative descriptor // Sum the derivatives to the cumulative descriptor
dx += rx; dx += rx;
@@ -1086,9 +1091,11 @@ void SURF_Descriptor_64_Invoker::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt,
sample_step = 5; sample_step = 5;
pattern_size = 10; pattern_size = 10;
const std::vector<TEvolution>& evolution = *evolution_;
// Get the information from the keypoint // Get the information from the keypoint
ratio = (float)(1 << kpt.octave); ratio = (float)(1 << kpt.octave);
scale = fRound(0.5*kpt.size / ratio); scale = fRound(0.5f*kpt.size / ratio);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
@@ -1107,26 +1114,26 @@ void SURF_Descriptor_64_Invoker::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt,
sample_y = yf + (l*scale*co + k*scale*si); sample_y = yf + (l*scale*co + k*scale*si);
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
fx = sample_x - x1; fx = sample_x - x1;
fy = sample_y - y1; fy = sample_y - y1;
res1 = *(evolution_[level].Lx.ptr<float>(y1)+x1); res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = rx*co + ry*si; rry = rx*co + ry*si;
@@ -1180,7 +1187,9 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const
int scale = 0, dsize = 0, level = 0; int scale = 0, dsize = 0, level = 0;
// Subregion centers for the 4x4 gaussian weighting // Subregion centers for the 4x4 gaussian weighting
float cx = -0.5, cy = 0.5; float cx = -0.5f, cy = 0.5f;
const std::vector<TEvolution>& evolution = *evolution_;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 64; dsize = 64;
@@ -1189,7 +1198,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const
// Get the information from the keypoint // Get the information from the keypoint
ratio = (float)(1 << kpt.octave); ratio = (float)(1 << kpt.octave);
scale = fRound(0.5*kpt.size / ratio); scale = fRound(0.5f*kpt.size / ratio);
level = kpt.class_id; level = kpt.class_id;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
xf = kpt.pt.x / ratio; xf = kpt.pt.x / ratio;
@@ -1202,12 +1211,12 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const
j = -8; j = -8;
i = i - 4; i = i - 4;
cx += 1.0; cx += 1.0f;
cy = -0.5; cy = -0.5f;
while (j < pattern_size) { while (j < pattern_size) {
dx = dy = mdx = mdy = 0.0; dx = dy = mdx = mdy = 0.0;
cy += 1.0; cy += 1.0f;
j = j - 4; j = j - 4;
ky = i + sample_step; ky = i + sample_step;
@@ -1222,7 +1231,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const
sample_x = l*scale + xf; sample_x = l*scale + xf;
//Get the gaussian weighted x and y responses //Get the gaussian weighted x and y responses
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50*scale); gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50f*scale);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - .5);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - .5);
@@ -1233,17 +1242,17 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const
fx = sample_x - x1; fx = sample_x - x1;
fy = sample_y - y1; fy = sample_y - y1;
res1 = *(evolution_[level].Lx.ptr<float>(y1)+x1); res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
rx = gauss_s1*rx; rx = gauss_s1*rx;
ry = gauss_s1*ry; ry = gauss_s1*ry;
@@ -1301,7 +1310,9 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp
int scale = 0, dsize = 0, level = 0; int scale = 0, dsize = 0, level = 0;
// Subregion centers for the 4x4 gaussian weighting // Subregion centers for the 4x4 gaussian weighting
float cx = -0.5, cy = 0.5; float cx = -0.5f, cy = 0.5f;
const std::vector<TEvolution>& evolution = *evolution_;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 64; dsize = 64;
@@ -1310,7 +1321,7 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp
// Get the information from the keypoint // Get the information from the keypoint
ratio = (float)(1 << kpt.octave); ratio = (float)(1 << kpt.octave);
scale = fRound(0.5*kpt.size / ratio); scale = fRound(0.5f*kpt.size / ratio);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
@@ -1326,12 +1337,12 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp
j = -8; j = -8;
i = i - 4; i = i - 4;
cx += 1.0; cx += 1.0f;
cy = -0.5; cy = -0.5f;
while (j < pattern_size) { while (j < pattern_size) {
dx = dy = mdx = mdy = 0.0; dx = dy = mdx = mdy = 0.0;
cy += 1.0; cy += 1.0f;
j = j - 4; j = j - 4;
ky = i + sample_step; ky = i + sample_step;
@@ -1347,28 +1358,28 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
// Get the gaussian weighted x and y responses // Get the gaussian weighted x and y responses
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
y1 = fRound(sample_y - .5); y1 = fRound(sample_y - 0.5f);
x1 = fRound(sample_x - .5); x1 = fRound(sample_x - 0.5f);
y2 = fRound(sample_y + .5); y2 = fRound(sample_y + 0.5f);
x2 = fRound(sample_x + .5); x2 = fRound(sample_x + 0.5f);
fx = sample_x - x1; fx = sample_x - x1;
fy = sample_y - y1; fy = sample_y - y1;
res1 = *(evolution_[level].Lx.ptr<float>(y1)+x1); res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = gauss_s1*(rx*co + ry*si); rry = gauss_s1*(rx*co + ry*si);
@@ -1421,20 +1432,23 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons
int level = 0, nsamples = 0, scale = 0; int level = 0, nsamples = 0, scale = 0;
int dcount1 = 0, dcount2 = 0; int dcount1 = 0, dcount2 = 0;
const AKAZEOptions & options = *options_;
const std::vector<TEvolution>& evolution = *evolution_;
// Matrices for the M-LDB descriptor // Matrices for the M-LDB descriptor
cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1);
cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1);
cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1);
// Get the information from the keypoint // Get the information from the keypoint
ratio = (float)(1 << kpt.octave); ratio = (float)(1 << kpt.octave);
scale = fRound(0.5*kpt.size / ratio); scale = fRound(0.5f*kpt.size / ratio);
level = kpt.class_id; level = kpt.class_id;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
xf = kpt.pt.x / ratio; xf = kpt.pt.x / ratio;
// First 2x2 grid // First 2x2 grid
pattern_size = options_.descriptor_pattern_size; pattern_size = options_->descriptor_pattern_size;
sample_step = pattern_size; sample_step = pattern_size;
for (int i = -pattern_size; i < pattern_size; i += sample_step) { for (int i = -pattern_size; i < pattern_size; i += sample_step) {
@@ -1452,9 +1466,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
ri = *(evolution_[level].Lt.ptr<float>(y1)+x1); ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
di += ri; di += ri;
dx += rx; dx += rx;
@@ -1495,7 +1509,7 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons
} }
// Second 3x3 grid // Second 3x3 grid
sample_step = ceil(pattern_size*2. / 3.); sample_step = static_cast<int>(ceil(pattern_size*2. / 3.));
dcount2 = 0; dcount2 = 0;
for (int i = -pattern_size; i < pattern_size; i += sample_step) { for (int i = -pattern_size; i < pattern_size; i += sample_step) {
@@ -1513,9 +1527,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
ri = *(evolution_[level].Lt.ptr<float>(y1)+x1); ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
di += ri; di += ri;
dx += rx; dx += rx;
@@ -1575,9 +1589,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
ri = *(evolution_[level].Lt.ptr<float>(y1)+x1); ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
di += ri; di += ri;
dx += rx; dx += rx;
@@ -1635,14 +1649,17 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
int level = 0, nsamples = 0, scale = 0; int level = 0, nsamples = 0, scale = 0;
int dcount1 = 0, dcount2 = 0; int dcount1 = 0, dcount2 = 0;
const AKAZEOptions & options = *options_;
const std::vector<TEvolution>& evolution = *evolution_;
// Matrices for the M-LDB descriptor // Matrices for the M-LDB descriptor
cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1);
cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1);
cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1);
// Get the information from the keypoint // Get the information from the keypoint
ratio = (float)(1 << kpt.octave); ratio = (float)(1 << kpt.octave);
scale = fRound(0.5*kpt.size / ratio); scale = fRound(0.5f*kpt.size / ratio);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
yf = kpt.pt.y / ratio; yf = kpt.pt.y / ratio;
@@ -1651,7 +1668,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
si = sin(angle); si = sin(angle);
// First 2x2 grid // First 2x2 grid
pattern_size = options_.descriptor_pattern_size; pattern_size = options.descriptor_pattern_size;
sample_step = pattern_size; sample_step = pattern_size;
for (int i = -pattern_size; i < pattern_size; i += sample_step) { for (int i = -pattern_size; i < pattern_size; i += sample_step) {
@@ -1660,8 +1677,8 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
di = dx = dy = 0.0; di = dx = dy = 0.0;
nsamples = 0; nsamples = 0;
for (float k = i; k < i + sample_step; k++) { for (float k = (float)i; k < i + sample_step; k++) {
for (float l = j; l < j + sample_step; l++) { for (float l = (float)j; l < j + sample_step; l++) {
// Get the coordinates of the sample point // Get the coordinates of the sample point
sample_y = yf + (l*scale*co + k*scale*si); sample_y = yf + (l*scale*co + k*scale*si);
@@ -1670,16 +1687,16 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
ri = *(evolution_[level].Lt.ptr<float>(y1)+x1); ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
di += ri; di += ri;
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
dx += sqrtf(rx*rx + ry*ry); dx += sqrtf(rx*rx + ry*ry);
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = rx*co + ry*si; rry = rx*co + ry*si;
rrx = -rx*si + ry*co; rrx = -rx*si + ry*co;
...@@ -1696,11 +1713,11 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1696,11 +1713,11 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
dy /= nsamples; dy /= nsamples;
*(values_1.ptr<float>(dcount2)) = di; *(values_1.ptr<float>(dcount2)) = di;
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
*(values_1.ptr<float>(dcount2)+1) = dx; *(values_1.ptr<float>(dcount2)+1) = dx;
} }
if (options_.descriptor_channels > 2) { if (options.descriptor_channels > 2) {
*(values_1.ptr<float>(dcount2)+2) = dy; *(values_1.ptr<float>(dcount2)+2) = dy;
} }
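The 2x2 grid above samples Lt, Lx and Ly on a grid that is scaled by the keypoint size and rotated by its orientation; in this patch only the float casts on the loop counters and the options/evolution references change. A small helper capturing just the coordinate transform, under the same conventions as the loop (co/si are cos/sin of the keypoint angle, scale is the integer scale from fRound):

struct SamplePoint { float x, y; };

// Map a pattern offset (k, l) to image coordinates around the keypoint
// centre (xf, yf): scale by the keypoint scale, then rotate by its angle.
static SamplePoint rotated_sample(float xf, float yf, float k, float l,
                                  int scale, float co, float si) {
    SamplePoint p;
    p.y = yf + (l * scale * co + k * scale * si);
    p.x = xf + (-l * scale * si + k * scale * co);
    return p;
}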
...@@ -1718,7 +1735,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1718,7 +1735,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
} }
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
for (int j = i + 1; j < 4; j++) { for (int j = i + 1; j < 4; j++) {
if (*(values_1.ptr<float>(i)+1) > *(values_1.ptr<float>(j)+1)) { if (*(values_1.ptr<float>(i)+1) > *(values_1.ptr<float>(j)+1)) {
...@@ -1730,7 +1747,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1730,7 +1747,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
} }
if (options_.descriptor_channels > 2) { if (options.descriptor_channels > 2) {
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
for (int j = i + 1; j < 4; j++) { for (int j = i + 1; j < 4; j++) {
if (*(values_1.ptr<float>(i)+2) > *(values_1.ptr<float>(j)+2)) { if (*(values_1.ptr<float>(i)+2) > *(values_1.ptr<float>(j)+2)) {
...@@ -1742,7 +1759,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1742,7 +1759,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
// Second 3x3 grid // Second 3x3 grid
sample_step = ceil(pattern_size*2. / 3.); sample_step = static_cast<int>(ceil(pattern_size*2. / 3.));
dcount2 = 0; dcount2 = 0;
for (int i = -pattern_size; i < pattern_size; i += sample_step) { for (int i = -pattern_size; i < pattern_size; i += sample_step) {
...@@ -1761,15 +1778,15 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1761,15 +1778,15 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
ri = *(evolution_[level].Lt.ptr<float>(y1)+x1); ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
di += ri; di += ri;
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
dx += sqrtf(rx*rx + ry*ry); dx += sqrtf(rx*rx + ry*ry);
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = rx*co + ry*si; rry = rx*co + ry*si;
rrx = -rx*si + ry*co; rrx = -rx*si + ry*co;
...@@ -1786,11 +1803,11 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1786,11 +1803,11 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
dy /= nsamples; dy /= nsamples;
*(values_2.ptr<float>(dcount2)) = di; *(values_2.ptr<float>(dcount2)) = di;
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
*(values_2.ptr<float>(dcount2)+1) = dx; *(values_2.ptr<float>(dcount2)+1) = dx;
} }
if (options_.descriptor_channels > 2) { if (options.descriptor_channels > 2) {
*(values_2.ptr<float>(dcount2)+2) = dy; *(values_2.ptr<float>(dcount2)+2) = dy;
} }
...@@ -1808,7 +1825,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1808,7 +1825,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
} }
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
for (int i = 0; i < 9; i++) { for (int i = 0; i < 9; i++) {
for (int j = i + 1; j < 9; j++) { for (int j = i + 1; j < 9; j++) {
if (*(values_2.ptr<float>(i)+1) > *(values_2.ptr<float>(j)+1)) { if (*(values_2.ptr<float>(i)+1) > *(values_2.ptr<float>(j)+1)) {
...@@ -1819,7 +1836,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1819,7 +1836,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
} }
if (options_.descriptor_channels > 2) { if (options.descriptor_channels > 2) {
for (int i = 0; i < 9; i++) { for (int i = 0; i < 9; i++) {
for (int j = i + 1; j < 9; j++) { for (int j = i + 1; j < 9; j++) {
if (*(values_2.ptr<float>(i)+2) > *(values_2.ptr<float>(j)+2)) { if (*(values_2.ptr<float>(i)+2) > *(values_2.ptr<float>(j)+2)) {
...@@ -1849,15 +1866,15 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1849,15 +1866,15 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
ri = *(evolution_[level].Lt.ptr<float>(y1)+x1); ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
di += ri; di += ri;
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
dx += sqrtf(rx*rx + ry*ry); dx += sqrtf(rx*rx + ry*ry);
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = rx*co + ry*si; rry = rx*co + ry*si;
rrx = -rx*si + ry*co; rrx = -rx*si + ry*co;
...@@ -1874,10 +1891,10 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1874,10 +1891,10 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
dy /= nsamples; dy /= nsamples;
*(values_3.ptr<float>(dcount2)) = di; *(values_3.ptr<float>(dcount2)) = di;
if (options_.descriptor_channels > 1) if (options.descriptor_channels > 1)
*(values_3.ptr<float>(dcount2)+1) = dx; *(values_3.ptr<float>(dcount2)+1) = dx;
if (options_.descriptor_channels > 2) if (options.descriptor_channels > 2)
*(values_3.ptr<float>(dcount2)+2) = dy; *(values_3.ptr<float>(dcount2)+2) = dy;
dcount2++; dcount2++;
...@@ -1894,7 +1911,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1894,7 +1911,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
} }
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
for (int i = 0; i < 16; i++) { for (int i = 0; i < 16; i++) {
for (int j = i + 1; j < 16; j++) { for (int j = i + 1; j < 16; j++) {
if (*(values_3.ptr<float>(i)+1) > *(values_3.ptr<float>(j)+1)) { if (*(values_3.ptr<float>(i)+1) > *(values_3.ptr<float>(j)+1)) {
...@@ -1905,7 +1922,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& ...@@ -1905,7 +1922,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint&
} }
} }
if (options_.descriptor_channels > 2) { if (options.descriptor_channels > 2) {
for (int i = 0; i < 16; i++) { for (int i = 0; i < 16; i++) {
for (int j = i + 1; j < 16; j++) { for (int j = i + 1; j < 16; j++) {
if (*(values_3.ptr<float>(i)+2) > *(values_3.ptr<float>(j)+2)) { if (*(values_3.ptr<float>(i)+2) > *(values_3.ptr<float>(j)+2)) {
...@@ -1932,24 +1949,27 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi ...@@ -1932,24 +1949,27 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi
float sample_x = 0.f, sample_y = 0.f; float sample_x = 0.f, sample_y = 0.f;
int x1 = 0, y1 = 0; int x1 = 0, y1 = 0;
const AKAZEOptions & options = *options_;
const std::vector<TEvolution>& evolution = *evolution_;
// Get the information from the keypoint // Get the information from the keypoint
float ratio = (float)(1 << kpt.octave); float ratio = (float)(1 << kpt.octave);
int scale = fRound(0.5*kpt.size / ratio); int scale = fRound(0.5f*kpt.size / ratio);
float angle = kpt.angle; float angle = kpt.angle;
float level = kpt.class_id; int level = kpt.class_id;
float yf = kpt.pt.y / ratio; float yf = kpt.pt.y / ratio;
float xf = kpt.pt.x / ratio; float xf = kpt.pt.x / ratio;
float co = cos(angle); float co = cos(angle);
float si = sin(angle); float si = sin(angle);
// Allocate memory for the matrix of values // Allocate memory for the matrix of values
cv::Mat values = cv::Mat_<float>::zeros((4 + 9 + 16)*options_.descriptor_channels, 1); cv::Mat values = cv::Mat_<float>::zeros((4 + 9 + 16)*options.descriptor_channels, 1);
// Sample everything, but only do the comparisons // Sample everything, but only do the comparisons
vector<int> steps(3); vector<int> steps(3);
steps.at(0) = options_.descriptor_pattern_size; steps.at(0) = options.descriptor_pattern_size;
steps.at(1) = ceil(2.f*options_.descriptor_pattern_size / 3.f); steps.at(1) = (int)ceil(2.f*options.descriptor_pattern_size / 3.f);
steps.at(2) = options_.descriptor_pattern_size / 2; steps.at(2) = options.descriptor_pattern_size / 2;
for (int i = 0; i < descriptorSamples_.rows; i++) { for (int i = 0; i < descriptorSamples_.rows; i++) {
const int *coords = descriptorSamples_.ptr<int>(i); const int *coords = descriptorSamples_.ptr<int>(i);
...@@ -1968,16 +1988,16 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi ...@@ -1968,16 +1988,16 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
di += *(evolution_[level].Lt.ptr<float>(y1)+x1); di += *(evolution[level].Lt.ptr<float>(y1)+x1);
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
dx += sqrtf(rx*rx + ry*ry); dx += sqrtf(rx*rx + ry*ry);
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
dx += rx*co + ry*si; dx += rx*co + ry*si;
dy += -rx*si + ry*co; dy += -rx*si + ry*co;
...@@ -1986,14 +2006,14 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi ...@@ -1986,14 +2006,14 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi
} }
} }
*(values.ptr<float>(options_.descriptor_channels*i)) = di; *(values.ptr<float>(options.descriptor_channels*i)) = di;
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
*(values.ptr<float>(options_.descriptor_channels*i + 1)) = dx; *(values.ptr<float>(options.descriptor_channels*i + 1)) = dx;
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
*(values.ptr<float>(options_.descriptor_channels*i + 1)) = dx; *(values.ptr<float>(options.descriptor_channels*i + 1)) = dx;
*(values.ptr<float>(options_.descriptor_channels*i + 2)) = dy; *(values.ptr<float>(options.descriptor_channels*i + 2)) = dy;
} }
} }
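The other recurring change in this file is visible at the top of each invoker body: the parallel invokers hold pointers to the shared AKAZEOptions and evolution pyramid, and each descriptor routine now takes plain references once (const AKAZEOptions& options = *options_;) so the rest of the body reads options. and evolution[...] directly. A hypothetical skeleton of that arrangement; the surrounding class and fields are illustrative, only the two reference lines come from the diff:

#include <vector>

struct AKAZEOptions { int descriptor_channels; int descriptor_pattern_size; };
struct TEvolution   { /* Lt, Lx, Ly, ... */ };

class DescriptorInvokerSketch {
public:
    DescriptorInvokerSketch(const AKAZEOptions* opts, const std::vector<TEvolution>* evo)
        : options_(opts), evolution_(evo) {}

    void compute_one() const {
        // Dereference the shared state once; the sampling code uses references from here on.
        const AKAZEOptions& options = *options_;
        const std::vector<TEvolution>& evolution = *evolution_;
        (void)options; (void)evolution;            // descriptor sampling would go here
    }

private:
    const AKAZEOptions* options_;                  // shared configuration, not owned
    const std::vector<TEvolution>* evolution_;     // nonlinear scale space levels
};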
...@@ -2023,20 +2043,23 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( ...@@ -2023,20 +2043,23 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
float sample_x = 0.0f, sample_y = 0.0f; float sample_x = 0.0f, sample_y = 0.0f;
int x1 = 0, y1 = 0; int x1 = 0, y1 = 0;
const AKAZEOptions & options = *options_;
const std::vector<TEvolution>& evolution = *evolution_;
// Get the information from the keypoint // Get the information from the keypoint
float ratio = (float)(1 << kpt.octave); float ratio = (float)(1 << kpt.octave);
int scale = fRound(0.5*kpt.size / ratio); int scale = fRound(0.5f*kpt.size / ratio);
float level = kpt.class_id; int level = kpt.class_id;
float yf = kpt.pt.y / ratio; float yf = kpt.pt.y / ratio;
float xf = kpt.pt.x / ratio; float xf = kpt.pt.x / ratio;
// Allocate memory for the matrix of values // Allocate memory for the matrix of values
Mat values = cv::Mat_<float>::zeros((4 + 9 + 16)*options_.descriptor_channels, 1); Mat values = cv::Mat_<float>::zeros((4 + 9 + 16)*options.descriptor_channels, 1);
vector<int> steps(3); vector<int> steps(3);
steps.at(0) = options_.descriptor_pattern_size; steps.at(0) = options.descriptor_pattern_size;
steps.at(1) = ceil(2.f*options_.descriptor_pattern_size / 3.f); steps.at(1) = static_cast<int>(ceil(2.f*options.descriptor_pattern_size / 3.f));
steps.at(2) = options_.descriptor_pattern_size / 2; steps.at(2) = options.descriptor_pattern_size / 2;
for (int i = 0; i < descriptorSamples_.rows; i++) { for (int i = 0; i < descriptorSamples_.rows; i++) {
const int *coords = descriptorSamples_.ptr<int>(i); const int *coords = descriptorSamples_.ptr<int>(i);
...@@ -2052,16 +2075,16 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( ...@@ -2052,16 +2075,16 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
y1 = fRound(sample_y); y1 = fRound(sample_y);
x1 = fRound(sample_x); x1 = fRound(sample_x);
di += *(evolution_[level].Lt.ptr<float>(y1)+x1); di += *(evolution[level].Lt.ptr<float>(y1)+x1);
if (options_.descriptor_channels > 1) { if (options.descriptor_channels > 1) {
rx = *(evolution_[level].Lx.ptr<float>(y1)+x1); rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
ry = *(evolution_[level].Ly.ptr<float>(y1)+x1); ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
dx += sqrtf(rx*rx + ry*ry); dx += sqrtf(rx*rx + ry*ry);
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
dx += rx; dx += rx;
dy += ry; dy += ry;
} }
...@@ -2069,14 +2092,14 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( ...@@ -2069,14 +2092,14 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
} }
} }
*(values.ptr<float>(options_.descriptor_channels*i)) = di; *(values.ptr<float>(options.descriptor_channels*i)) = di;
if (options_.descriptor_channels == 2) { if (options.descriptor_channels == 2) {
*(values.ptr<float>(options_.descriptor_channels*i + 1)) = dx; *(values.ptr<float>(options.descriptor_channels*i + 1)) = dx;
} }
else if (options_.descriptor_channels == 3) { else if (options.descriptor_channels == 3) {
*(values.ptr<float>(options_.descriptor_channels*i + 1)) = dx; *(values.ptr<float>(options.descriptor_channels*i + 1)) = dx;
*(values.ptr<float>(options_.descriptor_channels*i + 2)) = dy; *(values.ptr<float>(options.descriptor_channels*i + 2)) = dy;
} }
} }
...@@ -2097,15 +2120,15 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( ...@@ -2097,15 +2120,15 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
/** /**
* @brief This method displays the computation times * @brief This method displays the computation times
*/ */
void AKAZEFeatures::Show_Computation_Times() const { //void AKAZEFeatures::Show_Computation_Times() const {
cout << "(*) Time Scale Space: " << timing_.scale << endl; // cout << "(*) Time Scale Space: " << timing_.scale << endl;
cout << "(*) Time Detector: " << timing_.detector << endl; // cout << "(*) Time Detector: " << timing_.detector << endl;
cout << " - Time Derivatives: " << timing_.derivatives << endl; // cout << " - Time Derivatives: " << timing_.derivatives << endl;
cout << " - Time Extrema: " << timing_.extrema << endl; // cout << " - Time Extrema: " << timing_.extrema << endl;
cout << " - Time Subpixel: " << timing_.subpixel << endl; // cout << " - Time Subpixel: " << timing_.subpixel << endl;
cout << "(*) Time Descriptor: " << timing_.descriptor << endl; // cout << "(*) Time Descriptor: " << timing_.descriptor << endl;
cout << endl; // cout << endl;
} //}
/* ************************************************************************* */ /* ************************************************************************* */
/** /**
...@@ -2142,7 +2165,7 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int ...@@ -2142,7 +2165,7 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int
for (size_t i = 0, c = 0; i < 3; i++) { for (size_t i = 0, c = 0; i < 3; i++) {
int gdiv = i + 2; //grid divisions, per row int gdiv = i + 2; //grid divisions, per row
int gsz = gdiv*gdiv; int gsz = gdiv*gdiv;
int psz = ceil(2.*pattern_size / (float)gdiv); int psz = (int)ceil(2.f*pattern_size / (float)gdiv);
for (int j = 0; j < gsz; j++) { for (int j = 0; j < gsz; j++) {
for (int k = j + 1; k < gsz; k++, c++) { for (int k = j + 1; k < gsz; k++, c++) {
...@@ -2156,12 +2179,12 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int ...@@ -2156,12 +2179,12 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int
} }
srand(1024); srand(1024);
Mat_<int> comps = Mat_<int>(nchannels*ceil(nbits / (float)nchannels), 2); Mat_<int> comps = Mat_<int>(nchannels * (int)ceil(nbits / (float)nchannels), 2);
comps = 1000; comps = 1000;
// Select some samples. A sample includes all channels // Select some samples. A sample includes all channels
int count = 0; int count = 0;
size_t npicks = ceil(nbits / (float)nchannels); size_t npicks = (size_t)ceil(nbits / (float)nchannels);
Mat_<int> samples(29, 3); Mat_<int> samples(29, 3);
Mat_<int> fullcopy = fullM.clone(); Mat_<int> fullcopy = fullM.clone();
samples = -1; samples = -1;
...@@ -2235,15 +2258,15 @@ inline float get_angle(float x, float y) { ...@@ -2235,15 +2258,15 @@ inline float get_angle(float x, float y) {
} }
if (x < 0 && y >= 0) { if (x < 0 && y >= 0) {
return CV_PI - atanf(-y / x); return static_cast<float>(CV_PI) - atanf(-y / x);
} }
if (x < 0 && y < 0) { if (x < 0 && y < 0) {
return CV_PI + atanf(y / x); return static_cast<float>(CV_PI) + atanf(y / x);
} }
if (x >= 0 && y < 0) { if (x >= 0 && y < 0) {
return 2.0*CV_PI - atanf(-y / x); return static_cast<float>(2.0 * CV_PI) - atanf(-y / x);
} }
return 0; return 0;
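get_angle folds atanf results into [0, 2*pi) quadrant by quadrant; the patch only wraps CV_PI in float casts. For reference, an equivalent formulation (not part of the patch) built on atan2f gives the same range:

#include <cmath>
#include <opencv2/core.hpp>   // CV_PI

// Same result as the quadrant-by-quadrant version: an angle in [0, 2*pi).
inline float get_angle_alt(float x, float y) {
    float a = atan2f(y, x);                        // in (-pi, pi]
    if (a < 0.0f)
        a += static_cast<float>(2.0 * CV_PI);      // shift negatives into [pi, 2*pi)
    return a;
}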
......
...@@ -33,9 +33,6 @@ private: ...@@ -33,9 +33,6 @@ private:
cv::Mat descriptorBits_; cv::Mat descriptorBits_;
cv::Mat bitMask_; cv::Mat bitMask_;
/// Computation times variables in ms
AKAZETiming timing_;
public: public:
/// Constructor with input arguments /// Constructor with input arguments
...@@ -74,14 +71,14 @@ public: ...@@ -74,14 +71,14 @@ public:
//void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); //void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc);
// Methods for saving some results and showing computation times // Methods for saving some results and showing computation times
void Save_Scale_Space(); //void Save_Scale_Space();
void Save_Detector_Responses(); //void Save_Detector_Responses();
void Show_Computation_Times() const; //void Show_Computation_Times() const;
/// Return the computation times /// Return the computation times
AKAZETiming Get_Computation_Times() const { //AKAZETiming Get_Computation_Times() const {
return timing_; // return timing_;
} //}
}; };
/* ************************************************************************* */ /* ************************************************************************* */
......
...@@ -54,12 +54,12 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { ...@@ -54,12 +54,12 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) {
kcontrast_ = DEFAULT_KCONTRAST; kcontrast_ = DEFAULT_KCONTRAST;
ncycles_ = 0; ncycles_ = 0;
reordering_ = true; reordering_ = true;
tkcontrast_ = 0.0; //tkcontrast_ = 0.0;
tnlscale_ = 0.0; //tnlscale_ = 0.0;
tdetector_ = 0.0; //tdetector_ = 0.0;
tmderivatives_ = 0.0; //tmderivatives_ = 0.0;
tdresponse_ = 0.0; //tdresponse_ = 0.0;
tdescriptor_ = 0.0; //tdescriptor_ = 0.0;
// Now allocate memory for the evolution // Now allocate memory for the evolution
Allocate_Memory_Evolution(); Allocate_Memory_Evolution();
...@@ -99,11 +99,11 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { ...@@ -99,11 +99,11 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) {
aux.Lsmooth = cv::Mat::zeros(img_height_, img_width_, CV_32F); aux.Lsmooth = cv::Mat::zeros(img_height_, img_width_, CV_32F);
aux.Lstep = cv::Mat::zeros(img_height_, img_width_, CV_32F); aux.Lstep = cv::Mat::zeros(img_height_, img_width_, CV_32F);
aux.Ldet = cv::Mat::zeros(img_height_, img_width_, CV_32F); aux.Ldet = cv::Mat::zeros(img_height_, img_width_, CV_32F);
aux.esigma = soffset_*pow((float)2.0, (float)(j) / (float)(nsublevels_)+i); aux.esigma = soffset_*pow((float)2.0f, (float)(j) / (float)(nsublevels_)+i);
aux.etime = 0.5*(aux.esigma*aux.esigma); aux.etime = 0.5f*(aux.esigma*aux.esigma);
aux.sigma_size = fRound(aux.esigma); aux.sigma_size = fRound(aux.esigma);
aux.octave = i; aux.octave = (float)i;
aux.sublevel = j; aux.sublevel = (float)j;
evolution_.push_back(aux); evolution_.push_back(aux);
} }
} }
...@@ -115,7 +115,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { ...@@ -115,7 +115,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) {
vector<float> tau; vector<float> tau;
float ttime = 0.0; float ttime = 0.0;
ttime = evolution_[i].etime - evolution_[i - 1].etime; ttime = evolution_[i].etime - evolution_[i - 1].etime;
naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_, tau); naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau);
nsteps_.push_back(naux); nsteps_.push_back(naux);
tsteps_.push_back(tau); tsteps_.push_back(tau);
ncycles_++; ncycles_++;
...@@ -147,7 +147,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { ...@@ -147,7 +147,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) {
*/ */
int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
double t2 = 0.0, t1 = 0.0; //double t2 = 0.0, t1 = 0.0;
if (evolution_.size() == 0) { if (evolution_.size() == 0) {
cout << "Error generating the nonlinear scale space!!" << endl; cout << "Error generating the nonlinear scale space!!" << endl;
...@@ -155,7 +155,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { ...@@ -155,7 +155,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
return -1; return -1;
} }
t1 = getTickCount(); //t1 = getTickCount();
// Copy the original image to the first level of the evolution // Copy the original image to the first level of the evolution
img.copyTo(evolution_[0].Lt); img.copyTo(evolution_[0].Lt);
...@@ -165,8 +165,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { ...@@ -165,8 +165,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
// Firstly compute the kcontrast factor // Firstly compute the kcontrast factor
Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE); Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE);
t2 = getTickCount(); //t2 = getTickCount();
tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency(); //tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency();
if (verbosity_ == true) { if (verbosity_ == true) {
cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime <<
...@@ -212,8 +212,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { ...@@ -212,8 +212,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
} }
} }
t2 = getTickCount(); //t2 = getTickCount();
tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency(); //tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency();
return 0; return 0;
} }
...@@ -228,18 +228,18 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { ...@@ -228,18 +228,18 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
*/ */
void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) {
if (verbosity_ == true) { //if (verbosity_ == true) {
cout << "Computing Kcontrast factor." << endl; // cout << "Computing Kcontrast factor." << endl;
} //}
if (COMPUTE_KCONTRAST == true) { if (COMPUTE_KCONTRAST) {
kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0);
} }
if (verbosity_ == true) { //if (verbosity_ == true) {
cout << "kcontrast = " << kcontrast_ << endl; // cout << "kcontrast = " << kcontrast_ << endl;
cout << endl << "Now computing the nonlinear scale space!!" << endl; // cout << endl << "Now computing the nonlinear scale space!!" << endl;
} //}
} }
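Compute_KContrast only loses its verbosity printouts; the contrast factor still comes from compute_k_percentile, i.e. a percentile of the smoothed image's gradient magnitude. A rough histogram-based sketch of that idea — the bin count, Sobel derivatives and fallback value are assumptions, not the library's implementation:

#include <algorithm>
#include <vector>
#include <opencv2/imgproc.hpp>

// Approximate k as the requested percentile of |grad L| over the image.
float k_percentile_sketch(const cv::Mat& img, float percentile, int nbins = 300) {
    cv::Mat lx, ly, mag;
    cv::Sobel(img, lx, CV_32F, 1, 0);      // the real code uses its own Scharr helpers
    cv::Sobel(img, ly, CV_32F, 0, 1);
    cv::magnitude(lx, ly, mag);

    double maxval = 0.0;
    cv::minMaxLoc(mag, nullptr, &maxval);
    if (maxval <= 0.0) return 0.03f;       // flat image: fall back to a small constant

    std::vector<int> hist(nbins, 0);
    int total = 0;
    for (int y = 0; y < mag.rows; y++) {
        const float* row = mag.ptr<float>(y);
        for (int x = 0; x < mag.cols; x++) {
            if (row[x] > 0.f) {
                hist[std::min(nbins - 1, (int)(nbins * row[x] / maxval))]++;
                total++;
            }
        }
    }

    int target = (int)(percentile * total), acc = 0, bin = 0;
    while (bin < nbins && acc < target) acc += hist[bin++];
    return (float)(maxval * bin / nbins);  // gradient magnitude at the requested percentile
}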
//************************************************************************************* //*************************************************************************************
...@@ -250,18 +250,18 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil ...@@ -250,18 +250,18 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil
*/ */
void KAZEFeatures::Compute_Multiscale_Derivatives(void) void KAZEFeatures::Compute_Multiscale_Derivatives(void)
{ {
double t2 = 0.0, t1 = 0.0; //double t2 = 0.0, t1 = 0.0;
t1 = getTickCount(); //t1 = getTickCount();
#ifdef _OPENMP #ifdef _OPENMP
#pragma omp parallel for #pragma omp parallel for
#endif #endif
for (size_t i = 0; i < evolution_.size(); i++) { for (size_t i = 0; i < evolution_.size(); i++) {
if (verbosity_ == true) { //if (verbosity_ == true) {
cout << "Computing multiscale derivatives. Evolution time: " << evolution_[i].etime // cout << "Computing multiscale derivatives. Evolution time: " << evolution_[i].etime
<< " Step (pixels): " << evolution_[i].sigma_size << endl; // << " Step (pixels): " << evolution_[i].sigma_size << endl;
} //}
// Compute multiscale derivatives for the detector // Compute multiscale derivatives for the detector
compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size); compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size);
...@@ -277,8 +277,8 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) ...@@ -277,8 +277,8 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void)
evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size));
} }
t2 = getTickCount(); //t2 = getTickCount();
tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency(); //tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency();
} }
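Compute_Multiscale_Derivatives keeps its OpenMP loop and only drops the timing and logging; each level's derivatives are still scale-normalised, as the visible Lyy line shows (the matching Lxx/Lxy lines sit in the elided part of the hunk). A terse sketch of that normalisation, written against cv::Mat expressions:

#include <opencv2/core.hpp>

// Scale-normalise one level's second derivatives: multiply by sigma^2,
// where sigma_size is the rounded esigma of this evolution level.
void normalise_level(cv::Mat& Lxx, cv::Mat& Lxy, cv::Mat& Lyy, int sigma_size) {
    const float s2 = (float)(sigma_size * sigma_size);
    Lxx = Lxx * s2;
    Lxy = Lxy * s2;
    Lyy = Lyy * s2;
}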
//************************************************************************************* //*************************************************************************************
...@@ -290,10 +290,10 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) ...@@ -290,10 +290,10 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void)
*/ */
void KAZEFeatures::Compute_Detector_Response(void) { void KAZEFeatures::Compute_Detector_Response(void) {
double t2 = 0.0, t1 = 0.0; //double t2 = 0.0, t1 = 0.0;
float lxx = 0.0, lxy = 0.0, lyy = 0.0; float lxx = 0.0, lxy = 0.0, lyy = 0.0;
t1 = getTickCount(); //t1 = getTickCount();
// Firstly compute the multiscale derivatives // Firstly compute the multiscale derivatives
Compute_Multiscale_Derivatives(); Compute_Multiscale_Derivatives();
...@@ -301,9 +301,9 @@ void KAZEFeatures::Compute_Detector_Response(void) { ...@@ -301,9 +301,9 @@ void KAZEFeatures::Compute_Detector_Response(void) {
for (size_t i = 0; i < evolution_.size(); i++) { for (size_t i = 0; i < evolution_.size(); i++) {
// Determinant of the Hessian // Determinant of the Hessian
if (verbosity_ == true) { //if (verbosity_ == true) {
cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; // cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl;
} //}
for (int ix = 0; ix < img_height_; ix++) { for (int ix = 0; ix < img_height_; ix++) {
for (int jx = 0; jx < img_width_; jx++) { for (int jx = 0; jx < img_width_; jx++) {
...@@ -315,8 +315,8 @@ void KAZEFeatures::Compute_Detector_Response(void) { ...@@ -315,8 +315,8 @@ void KAZEFeatures::Compute_Detector_Response(void) {
} }
} }
t2 = getTickCount(); //t2 = getTickCount();
tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency(); //tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency();
} }
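The per-pixel loop above fills Ldet for every evolution level; the response itself sits in the elided lines and is the determinant of the Hessian built from the scale-normalised second derivatives read just before it. A one-liner for what each pixel receives — any extra normalisation factor in the elided code is not reproduced here:

// Detector response at one pixel of one evolution level.
inline float hessian_response(float lxx, float lxy, float lyy) {
    return lxx * lyy - lxy * lxy;   // det(H) of the 2x2 Hessian
}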
//************************************************************************************* //*************************************************************************************
...@@ -328,8 +328,8 @@ void KAZEFeatures::Compute_Detector_Response(void) { ...@@ -328,8 +328,8 @@ void KAZEFeatures::Compute_Detector_Response(void) {
*/ */
void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) { void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
double t2 = 0.0, t1 = 0.0; //double t2 = 0.0, t1 = 0.0;
t1 = getTickCount(); //t1 = getTickCount();
kpts.clear(); kpts.clear();
...@@ -342,8 +342,8 @@ void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) { ...@@ -342,8 +342,8 @@ void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
// Perform some subpixel refinement // Perform some subpixel refinement
Do_Subpixel_Refinement(kpts); Do_Subpixel_Refinement(kpts);
t2 = getTickCount(); //t2 = getTickCount();
tdetector_ = 1000.0*(t2 - t1) / getTickFrequency(); //tdetector_ = 1000.0*(t2 - t1) / getTickFrequency();
} }
//************************************************************************************* //*************************************************************************************
...@@ -476,11 +476,11 @@ void KAZEFeatures::Find_Extremum_Threading(const int& level) { ...@@ -476,11 +476,11 @@ void KAZEFeatures::Find_Extremum_Threading(const int& level) {
// Add the point of interest!! // Add the point of interest!!
if (is_extremum == true) { if (is_extremum == true) {
KeyPoint point; KeyPoint point;
point.pt.x = jx; point.pt.x = (float)jx;
point.pt.y = ix; point.pt.y = (float)ix;
point.response = fabs(value); point.response = fabs(value);
point.size = evolution_[level].esigma; point.size = evolution_[level].esigma;
point.octave = evolution_[level].octave; point.octave = (int)evolution_[level].octave;
point.class_id = level; point.class_id = level;
// We use the angle field for the sublevel value // We use the angle field for the sublevel value
...@@ -508,50 +508,50 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) { ...@@ -508,50 +508,50 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
Mat A = Mat::zeros(3, 3, CV_32F); Mat A = Mat::zeros(3, 3, CV_32F);
Mat b = Mat::zeros(3, 1, CV_32F); Mat b = Mat::zeros(3, 1, CV_32F);
Mat dst = Mat::zeros(3, 1, CV_32F); Mat dst = Mat::zeros(3, 1, CV_32F);
double t2 = 0.0, t1 = 0.0; //double t2 = 0.0, t1 = 0.0;
t1 = cv::getTickCount(); //t1 = cv::getTickCount();
vector<KeyPoint> kpts_(kpts); vector<KeyPoint> kpts_(kpts);
for (size_t i = 0; i < kpts_.size(); i++) { for (size_t i = 0; i < kpts_.size(); i++) {
x = kpts_[i].pt.x; x = static_cast<int>(kpts_[i].pt.x);
y = kpts_[i].pt.y; y = static_cast<int>(kpts_[i].pt.y);
// Compute the gradient // Compute the gradient
Dx = (1.0 / (2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step) Dx = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step)
- *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step)); - *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step));
Dy = (1.0 / (2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x) Dy = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x)
- *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x)); - *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x));
Ds = 0.5*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x) Ds = 0.5f*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x)
- *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x)); - *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x));
// Compute the Hessian // Compute the Hessian
Dxx = (1.0 / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step) Dxx = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step)
+ *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step) + *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step)
- 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x))); - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)));
Dyy = (1.0 / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x) Dyy = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x)
+ *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x) + *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x)
- 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x))); - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)));
Dss = *(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x) Dss = *(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x)
+ *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x) + *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x)
- 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)); - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x));
Dxy = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x + step) Dxy = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x + step)
+ (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x - step))) + (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x - step)))
- (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x + step) - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x + step)
+ (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x - step))); + (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x - step)));
Dxs = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x + step) Dxs = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x + step)
+ (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x - step))) + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x - step)))
- (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x - step) - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x - step)
+ (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x + step))); + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x + step)));
Dys = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y + step) + x) Dys = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y + step) + x)
+ (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y - step) + x))) + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y - step) + x)))
- (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y - step) + x) - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y - step) + x)
+ (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y + step) + x))); + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y + step) + x)));
// Solve the linear system // Solve the linear system
...@@ -569,13 +569,13 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) { ...@@ -569,13 +569,13 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
solve(A, b, dst, DECOMP_LU); solve(A, b, dst, DECOMP_LU);
if (fabs(*(dst.ptr<float>(0))) <= 1.0 && fabs(*(dst.ptr<float>(1))) <= 1.0 && fabs(*(dst.ptr<float>(2))) <= 1.0) { if (fabs(*(dst.ptr<float>(0))) <= 1.0f && fabs(*(dst.ptr<float>(1))) <= 1.0f && fabs(*(dst.ptr<float>(2))) <= 1.0f) {
kpts_[i].pt.x += *(dst.ptr<float>(0)); kpts_[i].pt.x += *(dst.ptr<float>(0));
kpts_[i].pt.y += *(dst.ptr<float>(1)); kpts_[i].pt.y += *(dst.ptr<float>(1));
dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr<float>(2))) / ((float)(nsublevels_)); dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr<float>(2))) / ((float)(nsublevels_));
// In OpenCV the size of a keypoint is the diameter!! // In OpenCV the size of a keypoint is the diameter!!
kpts_[i].size = 2.0*soffset_*pow((float)2.0, dsc); kpts_[i].size = 2.0f*soffset_*pow((float)2.0f, dsc);
kpts_[i].angle = 0.0; kpts_[i].angle = 0.0;
} }
// Set the points to be deleted after the for loop // Set the points to be deleted after the for loop
...@@ -593,8 +593,8 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) { ...@@ -593,8 +593,8 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
} }
} }
t2 = getTickCount(); //t2 = getTickCount();
tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency(); //tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency();
} }
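Do_Subpixel_Refinement estimates the gradient (Dx, Dy, Ds) and Hessian (Dxx ... Dss) of Ldet with the central differences above and then solves a 3x3 system for the sub-pixel/sub-level offset; the assembly of A and b sits in the elided hunk. A sketch of the textbook formulation it corresponds to (sign conventions are the standard ones, not copied from the elided code):

#include <cmath>
#include <opencv2/core.hpp>

// Second-order fit around a detected extremum: solve A * delta = b with
// A the Hessian of Ldet and b the negative gradient; keep the refinement
// only when every component of delta stays within one step.
bool refine_offset(float Dx, float Dy, float Ds,
                   float Dxx, float Dyy, float Dss,
                   float Dxy, float Dxs, float Dys,
                   cv::Mat& delta) {
    cv::Mat A = (cv::Mat_<float>(3, 3) << Dxx, Dxy, Dxs,
                                          Dxy, Dyy, Dys,
                                          Dxs, Dys, Dss);
    cv::Mat b = (cv::Mat_<float>(3, 1) << -Dx, -Dy, -Ds);
    if (!cv::solve(A, b, delta, cv::DECOMP_LU))
        return false;
    return std::fabs(delta.at<float>(0)) <= 1.0f &&
           std::fabs(delta.at<float>(1)) <= 1.0f &&
           std::fabs(delta.at<float>(2)) <= 1.0f;
}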
//************************************************************************************* //*************************************************************************************
...@@ -663,8 +663,8 @@ void KAZEFeatures::Feature_Suppression_Distance(std::vector<cv::KeyPoint>& kpts, ...@@ -663,8 +663,8 @@ void KAZEFeatures::Feature_Suppression_Distance(std::vector<cv::KeyPoint>& kpts,
*/ */
void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat &desc) { void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat &desc) {
double t2 = 0.0, t1 = 0.0; //double t2 = 0.0, t1 = 0.0;
t1 = getTickCount(); //t1 = getTickCount();
// Allocate memory for the matrix of descriptors // Allocate memory for the matrix of descriptors
if (use_extended_ == true) { if (use_extended_ == true) {
...@@ -796,8 +796,8 @@ void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat ...@@ -796,8 +796,8 @@ void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat
} }
} }
t2 = getTickCount(); //t2 = getTickCount();
tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency(); //tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency();
} }
//************************************************************************************* //*************************************************************************************
...@@ -822,7 +822,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) ...@@ -822,7 +822,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt)
xf = kpt.pt.x; xf = kpt.pt.x;
yf = kpt.pt.y; yf = kpt.pt.y;
level = kpt.class_id; level = kpt.class_id;
s = fRound(kpt.size / 2.0); s = fRound(kpt.size / 2.0f);
// Calculate derivatives responses for points within radius of 6*scale // Calculate derivatives responses for points within radius of 6*scale
for (int i = -6; i <= 6; ++i) { for (int i = -6; i <= 6; ++i) {
...@@ -832,7 +832,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) ...@@ -832,7 +832,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt)
ix = fRound(xf + i*s); ix = fRound(xf + i*s);
if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_) { if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_) {
gweight = gaussian(iy - yf, ix - xf, 2.5*s); gweight = gaussian(iy - yf, ix - xf, 2.5f*s);
resX[idx] = gweight*(*(evolution_[level].Lx.ptr<float>(iy)+ix)); resX[idx] = gweight*(*(evolution_[level].Lx.ptr<float>(iy)+ix));
resY[idx] = gweight*(*(evolution_[level].Ly.ptr<float>(iy)+ix)); resY[idx] = gweight*(*(evolution_[level].Ly.ptr<float>(iy)+ix));
} }
...@@ -848,8 +848,8 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) ...@@ -848,8 +848,8 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt)
} }
// Loop slides pi/3 window around feature point // Loop slides pi/3 window around feature point
for (ang1 = 0; ang1 < 2.0*CV_PI; ang1 += 0.15f) { for (ang1 = 0; ang1 < 2.0f*CV_PI; ang1 += 0.15f) {
ang2 = (ang1 + CV_PI / 3.0f > 2.0*CV_PI ? ang1 - 5.0f*CV_PI / 3.0f : ang1 + CV_PI / 3.0f); ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0));
sumX = sumY = 0.f; sumX = sumY = 0.f;
for (size_t k = 0; k < Ang.size(); ++k) { for (size_t k = 0; k < Ang.size(); ++k) {
...@@ -862,7 +862,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) ...@@ -862,7 +862,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt)
sumY += resY[k]; sumY += resY[k];
} }
else if (ang2 < ang1 && else if (ang2 < ang1 &&
((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { ((ang > 0 && ang < ang2) || (ang > ang1 && ang < (float)(2.0*CV_PI)))) {
sumX += resX[k]; sumX += resX[k];
sumY += resY[k]; sumY += resY[k];
} }
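Compute_Main_Orientation_SURF slides a pi/3 window around the circle in 0.15-rad steps, sums the Gaussian-weighted responses whose angle falls inside the current window, and (in the elided lines) keeps the direction of the longest summed vector. A compact sketch of that selection, with resX/resY/Ang assumed to be the per-sample arrays filled just before the loop and get_angle the quadrant helper from this file:

#include <cstddef>
#include <vector>
#include <opencv2/core.hpp>   // CV_PI

float dominant_orientation(const std::vector<float>& resX,
                           const std::vector<float>& resY,
                           const std::vector<float>& Ang,
                           float (*get_angle)(float, float)) {
    const float two_pi = (float)(2.0 * CV_PI);
    const float window = (float)(CV_PI / 3.0);
    float best = 0.f, kpt_angle = 0.f;

    for (float ang1 = 0.f; ang1 < two_pi; ang1 += 0.15f) {
        float ang2 = ang1 + window;
        if (ang2 > two_pi) ang2 -= two_pi;          // window wraps past 2*pi
        float sumX = 0.f, sumY = 0.f;

        for (size_t k = 0; k < Ang.size(); ++k) {
            bool inside = (ang2 > ang1) ? (Ang[k] > ang1 && Ang[k] < ang2)
                                        : (Ang[k] > ang1 || Ang[k] < ang2);
            if (inside) { sumX += resX[k]; sumY += resY[k]; }
        }
        // Longest summed vector wins; its direction becomes the keypoint angle.
        if (sumX * sumX + sumY * sumY > best) {
            best = sumX * sumX + sumY * sumY;
            kpt_angle = get_angle(sumX, sumY);
        }
    }
    return kpt_angle;
}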
...@@ -907,7 +907,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float ...@@ -907,7 +907,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
level = kpt.class_id; level = kpt.class_id;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
// Calculate descriptor for this interest point // Calculate descriptor for this interest point
for (int i = -pattern_size; i < pattern_size; i += sample_step) { for (int i = -pattern_size; i < pattern_size; i += sample_step) {
...@@ -921,13 +921,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float ...@@ -921,13 +921,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float
sample_y = k*scale + yf; sample_y = k*scale + yf;
sample_x = l*scale + xf; sample_x = l*scale + xf;
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - .5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - .5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + .5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + .5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -938,13 +938,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float ...@@ -938,13 +938,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Sum the derivatives to the cumulative descriptor // Sum the derivatives to the cumulative descriptor
dx += rx; dx += rx;
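Every SURF-style descriptor routine in this file repeats the same four-tap read: fetch Lx (or Ly) at the four integer neighbours of the sample point and blend them with the fractional offsets fx, fy, exactly as in the rx/ry lines above. A small helper naming that bilinear interpolation; fx and fy are assumed to be sample_x - x1 and sample_y - y1, whose computation sits in the elided lines:

#include <opencv2/core.hpp>

// Bilinear read from one evolution image: blend the four neighbours of the
// sample point with the fractional offsets fx, fy in [0, 1).
inline float bilinear_sample(const cv::Mat& L, int x1, int y1, int x2, int y2,
                             float fx, float fy) {
    float res1 = *(L.ptr<float>(y1) + x1);
    float res2 = *(L.ptr<float>(y1) + x2);
    float res3 = *(L.ptr<float>(y2) + x1);
    float res4 = *(L.ptr<float>(y2) + x2);
    return (1.0f - fx) * (1.0f - fy) * res1 + fx * (1.0f - fy) * res2
         + (1.0f - fx) * fy * res3 + fx * fy * res4;
}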
...@@ -1006,7 +1006,7 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1006,7 +1006,7 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
co = cos(angle); co = cos(angle);
...@@ -1024,13 +1024,13 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1024,13 +1024,13 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
sample_y = yf + (l*scale*co + k*scale*si); sample_y = yf + (l*scale*co + k*scale*si);
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - .5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - .5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + .5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + .5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1041,13 +1041,13 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1041,13 +1041,13 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = rx*co + ry*si; rry = rx*co + ry*si;
...@@ -1107,7 +1107,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1107,7 +1107,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
int dsize = 0, scale = 0, level = 0; int dsize = 0, scale = 0, level = 0;
// Subregion centers for the 4x4 gaussian weighting // Subregion centers for the 4x4 gaussian weighting
float cx = -0.5, cy = 0.5; float cx = -0.5f, cy = 0.5f;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 64; dsize = 64;
...@@ -1117,7 +1117,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1117,7 +1117,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
level = kpt.class_id; level = kpt.class_id;
i = -8; i = -8;
...@@ -1128,13 +1128,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1128,13 +1128,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
j = -8; j = -8;
i = i - 4; i = i - 4;
cx += 1.0; cx += 1.0f;
cy = -0.5; cy = -0.5f;
while (j < pattern_size) { while (j < pattern_size) {
dx = dy = mdx = mdy = 0.0; dx = dy = mdx = mdy = 0.0;
cy += 1.0; cy += 1.0f;
j = j - 4; j = j - 4;
ky = i + sample_step; ky = i + sample_step;
...@@ -1150,15 +1150,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1150,15 +1150,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
sample_x = l*scale + xf; sample_x = l*scale + xf;
//Get the gaussian weighted x and y responses //Get the gaussian weighted x and y responses
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1169,13 +1169,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1169,13 +1169,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
rx = gauss_s1*rx; rx = gauss_s1*rx;
ry = gauss_s1*ry; ry = gauss_s1*ry;
...@@ -1239,7 +1239,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1239,7 +1239,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
int dsize = 0, scale = 0, level = 0; int dsize = 0, scale = 0, level = 0;
// Subregion centers for the 4x4 gaussian weighting // Subregion centers for the 4x4 gaussian weighting
float cx = -0.5, cy = 0.5; float cx = -0.5f, cy = 0.5f;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 64; dsize = 64;
...@@ -1249,7 +1249,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1249,7 +1249,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
co = cos(angle); co = cos(angle);
...@@ -1264,13 +1264,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1264,13 +1264,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
j = -8; j = -8;
i = i - 4; i = i - 4;
cx += 1.0; cx += 1.0f;
cy = -0.5; cy = -0.5f;
while (j < pattern_size) { while (j < pattern_size) {
dx = dy = mdx = mdy = 0.0; dx = dy = mdx = mdy = 0.0;
cy += 1.0; cy += 1.0f;
j = j - 4; j = j - 4;
ky = i + sample_step; ky = i + sample_step;
...@@ -1287,14 +1287,14 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1287,14 +1287,14 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
// Get the gaussian weighted x and y responses // Get the gaussian weighted x and y responses
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
y1 = fRound(sample_y - .5); y1 = fRound(sample_y - 0.5f);
x1 = fRound(sample_x - .5); x1 = fRound(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1305,13 +1305,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1305,13 +1305,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = gauss_s1*(rx*co + ry*si); rry = gauss_s1*(rx*co + ry*si);
...@@ -1379,7 +1379,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1379,7 +1379,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
level = kpt.class_id; level = kpt.class_id;
// Calculate descriptor for this interest point // Calculate descriptor for this interest point
...@@ -1395,13 +1395,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1395,13 +1395,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
sample_y = yf + l*scale; sample_y = yf + l*scale;
sample_x = xf + k*scale; sample_x = xf + k*scale;
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1412,13 +1412,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1412,13 +1412,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
modg = pow(rx, 2) + pow(ry, 2); modg = pow(rx, 2) + pow(ry, 2);
...@@ -1428,25 +1428,25 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa ...@@ -1428,25 +1428,25 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa
res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2);
rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2);
rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2);
ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg);
// Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg);
} }
else { else {
lww = 0.0; lww = 0.0;
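The Lww/Lvv lines above implement the second-order gauge derivatives along (w) and across (v) the gradient, as the comments state. A self-contained float-only sketch of the same formulas, with hypothetical variable names:

// Second-order gauge derivatives used by the G-SURF descriptor, computed with
// float literals only so the expressions stay in single precision.
static void gaugeDerivatives(float rx, float ry, float rxx, float rxy, float ryy,
                             float& lww, float& lvv) {
    float modg = rx * rx + ry * ry;  // |grad L|^2
    if (modg != 0.0f) {
        // Lww = (Lx^2*Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
        lww = (rx * rx * rxx + 2.0f * rx * rxy * ry + ry * ry * ryy) / modg;
        // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
        lvv = (-2.0f * rx * rxy * ry + rxx * ry * ry + rx * rx * ryy) / modg;
    } else {
        lww = 0.0f;
        lvv = 0.0f;
    }
}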
...@@ -1514,7 +1514,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1514,7 +1514,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
co = cos(angle); co = cos(angle);
...@@ -1533,13 +1533,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1533,13 +1533,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
sample_y = yf + (l*scale*co + k*scale*si); sample_y = yf + (l*scale*co + k*scale*si);
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1550,13 +1550,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1550,13 +1550,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
modg = pow(rx, 2) + pow(ry, 2); modg = pow(rx, 2) + pow(ry, 2);
...@@ -1566,25 +1566,25 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) ...@@ -1566,25 +1566,25 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc)
res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2);
rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2);
rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2);
ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg);
// Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg);
} }
else { else {
lww = 0.0; lww = 0.0;
...@@ -1652,7 +1652,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa ...@@ -1652,7 +1652,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
level = kpt.class_id; level = kpt.class_id;
// Calculate descriptor for this interest point // Calculate descriptor for this interest point
...@@ -1668,13 +1668,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa ...@@ -1668,13 +1668,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa
sample_y = k*scale + yf; sample_y = k*scale + yf;
sample_x = l*scale + xf; sample_x = l*scale + xf;
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1685,13 +1685,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa ...@@ -1685,13 +1685,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Sum the derivatives to the cumulative descriptor // Sum the derivatives to the cumulative descriptor
if (ry >= 0.0) { if (ry >= 0.0) {
...@@ -1772,7 +1772,7 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) ...@@ -1772,7 +1772,7 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc)
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
co = cos(angle); co = cos(angle);
...@@ -1792,13 +1792,13 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) ...@@ -1792,13 +1792,13 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc)
sample_y = yf + (l*scale*co + k*scale*si); sample_y = yf + (l*scale*co + k*scale*si);
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1809,13 +1809,13 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) ...@@ -1809,13 +1809,13 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc)
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = rx*co + ry*si; rry = rx*co + ry*si;
...@@ -1895,7 +1895,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -1895,7 +1895,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
int dsize = 0, scale = 0, level = 0; int dsize = 0, scale = 0, level = 0;
// Subregion centers for the 4x4 gaussian weighting // Subregion centers for the 4x4 gaussian weighting
float cx = -0.5, cy = 0.5; float cx = -0.5f, cy = 0.5f;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 128; dsize = 128;
...@@ -1905,7 +1905,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -1905,7 +1905,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
level = kpt.class_id; level = kpt.class_id;
i = -8; i = -8;
...@@ -1917,15 +1917,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -1917,15 +1917,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
j = -8; j = -8;
i = i - 4; i = i - 4;
cx += 1.0; cx += 1.0f;
cy = -0.5; cy = -0.5f;
while (j < pattern_size) { while (j < pattern_size) {
dxp = dxn = mdxp = mdxn = 0.0; dxp = dxn = mdxp = mdxn = 0.0;
dyp = dyn = mdyp = mdyn = 0.0; dyp = dyn = mdyp = mdyn = 0.0;
cy += 1.0; cy += 1.0f;
j = j - 4; j = j - 4;
ky = i + sample_step; ky = i + sample_step;
...@@ -1941,15 +1941,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -1941,15 +1941,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
sample_x = l*scale + xf; sample_x = l*scale + xf;
//Get the gaussian weighted x and y responses //Get the gaussian weighted x and y responses
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50*scale); gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -1960,13 +1960,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -1960,13 +1960,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
rx = gauss_s1*rx; rx = gauss_s1*rx;
ry = gauss_s1*ry; ry = gauss_s1*ry;
...@@ -2051,7 +2051,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2051,7 +2051,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
int dsize = 0, scale = 0, level = 0; int dsize = 0, scale = 0, level = 0;
// Subregion centers for the 4x4 gaussian weighting // Subregion centers for the 4x4 gaussian weighting
float cx = -0.5, cy = 0.5; float cx = -0.5f, cy = 0.5f;
// Set the descriptor size and the sample and pattern sizes // Set the descriptor size and the sample and pattern sizes
dsize = 128; dsize = 128;
...@@ -2061,7 +2061,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2061,7 +2061,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
co = cos(angle); co = cos(angle);
...@@ -2076,8 +2076,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2076,8 +2076,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
j = -8; j = -8;
i = i - 4; i = i - 4;
cx += 1.0; cx += 1.0f;
cy = -0.5; cy = -0.5f;
while (j < pattern_size) { while (j < pattern_size) {
...@@ -2101,15 +2101,15 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2101,15 +2101,15 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
// Get the gaussian weighted x and y responses // Get the gaussian weighted x and y responses
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
y1 = fRound(sample_y - .5); y1 = fRound(sample_y - 0.5f);
x1 = fRound(sample_x - .5); x1 = fRound(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -2120,13 +2120,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2120,13 +2120,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis // Get the x and y derivatives on the rotated axis
rry = gauss_s1*(rx*co + ry*si); rry = gauss_s1*(rx*co + ry*si);
...@@ -2217,7 +2217,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -2217,7 +2217,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
level = kpt.class_id; level = kpt.class_id;
// Calculate descriptor for this interest point // Calculate descriptor for this interest point
...@@ -2233,13 +2233,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -2233,13 +2233,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
sample_y = k*scale + yf; sample_y = k*scale + yf;
sample_x = l*scale + xf; sample_x = l*scale + xf;
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -2250,13 +2250,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -2250,13 +2250,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
modg = pow(rx, 2) + pow(ry, 2); modg = pow(rx, 2) + pow(ry, 2);
...@@ -2266,25 +2266,25 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo ...@@ -2266,25 +2266,25 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo
res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2);
rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2);
rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2);
ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg);
// Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg);
} }
else { else {
lww = 0.0; lww = 0.0;
...@@ -2372,7 +2372,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2372,7 +2372,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
// Get the information from the keypoint // Get the information from the keypoint
yf = kpt.pt.y; yf = kpt.pt.y;
xf = kpt.pt.x; xf = kpt.pt.x;
scale = fRound(kpt.size / 2.0); scale = fRound(kpt.size / 2.0f);
angle = kpt.angle; angle = kpt.angle;
level = kpt.class_id; level = kpt.class_id;
co = cos(angle); co = cos(angle);
...@@ -2392,13 +2392,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2392,13 +2392,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
sample_y = yf + (l*scale*co + k*scale*si); sample_y = yf + (l*scale*co + k*scale*si);
sample_x = xf + (-l*scale*si + k*scale*co); sample_x = xf + (-l*scale*si + k*scale*co);
y1 = (int)(sample_y - .5); y1 = (int)(sample_y - 0.5f);
x1 = (int)(sample_x - .5); x1 = (int)(sample_x - 0.5f);
checkDescriptorLimits(x1, y1, img_width_, img_height_); checkDescriptorLimits(x1, y1, img_width_, img_height_);
y2 = (int)(sample_y + .5); y2 = (int)(sample_y + 0.5f);
x2 = (int)(sample_x + .5); x2 = (int)(sample_x + 0.5f);
checkDescriptorLimits(x2, y2, img_width_, img_height_); checkDescriptorLimits(x2, y2, img_width_, img_height_);
...@@ -2409,13 +2409,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2409,13 +2409,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lx.ptr<float>(y2)+x2);
rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1); res1 = *(evolution_[level].Ly.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2); res2 = *(evolution_[level].Ly.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1); res3 = *(evolution_[level].Ly.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2); res4 = *(evolution_[level].Ly.ptr<float>(y2)+x2);
ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
modg = pow(rx, 2) + pow(ry, 2); modg = pow(rx, 2) + pow(ry, 2);
...@@ -2424,25 +2424,25 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc ...@@ -2424,25 +2424,25 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc
res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxx.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxx.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxx.ptr<float>(y2)+x2);
rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lxy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lxy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lxy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lxy.ptr<float>(y2)+x2);
rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1); res1 = *(evolution_[level].Lyy.ptr<float>(y1)+x1);
res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2); res2 = *(evolution_[level].Lyy.ptr<float>(y1)+x2);
res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1); res3 = *(evolution_[level].Lyy.ptr<float>(y2)+x1);
res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2); res4 = *(evolution_[level].Lyy.ptr<float>(y2)+x2);
ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg);
// Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg);
} }
else { else {
lww = 0.0; lww = 0.0;
...@@ -2530,7 +2530,7 @@ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv: ...@@ -2530,7 +2530,7 @@ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv:
AOS_Columns(Ldprev, c, stepsize); AOS_Columns(Ldprev, c, stepsize);
#endif #endif
Ld = 0.5*(Lty_ + Ltx_.t()); Ld = 0.5f*(Lty_ + Ltx_.t());
} }
//************************************************************************************* //*************************************************************************************
...@@ -2567,7 +2567,7 @@ void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float ...@@ -2567,7 +2567,7 @@ void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float
// a = 1 + t.*p; (p is -1*p) // a = 1 + t.*p; (p is -1*p)
// b = -t.*q; // b = -t.*q;
ay_ = 1.0 + stepsize*py_; // p is -1*p ay_ = 1.0f + stepsize*py_; // p is -1*p
by_ = -stepsize*qr_; by_ = -stepsize*qr_;
// Do Thomas algorithm to solve the linear system of equations // Do Thomas algorithm to solve the linear system of equations
...@@ -2607,7 +2607,7 @@ void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const fl ...@@ -2607,7 +2607,7 @@ void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const fl
} }
// a = 1 + t.*p'; // a = 1 + t.*p';
ax_ = 1.0 + stepsize*px_.t(); ax_ = 1.0f + stepsize*px_.t();
// b = -t.*q'; // b = -t.*q';
bx_ = -stepsize*qc_.t(); bx_ = -stepsize*qc_.t();
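As a small illustration of the AOS combination step changed above (Ld = 0.5f*(Lty_ + Ltx_.t())), here is a self-contained cv::Mat snippet assuming CV_32F matrices; the matrices and sizes are made up for the example:

#include <cmath>
#include <opencv2/core.hpp>

int main() {
    // Stand-ins for the row-wise (Lty_) and column-wise (Ltx_) AOS solutions.
    cv::Mat Lty = cv::Mat::ones(4, 4, CV_32F);
    cv::Mat Ltx = cv::Mat::ones(4, 4, CV_32F) * 3.0f;

    // Average the two solutions; the 0.5f literal keeps the scalar a float.
    cv::Mat Ld = 0.5f * (Lty + Ltx.t());

    CV_Assert(std::fabs(Ld.at<float>(0, 0) - 2.0f) < 1e-6f);
    return 0;
}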
...@@ -2697,15 +2697,15 @@ inline float getAngle(const float& x, const float& y) { ...@@ -2697,15 +2697,15 @@ inline float getAngle(const float& x, const float& y) {
} }
if (x < 0 && y >= 0) { if (x < 0 && y >= 0) {
return CV_PI - atan(-y / x); return (float)CV_PI - atan(-y / x);
} }
if (x < 0 && y < 0) { if (x < 0 && y < 0) {
return CV_PI + atan(y / x); return (float)CV_PI + atan(y / x);
} }
if (x >= 0 && y < 0) { if (x >= 0 && y < 0) {
return 2.0*CV_PI - atan(-y / x); return 2.0f * (float)CV_PI - atan(-y / x);
} }
return 0; return 0;
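The getAngle returns above now cast CV_PI (a double constant) to float; a hedged standalone version of the same quadrant logic, with the first-quadrant branch (not visible in this hunk) reconstructed:

#include <cmath>

static const float kPi = 3.14159265358979323846f;  // float stand-in for CV_PI

// Angle of the vector (x, y) mapped into [0, 2*pi), mirroring the quadrant
// handling above; a float constant avoids the double->float warnings.
static float getAngleSketch(float x, float y) {
    if (x >= 0 && y >= 0)
        return std::atan(y / x);
    if (x < 0 && y >= 0)
        return kPi - std::atan(-y / x);
    if (x < 0 && y < 0)
        return kPi + std::atan(y / x);
    if (x >= 0 && y < 0)
        return 2.0f * kPi - std::atan(-y / x);
    return 0.0f;
}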
...@@ -2723,7 +2723,7 @@ inline float getAngle(const float& x, const float& y) { ...@@ -2723,7 +2723,7 @@ inline float getAngle(const float& x, const float& y) {
*/ */
inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio) { inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio) {
float cratio = ratio / sqrt(dsize); float cratio = ratio / sqrtf(static_cast<float>(dsize));
float len = 0.0; float len = 0.0;
for (int i = 0; i < niter; i++) { for (int i = 0; i < niter; i++) {
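Only the cratio and len initialisation of clippingDescriptor is visible here; the loop body is cut off by the diff. A hedged guess at a clip-and-renormalise pass built around the shown cast of dsize to float (the loop itself is an assumption, not taken from the patch):

#include <cmath>

// Hypothetical clip-and-renormalise pass: values beyond +/-cratio are clipped,
// then the descriptor is rescaled to unit length. Only the cast of dsize to
// float in the cratio computation is taken from the patch.
static void clipDescriptorSketch(float* desc, int dsize, int niter, float ratio) {
    float cratio = ratio / std::sqrt(static_cast<float>(dsize));
    for (int it = 0; it < niter; it++) {
        float len = 0.0f;
        for (int i = 0; i < dsize; i++) {
            if (desc[i] > cratio)  desc[i] = cratio;
            if (desc[i] < -cratio) desc[i] = -cratio;
            len += desc[i] * desc[i];
        }
        len = std::sqrt(len);
        if (len > 0.0f) {
            for (int i = 0; i < dsize; i++)
                desc[i] /= len;
        }
    }
}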
......
...@@ -54,13 +54,13 @@ private: ...@@ -54,13 +54,13 @@ private:
std::vector<int> nsteps_; // Vector of number of steps per cycle std::vector<int> nsteps_; // Vector of number of steps per cycle
// Computation times variables in ms // Computation times variables in ms
double tkcontrast_; // Kcontrast factor computation //double tkcontrast_; // Kcontrast factor computation
double tnlscale_; // Nonlinear Scale space generation //double tnlscale_; // Nonlinear Scale space generation
double tdetector_; // Feature detector //double tdetector_; // Feature detector
double tmderivatives_; // Multiscale derivatives computation //double tmderivatives_; // Multiscale derivatives computation
double tdresponse_; // Detector response computation //double tdresponse_; // Detector response computation
double tdescriptor_; // Feature descriptor //double tdescriptor_; // Feature descriptor
double tsubpixel_; // Subpixel refinement //double tsubpixel_; // Subpixel refinement
// Some auxiliary variables used in the AOS step // Some auxiliary variables used in the AOS step
cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_; cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_;
...@@ -243,33 +243,33 @@ public: ...@@ -243,33 +243,33 @@ public:
return use_extended_; return use_extended_;
} }
float Get_Time_KContrast(void) { //float Get_Time_KContrast(void) {
return tkcontrast_; // return tkcontrast_;
} //}
float Get_Time_NLScale(void) { //float Get_Time_NLScale(void) {
return tnlscale_; // return tnlscale_;
} //}
float Get_Time_Detector(void) { //float Get_Time_Detector(void) {
return tdetector_; // return tdetector_;
} //}
float Get_Time_Multiscale_Derivatives(void) { //float Get_Time_Multiscale_Derivatives(void) {
return tmderivatives_; // return tmderivatives_;
} //}
float Get_Time_Detector_Response(void) { //float Get_Time_Detector_Response(void) {
return tdresponse_; // return tdresponse_;
} //}
float Get_Time_Descriptor(void) { //float Get_Time_Descriptor(void) {
return tdescriptor_; // return tdescriptor_;
} //}
float Get_Time_Subpixel(void) { //float Get_Time_Subpixel(void) {
return tsubpixel_; // return tsubpixel_;
} //}
}; };
//************************************************************************************* //*************************************************************************************
......
...@@ -63,7 +63,7 @@ struct KAZEOptions { ...@@ -63,7 +63,7 @@ struct KAZEOptions {
KAZEOptions() { KAZEOptions() {
// Load the default options // Load the default options
soffset = DEFAULT_SCALE_OFFSET; soffset = DEFAULT_SCALE_OFFSET;
omax = DEFAULT_OCTAVE_MAX; omax = static_cast<int>(DEFAULT_OCTAVE_MAX);
nsublevels = DEFAULT_NSUBLEVELS; nsublevels = DEFAULT_NSUBLEVELS;
dthreshold = DEFAULT_DETECTOR_THRESHOLD; dthreshold = DEFAULT_DETECTOR_THRESHOLD;
use_fed = DEFAULT_USE_FED; use_fed = DEFAULT_USE_FED;
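The static_cast in the KAZEOptions constructor above implies DEFAULT_OCTAVE_MAX is a floating-point constant; a minimal sketch of that pattern (the value 4.0f is an assumption, the real default lives in the KAZE config header):

// Assumed floating-point default, as the static_cast above implies.
static const float DEFAULT_OCTAVE_MAX_SKETCH = 4.0f;

struct OptionsSketch {
    int omax;
    OptionsSketch() {
        // Explicit cast: assigning a float to an int without it triggers a
        // conversion warning (e.g. C4244 on MSVC).
        omax = static_cast<int>(DEFAULT_OCTAVE_MAX_SKETCH);
    }
};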
......