From 7a594354903f7a44239854df2763d3c8f1505b56 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 4 Apr 2014 14:25:38 +0300 Subject: [PATCH 01/52] KAZE and AKAZE integration initial commit --- modules/features2d/src/akaze/AKAZE.cpp | 2068 ++++++++++++ modules/features2d/src/akaze/AKAZE.h | 175 + modules/features2d/src/akaze/config.h | 155 + modules/features2d/src/akaze/fed.h | 26 + .../src/akaze/nldiffusion_functions.cpp | 431 +++ .../src/akaze/nldiffusion_functions.h | 41 + modules/features2d/src/akaze/utils.cpp | 196 ++ modules/features2d/src/akaze/utils.h | 54 + modules/features2d/src/kaze.cpp | 0 modules/features2d/src/kaze/KAZE.cpp | 2801 +++++++++++++++++ modules/features2d/src/kaze/KAZE.h | 294 ++ modules/features2d/src/kaze/config.h | 129 + modules/features2d/src/kaze/fed.cpp | 192 ++ modules/features2d/src/kaze/fed.h | 30 + .../src/kaze/nldiffusion_functions.cpp | 386 +++ .../src/kaze/nldiffusion_functions.h | 51 + modules/features2d/src/kaze/utils.cpp | 92 + modules/features2d/src/kaze/utils.h | 41 + 18 files changed, 7162 insertions(+) create mode 100644 modules/features2d/src/akaze/AKAZE.cpp create mode 100644 modules/features2d/src/akaze/AKAZE.h create mode 100644 modules/features2d/src/akaze/config.h create mode 100644 modules/features2d/src/akaze/fed.h create mode 100644 modules/features2d/src/akaze/nldiffusion_functions.cpp create mode 100644 modules/features2d/src/akaze/nldiffusion_functions.h create mode 100644 modules/features2d/src/akaze/utils.cpp create mode 100644 modules/features2d/src/akaze/utils.h create mode 100644 modules/features2d/src/kaze.cpp create mode 100644 modules/features2d/src/kaze/KAZE.cpp create mode 100755 modules/features2d/src/kaze/KAZE.h create mode 100644 modules/features2d/src/kaze/config.h create mode 100644 modules/features2d/src/kaze/fed.cpp create mode 100644 modules/features2d/src/kaze/fed.h create mode 100644 modules/features2d/src/kaze/nldiffusion_functions.cpp create mode 100755 modules/features2d/src/kaze/nldiffusion_functions.h create mode 100644 modules/features2d/src/kaze/utils.cpp create mode 100644 modules/features2d/src/kaze/utils.h diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp new file mode 100644 index 0000000000..5a110ac175 --- /dev/null +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -0,0 +1,2068 @@ +//============================================================================= +// +// AKAZE.cpp +// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2) +// Institutions: Georgia Institute of Technology (1) +// TrueVision Solutions (2) +// Date: 15/09/2013 +// Email: pablofdezalc@gmail.com +// +// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file AKAZE.cpp + * @brief Main class for detecting and describing binary features in an + * accelerated nonlinear scale space + * @date Sep 15, 2013 + * @author Pablo F. 
Alcantarilla, Jesus Nuevo + */ + +#include "AKAZE.h" + +using namespace std; +using namespace cv; + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief AKAZE constructor with input options + * @param options AKAZE configuration options + * @note This constructor allocates memory for the nonlinear scale space +*/ +AKAZE::AKAZE(const AKAZEOptions& options) { + + soffset_ = options.soffset; + factor_size_ = DEFAULT_FACTOR_SIZE; + sderivatives_ = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; + omax_ = options.omax; + nsublevels_ = options.nsublevels; + dthreshold_ = options.dthreshold; + descriptor_ = options.descriptor; + diffusivity_ = options.diffusivity; + save_scale_space_ = options.save_scale_space; + verbosity_ = options.verbosity; + img_width_ = options.img_width; + img_height_ = options.img_height; + noctaves_ = omax_; + ncycles_ = 0; + reordering_ = true; + descriptor_size_ = options.descriptor_size; + descriptor_channels_ = options.descriptor_channels; + descriptor_pattern_size_ = options.descriptor_pattern_size; + tkcontrast_ = 0.0; + tscale_ = 0.0; + tderivatives_ = 0.0; + tdetector_ = 0.0; + textrema_ = 0.0; + tsubpixel_ = 0.0; + tdescriptor_ = 0.0; + + if (descriptor_size_ > 0 && descriptor_ >= MLDB_UPRIGHT) { + generateDescriptorSubsample(descriptorSamples_,descriptorBits_,descriptor_size_, + descriptor_pattern_size_,descriptor_channels_); + } + + Allocate_Memory_Evolution(); +} + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief AKAZE destructor +*/ +AKAZE::~AKAZE(void) { + + evolution_.clear(); +} + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief This method allocates the memory for the nonlinear diffusion evolution +*/ +void AKAZE::Allocate_Memory_Evolution(void) { + + float rfactor = 0.0; + int level_height = 0, level_width = 0; + + // Allocate the dimension of the matrices for the evolution + for (int i = 0; i <= omax_-1 && i <= DEFAULT_OCTAVE_MAX; i++) { + rfactor = 1.0/pow(2.f,i); + level_height = (int)(img_height_*rfactor); + level_width = (int)(img_width_*rfactor); + + // Smallest possible octave + if (level_width < 80 || level_height < 40) { + noctaves_ = i; + i = omax_; + break; + } + + for (int j = 0; j < nsublevels_; j++) { + tevolution aux; + aux.Lx = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Ly = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Lxx = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Lxy = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Lyy = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Lt = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Ldet = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Lflow = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.Lstep = cv::Mat::zeros(level_height,level_width,CV_32F); + aux.esigma = soffset_*pow(2.f,(float)(j)/(float)(nsublevels_) + i); + aux.sigma_size = fRound(aux.esigma); + aux.etime = 0.5*(aux.esigma*aux.esigma); + aux.octave = i; + aux.sublevel = j; + evolution_.push_back(aux); + } + } + + // Allocate memory for the number of cycles and time steps + for (size_t i = 1; i < evolution_.size(); i++) { + int naux = 0; + std::vector tau; + float ttime = 0.0; + ttime 
= evolution_[i].etime-evolution_[i-1].etime; + naux = fed_tau_by_process_time(ttime,1,0.25,reordering_,tau); + nsteps_.push_back(naux); + tsteps_.push_back(tau); + ncycles_++; + } +} + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief This method creates the nonlinear scale space for a given image + * @param img Input image for which the nonlinear scale space needs to be created + * @return 0 if the nonlinear scale space was created successfully, -1 otherwise +*/ +int AKAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img) { + + double t1 = 0.0, t2 = 0.0; + + if (evolution_.size() == 0) { + cout << "Error generating the nonlinear scale space!!" << endl; + cout << "Firstly you need to call AKAZE::Allocate_Memory_Evolution()" << endl; + return -1; + } + + t1 = getTickCount(); + + // Copy the original image to the first level of the evolution + img.copyTo(evolution_[0].Lt); + gaussian_2D_convolution(evolution_[0].Lt,evolution_[0].Lt,0,0,soffset_); + evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); + + // Firstly compute the kcontrast factor + kcontrast_ = compute_k_percentile(img,KCONTRAST_PERCENTILE,1.0,KCONTRAST_NBINS,0,0); + + t2 = getTickCount(); + tkcontrast_ = 1000.0*(t2-t1) / getTickFrequency(); + + // Now generate the rest of evolution levels + for (size_t i = 1; i < evolution_.size(); i++) { + + if (evolution_[i].octave > evolution_[i-1].octave) { + halfsample_image(evolution_[i-1].Lt,evolution_[i].Lt); + kcontrast_ = kcontrast_*0.75; + } + else { + evolution_[i-1].Lt.copyTo(evolution_[i].Lt); + } + + gaussian_2D_convolution(evolution_[i].Lt,evolution_[i].Lsmooth,0,0,1.0); + + // Compute the Gaussian derivatives Lx and Ly + image_derivatives_scharr(evolution_[i].Lsmooth,evolution_[i].Lx,1,0); + image_derivatives_scharr(evolution_[i].Lsmooth,evolution_[i].Ly,0,1); + + // Compute the conductivity equation + switch (diffusivity_) { + case 0: + pm_g1(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + break; + case 1: + pm_g2(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + break; + case 2: + weickert_diffusivity(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + break; + case 3: + charbonnier_diffusivity(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + break; + default: + std::cerr << "Diffusivity: " << diffusivity_ << " is not supported" << std::endl; + } + + // Perform FED n inner steps + for (int j = 0; j < nsteps_[i-1]; j++) { + nld_step_scalar(evolution_[i].Lt,evolution_[i].Lflow,evolution_[i].Lstep,tsteps_[i-1][j]); + } + } + + t2 = getTickCount(); + tscale_ = 1000.0*(t2-t1) / getTickFrequency(); + + return 0; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method selects interesting keypoints through the nonlinear scale space + * @param kpts Vector of detected keypoints +*/ +void AKAZE::Feature_Detection(std::vector& kpts) { + + double t1 = 0.0, t2 = 0.0; + + t1 = getTickCount(); + + Compute_Determinant_Hessian_Response(); + Find_Scale_Space_Extrema(kpts); + Do_Subpixel_Refinement(kpts); + + t2 = getTickCount(); + tdetector_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* 
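A minimal driver sketch for the detection/description pipeline declared above. The class, option fields and method names are taken from this patch; the grayscale load, the 1/255 conversion to a single-channel CV_32F image (the evolution buffers are CV_32F), the file name and the highgui include are illustrative assumptions, and the snippet presumes it is built inside the features2d module so that "AKAZE.h" and its precompiled header resolve.

#include <vector>
#include <opencv2/highgui/highgui.hpp>   // assumed, for cv::imread
#include "AKAZE.h"

int main() {
  // Load a grayscale image and convert it to a single-channel float image.
  cv::Mat img = cv::imread("image.jpg", 0);          // illustrative input
  cv::Mat img_32;
  img.convertTo(img_32, CV_32F, 1.0f / 255.0f);      // assumed [0,1] range

  // Configure the detector with the image dimensions; the remaining fields
  // keep the defaults set by the AKAZEOptions constructor in config.h.
  AKAZEOptions options;
  options.img_width  = img_32.cols;
  options.img_height = img_32.rows;

  // Build the nonlinear scale space, detect keypoints, compute descriptors.
  AKAZE evolution(options);
  evolution.Create_Nonlinear_Scale_Space(img_32);

  std::vector<cv::KeyPoint> kpts;
  cv::Mat desc;
  evolution.Feature_Detection(kpts);
  evolution.Compute_Descriptors(kpts, desc);

  evolution.Show_Computation_Times();
  return 0;
}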
+//************************************************************************************* + +/** + * @brief This method computes the multiscale derivatives for the nonlinear scale space +*/ +void AKAZE::Compute_Multiscale_Derivatives(void) { + + double t1 = 0.0, t2 = 0.0; + + t1 = getTickCount(); + +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < evolution_.size(); i++) { + float ratio = pow(2.f,evolution_[i].octave); + int sigma_size_ = fRound(evolution_[i].esigma*factor_size_/ratio); + + compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Lx,1,0,sigma_size_); + compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Ly,0,1,sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxx,1,0,sigma_size_); + compute_scharr_derivatives(evolution_[i].Ly,evolution_[i].Lyy,0,1,sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxy,0,1,sigma_size_); + + evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); + evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); + evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); + evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); + evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); + } + + t2 = getTickCount(); + tderivatives_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the feature detector response for the nonlinear scale space + * @note We use the Hessian determinant as the feature detector response +*/ +void AKAZE::Compute_Determinant_Hessian_Response(void) { + + // Firstly compute the multiscale derivatives + Compute_Multiscale_Derivatives(); + + for (size_t i = 0; i < evolution_.size(); i++) { + if (verbosity_ == true) { + cout << "Computing detector response. Determinant of Hessian. 
Evolution time: " << evolution_[i].etime << endl; + } + + for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) { + for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) { + float lxx = *(evolution_[i].Lxx.ptr(ix)+jx); + float lxy = *(evolution_[i].Lxy.ptr(ix)+jx); + float lyy = *(evolution_[i].Lyy.ptr(ix)+jx); + *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy-lxy*lxy); + } + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method finds extrema in the nonlinear scale space + * @param kpts Vector of detected keypoints +*/ +void AKAZE::Find_Scale_Space_Extrema(std::vector& kpts) { + + double t1 = 0.0, t2 = 0.0; + float value = 0.0; + float dist = 0.0, ratio = 0.0, smax = 0.0; + int npoints = 0, id_repeated = 0; + int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; + bool is_extremum = false, is_repeated = false, is_out = false; + cv::KeyPoint point; + + // Set maximum size + if (descriptor_ == SURF_UPRIGHT || descriptor_ == SURF || + descriptor_ == MLDB_UPRIGHT || descriptor_ == MLDB) { + smax = 10.0*sqrtf(2.0); + } + else if (descriptor_ == MSURF_UPRIGHT || descriptor_ == MSURF) { + smax = 12.0*sqrtf(2.0); + } + + t1 = getTickCount(); + + for (size_t i = 0; i < evolution_.size(); i++) { + for (int ix = 1; ix < evolution_[i].Ldet.rows-1; ix++) { + for (int jx = 1; jx < evolution_[i].Ldet.cols-1; jx++) { + is_extremum = false; + is_repeated = false; + is_out = false; + value = *(evolution_[i].Ldet.ptr(ix)+jx); + + // Filter the points with the detector threshold + if (value > dthreshold_ && value >= DEFAULT_MIN_DETECTOR_THRESHOLD && + value > *(evolution_[i].Ldet.ptr(ix)+jx-1) && + value > *(evolution_[i].Ldet.ptr(ix)+jx+1) && + value > *(evolution_[i].Ldet.ptr(ix-1)+jx-1) && + value > *(evolution_[i].Ldet.ptr(ix-1)+jx) && + value > *(evolution_[i].Ldet.ptr(ix-1)+jx+1) && + value > *(evolution_[i].Ldet.ptr(ix+1)+jx-1) && + value > *(evolution_[i].Ldet.ptr(ix+1)+jx) && + value > *(evolution_[i].Ldet.ptr(ix+1)+jx+1)) { + is_extremum = true; + + point.response = fabs(value); + point.size = evolution_[i].esigma*factor_size_; + point.octave = evolution_[i].octave; + point.class_id = i; + ratio = pow(2.f,point.octave); + sigma_size_ = fRound(point.size/ratio); + point.pt.x = jx; + point.pt.y = ix; + + for (size_t ik = 0; ik < kpts.size(); ik++) { + if (point.class_id == kpts[ik].class_id-1 || + point.class_id == kpts[ik].class_id || + point.class_id == kpts[ik].class_id+1) { + dist = sqrt(pow(point.pt.x*ratio-kpts[ik].pt.x,2)+pow(point.pt.y*ratio-kpts[ik].pt.y,2)); + if (dist <= point.size) { + if (point.response > kpts[ik].response) { + id_repeated = ik; + is_repeated = true; + } + else { + is_extremum = false; + } + break; + } + } + } + + // Check out of bounds + if (is_extremum == true) { + // Check that the point is under the image limits for the descriptor computation + left_x = fRound(point.pt.x-smax*sigma_size_)-1; + right_x = fRound(point.pt.x+smax*sigma_size_) +1; + up_y = fRound(point.pt.y-smax*sigma_size_)-1; + down_y = fRound(point.pt.y+smax*sigma_size_)+1; + + if (left_x < 0 || right_x >= evolution_[i].Ldet.cols || + up_y < 0 || down_y >= evolution_[i].Ldet.rows) { + is_out = true; + } + + if (is_out == false) { + if (is_repeated == false) { + point.pt.x *= ratio; + point.pt.y *= ratio; + kpts.push_back(point); + npoints++; + } + else { + point.pt.x *= ratio; + point.pt.y *= ratio; + kpts[id_repeated] = point; + } + 
} // if is_out + } //if is_extremum + } + } // for jx + } // for ix + } // for i + + t2 = getTickCount(); + textrema_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs subpixel refinement of the detected keypoints + * @param kpts Vector of detected keypoints +*/ +void AKAZE::Do_Subpixel_Refinement(std::vector& kpts) { + + double t1 = 0.0, t2 = 0.0; + float Dx = 0.0, Dy = 0.0, ratio = 0.0; + float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; + int x = 0, y = 0; + Mat A = Mat::zeros(2,2,CV_32F); + Mat b = Mat::zeros(2,1,CV_32F); + Mat dst = Mat::zeros(2,1,CV_32F); + + t1 = getTickCount(); + + for (size_t i = 0; i < kpts.size(); i++) { + ratio = pow(2.f,kpts[i].octave); + x = fRound(kpts[i].pt.x/ratio); + y = fRound(kpts[i].pt.y/ratio); + + // Compute the gradient + Dx = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x+1) + -*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x-1)); + Dy = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x) + -*(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x)); + + // Compute the Hessian + Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x+1) + + *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x-1) + -2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + + Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x) + + *(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x) + -2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + + Dxy = (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x+1) + +(*(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x-1))) + -(0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x+1) + +(*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x-1))); + + // Solve the linear system + *(A.ptr(0)) = Dxx; + *(A.ptr(1)+1) = Dyy; + *(A.ptr(0)+1) = *(A.ptr(1)) = Dxy; + *(b.ptr(0)) = -Dx; + *(b.ptr(1)) = -Dy; + + solve(A,b,dst,DECOMP_LU); + + if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0) { + kpts[i].pt.x = x + (*(dst.ptr(0))); + kpts[i].pt.y = y + (*(dst.ptr(1))); + kpts[i].pt.x *= powf(2.f,evolution_[kpts[i].class_id].octave); + kpts[i].pt.y *= powf(2.f,evolution_[kpts[i].class_id].octave); + kpts[i].angle = 0.0; + + // In OpenCV the size of a keypoint its the diameter + kpts[i].size *= 2.0; + } + // Delete the point since its not stable + else { + kpts.erase(kpts.begin()+i); + i--; + } + } + + t2 = getTickCount(); + tsubpixel_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs feature suppression based on 2D distance + * @param kpts Vector of keypoints + * @param mdist Maximum distance in pixels +*/ +void AKAZE::Feature_Suppression_Distance(std::vector& kpts, float mdist) { + + vector aux; + vector to_delete; + float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; + bool found = false; + + for (size_t i = 0; i < kpts.size(); i++) { + x1 = kpts[i].pt.x; + y1 = kpts[i].pt.y; + for (size_t j = i+1; j < kpts.size(); j++) { + x2 = kpts[j].pt.x; + y2 = kpts[j].pt.y; + dist = sqrt(pow(x1-x2,2)+pow(y1-y2,2)); + if (dist < mdist) { + if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { + to_delete.push_back(j); + } + else { + to_delete.push_back(i); + break; + } + } + } + } + + for (size_t i = 0; i < kpts.size(); i++) { + found = false; + for (size_t j 
= 0; j < to_delete.size(); j++) { + if (i == (size_t)(to_delete[j])) { + found = true; + break; + } + } + if (found == false) { + aux.push_back(kpts[i]); + } + } + + kpts.clear(); + kpts = aux; + aux.clear(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the set of descriptors through the nonlinear scale space + * @param kpts Vector of detected keypoints + * @param desc Matrix to store the descriptors +*/ +void AKAZE::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { + + double t1 = 0.0, t2 = 0.0; + + t1 = getTickCount(); + + // Allocate memory for the matrix with the descriptors + if (descriptor_ < MLDB_UPRIGHT) { + desc = cv::Mat::zeros(kpts.size(),64,CV_32FC1); + } + else { + // We use the full length binary descriptor -> 486 bits + if (descriptor_size_ == 0) { + int t = (6+36+120)*descriptor_channels_; + desc = cv::Mat::zeros(kpts.size(),ceil(t/8.),CV_8UC1); + } + else { + // We use the random bit selection length binary descriptor + desc = cv::Mat::zeros(kpts.size(),ceil(descriptor_size_/8.),CV_8UC1); + } + } + + switch (descriptor_) + { + case SURF_UPRIGHT : // Upright descriptors, not invariant to rotation + { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Get_SURF_Descriptor_Upright_64(kpts[i],desc.ptr(i)); + } + } + break; + case SURF : + { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_SURF_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + break; + case MSURF_UPRIGHT : // Upright descriptors, not invariant to rotation + { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Get_MSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + break; + case MSURF : + { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_MSURF_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + break; + case MLDB_UPRIGHT : // Upright descriptors, not invariant to rotation + { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + if (descriptor_size_ == 0) + Get_Upright_MLDB_Full_Descriptor(kpts[i],desc.ptr(i)); + else + Get_Upright_MLDB_Descriptor_Subset(kpts[i],desc.ptr(i)); + } + } + break; + case MLDB : + { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + if (descriptor_size_ == 0) + Get_MLDB_Full_Descriptor(kpts[i],desc.ptr(i)); + else + Get_MLDB_Descriptor_Subset(kpts[i],desc.ptr(i)); + } + } + break; + } + + t2 = getTickCount(); + tdescriptor_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the main orientation for a given keypoint + * @param kpt Input keypoint + * @note The orientation is computed using a similar approach as described in the + * original SURF method. 
See Bay et al., Speeded Up Robust Features, ECCV 2006 +*/ +void AKAZE::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { + + int ix = 0, iy = 0, idx = 0, s = 0, level = 0; + float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; + std::vector resX(109), resY(109), Ang(109); + const int id[] = {6,5,4,3,2,1,0,1,2,3,4,5,6}; + + // Variables for computing the dominant direction + float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; + + // Get the information from the keypoint + level = kpt.class_id; + ratio = (float)(1<(iy)+ix)); + resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); + + Ang[idx] = get_angle(resX[idx],resY[idx]); + ++idx; + } + } + } + + // Loop slides pi/3 window around feature point + for (ang1 = 0; ang1 < 2.0*CV_PI; ang1+=0.15f) { + ang2 =(ang1+CV_PI/3.0f > 2.0*CV_PI ? ang1-5.0f*CV_PI/3.0f : ang1+CV_PI/3.0f); + sumX = sumY = 0.f; + + for (size_t k = 0; k < Ang.size(); ++k) { + // Get angle from the x-axis of the sample point + const float & ang = Ang[k]; + + // Determine whether the point is within the window + if (ang1 < ang2 && ang1 < ang && ang < ang2) { + sumX+=resX[k]; + sumY+=resY[k]; + } + else if (ang2 < ang1 && + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI) )) { + sumX+=resX[k]; + sumY+=resY[k]; + } + } + + // if the vector produced from this window is longer than all + // previous vectors then this forms the new dominant direction + if (sumX*sumX + sumY*sumY > max) { + // store largest orientation + max = sumX*sumX + sumY*sumY; + kpt.angle = get_angle(sumX, sumY); + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright descriptor of the provided keypoint + * @param kpt Input keypoint + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional + * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., + * Speeded Up Robust Features, ECCV, 2006 +*/ +void AKAZE::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + ratio = (float)(1<(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } +} + +//************************************************************************************* +//************************************************************************************* +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional + * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., + * Speeded Up Robust Features, ECCV, 2006 +*/ +void AKAZE::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + ratio = (float)(1<(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright descriptor (not rotation invariant) of + * the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 +*/ +void AKAZE::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int scale = 0, dsize = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + ratio = (float)(1<(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + rx = gauss_s1*rx; + ry = gauss_s1*ry; + + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); + + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation of the keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 +*/ +void AKAZE::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + ratio = (float)(1<(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); + + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the rupright descriptor (not rotation invariant) of + * the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector +*/ +void AKAZE::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { + + float di = 0.0, dx = 0.0, dy = 0.0; + float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, ratio = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int level = 0, nsamples = 0, scale = 0; + int dcount1 = 0, dcount2 = 0; + + // Matrices for the M-LDB descriptor + Mat values_1 = Mat::zeros(4,descriptor_channels_,CV_32FC1); + Mat values_2 = Mat::zeros(9,descriptor_channels_,CV_32FC1); + Mat values_3 = Mat::zeros(16,descriptor_channels_,CV_32FC1); + + // Get the information from the keypoint + ratio = (float)(1<(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_1.ptr(dcount2)) = di; + *(values_1.ptr(dcount2)+1) = dx; + *(values_1.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + 
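  // Each pairwise comparison of the per-cell mean values (intensity, Lx, Ly)
  // below contributes one bit: the 2x2 grid gives 4*3/2 = 6 pairs per channel,
  // the 3x3 grid 36 and the 4x4 grid 120, i.e. (6+36+120)*3 = 486 bits in total,
  // matching the full-descriptor allocation in Compute_Descriptors(). Bits are
  // packed LSB-first: bit (dcount1 % 8) of byte desc[dcount1 / 8].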
// Do binary comparison first level + for(int i = 0; i < 4; i++) { + for (int j = i+1; j < 4; j++) { + if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + + if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + + if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + + // Second 3x3 grid + sample_step = ceil(pattern_size*2./3.); + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + di=dx=dy=0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_2.ptr(dcount2)) = di; + *(values_2.ptr(dcount2)+1) = dx; + *(values_2.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + dcount2 = 0; + //Do binary comparison second level + for (int i = 0; i < 9; i++) { + for (int j = i+1; j < 9; j++) { + if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + + if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + + if(*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + + // Third 4x4 grid + sample_step = pattern_size/2; + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + di=dx=dy=0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_3.ptr(dcount2)) = di; + *(values_3.ptr(dcount2)+1) = dx; + *(values_3.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + dcount2 = 0; + //Do binary comparison third level + for (int i = 0; i < 16; i++) { + for (int j = i+1; j < 16; j++) { + if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + + if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + + if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation of the keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector +*/ +void AKAZE::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { + + float di = 0.0, dx = 
0.0, dy = 0.0, ratio = 0.0; + float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int level = 0, nsamples = 0, scale = 0; + int dcount1 = 0, dcount2 = 0; + + // Matrices for the M-LDB descriptor + Mat values_1 = Mat::zeros(4,descriptor_channels_,CV_32FC1); + Mat values_2 = Mat::zeros(9,descriptor_channels_,CV_32FC1); + Mat values_3 = Mat::zeros(16,descriptor_channels_,CV_32FC1); + + // Get the information from the keypoint + ratio = (float)(1<(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + di += ri; + + if (descriptor_channels_ == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (descriptor_channels_ == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_1.ptr(dcount2)) = di; + if ( descriptor_channels_ > 1 ) { + *(values_1.ptr(dcount2)+1) = dx; + } + + if ( descriptor_channels_ > 2 ) { + *(values_1.ptr(dcount2)+2) = dy; + } + + dcount2++; + } + } + + // Do binary comparison first level + for (int i = 0; i < 4; i++) { + for (int j = i+1; j < 4; j++) { + if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + + if (descriptor_channels_ > 1) { + for (int i = 0; i < 4; i++) { + for (int j = i+1; j < 4; j++) { + if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + + dcount1++; + } + } + } + + if (descriptor_channels_ > 2) { + for (int i = 0; i < 4; i++) { + for ( int j = i+1; j < 4; j++) { + if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + } + + // Second 3x3 grid + sample_step = ceil(pattern_size*2./3.); + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + di=dx=dy=0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + di += ri; + + if (descriptor_channels_ == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (descriptor_channels_ == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_2.ptr(dcount2)) = di; + if (descriptor_channels_ > 1) { + *(values_2.ptr(dcount2)+1) = dx; + } + + if (descriptor_channels_ > 2) { + *(values_2.ptr(dcount2)+2) = dy; + } + + dcount2++; + } + } + + // Do binary comparison second level + for (int i = 0; i < 9; i++) { + for (int j = i+1; j < 9; j++) { + if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + + if (descriptor_channels_ > 1) { + for (int i = 0; i < 9; i++) { + for (int j = i+1; j < 9; j++) { + if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } 
+ } + + if (descriptor_channels_ > 2) { + for (int i = 0; i < 9; i++) { + for (int j = i+1; j < 9; j++) { + if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + } + + // Third 4x4 grid + sample_step = pattern_size/2; + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + di=dx=dy=0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + di += ri; + + if (descriptor_channels_ == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (descriptor_channels_ == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_3.ptr(dcount2)) = di; + if (descriptor_channels_ > 1) { + *(values_3.ptr(dcount2)+1) = dx; + } + + if (descriptor_channels_ > 2) { + *(values_3.ptr(dcount2)+2) = dy; + } + + dcount2++; + } + } + + // Do binary comparison third level + for(int i = 0; i < 16; i++) { + for(int j = i+1; j < 16; j++) { + if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + + if (descriptor_channels_ > 1) { + for (int i = 0; i < 16; i++) { + for (int j = i+1; j < 16; j++) { + if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + } + + if (descriptor_channels_ > 2) + { + for (int i = 0; i < 16; i++) { + for (int j = i+1; j < 16; j++) { + if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { + desc[dcount1/8] |= (1<<(dcount1%8)); + } + dcount1++; + } + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the M-LDB descriptor of the provided keypoint given the + * main orientation of the keypoint. 
The descriptor is computed based on a subset of + * the bits of the whole descriptor + * @param kpt Input keypoint + * @param desc Descriptor vector +*/ +void AKAZE::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { + + float di, dx, dy; + float rx, ry; + float sample_x = 0.f, sample_y = 0.f; + int x1 = 0, y1 = 0; + + // Get the information from the keypoint + float ratio = (float)(1<::zeros((4+9+16)*descriptor_channels_,1); + + // Sample everything, but only do the comparisons + vector steps(3); + steps.at(0) = descriptor_pattern_size_; + steps.at(1) = ceil(2.f*descriptor_pattern_size_/3.f); + steps.at(2) = descriptor_pattern_size_/2; + + for (int i=0; i(i); + int sample_step = steps.at(coords[0]); + di=0.0f; + dx=0.0f; + dy=0.0f; + + for (int k = coords[1]; k < coords[1] + sample_step; k++) { + for (int l = coords[2]; l < coords[2] + sample_step; l++) { + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + di += *(evolution_[level].Lt.ptr(y1)+x1); + + if (descriptor_channels_ > 1) { + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + if (descriptor_channels_ == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (descriptor_channels_ == 3) { + // Get the x and y derivatives on the rotated axis + dx += rx*co + ry*si; + dy += -rx*si + ry*co; + } + } + } + } + + *(values.ptr(descriptor_channels_*i)) = di; + + if (descriptor_channels_ == 2) { + *(values.ptr(descriptor_channels_*i+1)) = dx; + } + else if (descriptor_channels_ == 3) { + *(values.ptr(descriptor_channels_*i+1)) = dx; + *(values.ptr(descriptor_channels_*i+2)) = dy; + } + } + + // Do the comparisons + const float *vals = values.ptr(0); + const int *comps = descriptorBits_.ptr(0); + + for (int i=0; i vals[comps[2*i +1]]) { + desc[i/8] |= (1<<(i%8)); + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright (not rotation invariant) M-LDB descriptor + * of the provided keypoint given the main orientation of the keypoint. 
+ * The descriptor is computed based on a subset of the bits of the whole descriptor + * @param kpt Input keypoint + * @param desc Descriptor vector +*/ +void AKAZE::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { + + float di = 0.0f, dx = 0.0f, dy = 0.0f; + float rx = 0.0f, ry = 0.0f; + float sample_x = 0.0f, sample_y = 0.0f; + int x1 = 0, y1 = 0; + + // Get the information from the keypoint + float ratio = (float)(1<::zeros((4+9+16)*descriptor_channels_,1); + + vector steps(3); + steps.at(0) = descriptor_pattern_size_; + steps.at(1) = ceil(2.f*descriptor_pattern_size_/3.f); + steps.at(2) = descriptor_pattern_size_/2; + + for (int i=0; i < descriptorSamples_.rows; i++) { + int *coords = descriptorSamples_.ptr(i); + int sample_step = steps.at(coords[0]); + di=0.0f; + dx=0.0f; + dy=0.0f; + + for (int k = coords[1]; k < coords[1] + sample_step; k++) { + for (int l = coords[2]; l < coords[2] + sample_step; l++) { + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + di += *(evolution_[level].Lt.ptr(y1)+x1); + + if (descriptor_channels_ > 1) { + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + if (descriptor_channels_ == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (descriptor_channels_ == 3) { + dx += rx; + dy += ry; + } + } + } + } + + *(values.ptr(descriptor_channels_*i)) = di; + + if (descriptor_channels_ == 2) { + *(values.ptr(descriptor_channels_*i+1)) = dx; + } + else if (descriptor_channels_ == 3) { + *(values.ptr(descriptor_channels_*i+1)) = dx; + *(values.ptr(descriptor_channels_*i+2)) = dy; + } + } + + // Do the comparisons + const float *vals = values.ptr(0); + const int *comps = descriptorBits_.ptr(0); + + for (int i=0; i vals[comps[2*i +1]]) { + desc[i/8] |= (1<<(i%8)); + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method displays the computation times +*/ +void AKAZE::Show_Computation_Times(void) { + + cout << "(*) Time Scale Space: " << tscale_ << endl; + cout << "(*) Time Detector: " << tdetector_ << endl; + cout << " - Time Derivatives: " << tderivatives_ << endl; + cout << " - Time Extrema: " << textrema_ << endl; + cout << " - Time Subpixel: " << tsubpixel_ << endl; + cout << "(*) Time Descriptor: " << tdescriptor_ << endl; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes a (quasi-random) list of bits to be taken + * from the full descriptor. 
To speed the extraction, the function creates + * a list of the samples that are involved in generating at least a bit (sampleList) + * and a list of the comparisons between those samples (comparisons) + * @param sampleList + * @param comparisons The matrix with the binary comparisons + * @param nbits The number of bits of the descriptor + * @param pattern_size The pattern size for the binary descriptor + * @param nchannels Number of channels to consider in the descriptor (1-3) + * @note The function keeps the 18 bits (3-channels by 6 comparisons) of the + * coarser grid, since it provides the most robust estimations + */ +void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits, + int pattern_size, int nchannels) { + + int ssz = 0; + for (int i=0; i<3; i++) { + int gz = (i+2)*(i+2); + ssz += gz*(gz-1)/2; + } + ssz *= nchannels; + + CV_Assert(nbits<=ssz && "descriptor size can't be bigger than full descriptor"); + + // Since the full descriptor is usually under 10k elements, we pick + // the selection from the full matrix. We take as many samples per + // pick as the number of channels. For every pick, we + // take the two samples involved and put them in the sampling list + + Mat_ fullM(ssz/nchannels,5); + for (size_t i=0, c=0; i<3; i++) { + int gdiv = i+2; //grid divisions, per row + int gsz = gdiv*gdiv; + int psz = ceil(2.*pattern_size/(float)gdiv); + + for (int j=0; j comps = Mat_(nchannels*ceil(nbits/(float)nchannels),2); + comps = 1000; + + // Select some samples. A sample includes all channels + int count =0; + size_t npicks = ceil(nbits/(float)nchannels); + Mat_ samples(29,3); + Mat_ fullcopy = fullM.clone(); + samples = -1; + + for (size_t i=0; i= 0 && y >= 0) { + return atanf(y/x); + } + + if (x < 0 && y >= 0) { + return CV_PI - atanf(-y/x); + } + + if (x < 0 && y < 0) { + return CV_PI + atanf(y/x); + } + + if(x >= 0 && y < 0) { + return 2.0*CV_PI - atanf(-y/x); + } + + return 0; +} + +//************************************************************************************** +//************************************************************************************** + +/** + * @brief This function computes the value of a 2D Gaussian function + * @param x X Position + * @param y Y Position + * @param sig Standard Deviation +*/ +inline float gaussian(float x, float y, float sigma) { + + return expf(-(x*x+y*y)/(2.0f*sigma*sigma)); +} + +//************************************************************************************** +//************************************************************************************** + +/** + * @brief This function checks descriptor limits + * @param x X Position + * @param y Y Position + * @param width Image width + * @param height Image height +*/ +inline void check_descriptor_limits(int &x, int &y, const int width, const int height) { + + if (x < 0) { + x = 0; + } + + if (y < 0) { + y = 0; + } + + if (x > width-1) { + x = width-1; + } + + if (y > height-1) { + y = height-1; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This funtion rounds float to nearest integer + * @param flt Input float + * @return dst Nearest integer + */ +inline int fRound(float flt) +{ + return (int)(flt+0.5f); +} + diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h new file mode 100644 index 0000000000..fd1ec07fa3 --- /dev/null +++ 
b/modules/features2d/src/akaze/AKAZE.h @@ -0,0 +1,175 @@ +/** + * @file AKAZE.h + * @brief Main class for detecting and computing binary descriptors in an + * accelerated nonlinear scale space + * @date Mar 27, 2013 + * @author Pablo F. Alcantarilla, Jesus Nuevo + */ + +#ifndef _AKAZE_H_ +#define _AKAZE_H_ + +//************************************************************************************* +//************************************************************************************* + +// Includes +#include "config.h" +#include "fed.h" +#include "utils.h" +#include "nldiffusion_functions.h" + +//************************************************************************************* +//************************************************************************************* + +// AKAZE Class Declaration +class AKAZE { + +private: + + // Parameters of the AKAZE class + int omax_; // Maximum octave level + int noctaves_; // Number of octaves + int nsublevels_; // Number of sublevels per octave level + int img_width_; // Width of the original image + int img_height_; // Height of the original image + float soffset_; // Base scale offset + float factor_size_; // Factor for the multiscale derivatives + float sderivatives_; // Standard deviation of the Gaussian for the nonlinear diff. derivatives + float kcontrast_; // The contrast parameter for the scalar nonlinear diffusion + float dthreshold_; // Feature detector threshold response + int diffusivity_; // Diffusivity type, 0->PM G1, 1->PM G2, 2-> Weickert, 3->Charbonnier + int descriptor_; // Descriptor mode: + // 0-> SURF_UPRIGHT, 1->SURF + // 2-> M-SURF_UPRIGHT, 3->M-SURF + // 4-> M-LDB_UPRIGHT, 5->M-LDB + int descriptor_size_; // Size of the descriptor in bits. Use 0 for the full descriptor + int descriptor_pattern_size_; // Size of the pattern. Actual size sampled is 2*pattern_size + int descriptor_channels_; // Number of channels to consider in the M-LDB descriptor + bool save_scale_space_; // For saving scale space images + bool verbosity_; // Verbosity level + std::vector evolution_; // Vector of nonlinear diffusion evolution + + // FED parameters + int ncycles_; // Number of cycles + bool reordering_; // Flag for reordering time steps + std::vector > tsteps_; // Vector of FED dynamic time steps + std::vector nsteps_; // Vector of number of steps per cycle + + // Some matrices for the M-LDB descriptor computation + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
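  // Pairs of indices into the sampled values, one pair per descriptor bit;
  // filled by generateDescriptorSubsample() and read back in
  // Get_MLDB_Descriptor_Subset() to perform the binary comparisons.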
+ cv::Mat descriptorBits_; + cv::Mat bitMask_; + + // Computation times variables in ms + double tkcontrast_; // Kcontrast factor computation + double tscale_; // Nonlinear Scale space generation + double tderivatives_; // Multiscale derivatives + double tdetector_; // Feature detector + double textrema_; // Scale Space extrema + double tsubpixel_; // Subpixel refinement + double tdescriptor_; // Feature descriptors + +public: + + // Constructor + AKAZE(const AKAZEOptions &options); + + // Destructor + ~AKAZE(void); + + // Setters + void Set_Octave_Max(const int& omax) { + omax_ = omax; + } + void Set_NSublevels(const int& nsublevels) { + nsublevels_ = nsublevels; + } + void Set_Save_Scale_Space_Flag(const bool& save_scale_space) { + save_scale_space_ = save_scale_space; + } + void Set_Image_Width(const int& img_width) { + img_width_ = img_width; + } + void Set_Image_Height(const int& img_height) { + img_height_ = img_height; + } + + // Getters + int Get_Image_Width(void) { + return img_width_; + } + int Get_Image_Height(void) { + return img_height_; + } + double Get_Time_KContrast(void) { + return tkcontrast_; + } + double Get_Time_Scale_Space(void) { + return tscale_; + } + double Get_Time_Derivatives(void) { + return tderivatives_; + } + double Get_Time_Detector(void) { + return tdetector_; + } + double Get_Time_Descriptor(void) { + return tdescriptor_; + } + + // Scale Space methods + void Allocate_Memory_Evolution(void); + int Create_Nonlinear_Scale_Space(const cv::Mat& img); + void Feature_Detection(std::vector& kpts); + void Compute_Determinant_Hessian_Response(void); + void Compute_Multiscale_Derivatives(void); + void Find_Scale_Space_Extrema(std::vector& kpts); + void Do_Subpixel_Refinement(std::vector& kpts); + void Feature_Suppression_Distance(std::vector& kpts, float mdist); + + // Feature description methods + void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); + void Compute_Main_Orientation_SURF(cv::KeyPoint& kpt); + + // SURF Pattern Descriptor + void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc); + void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc); + + // M-SURF Pattern Descriptor + void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc); + void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc); + + // M-LDB Pattern Descriptor + void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc); + void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc); + void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc); + void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc); + + // Methods for saving some results and showing computation times + void Save_Scale_Space(void); + void Save_Detector_Responses(void); + void Show_Computation_Times(void); +}; + +//************************************************************************************* +//************************************************************************************* + +// Inline functions +/** + * @brief This function sets default parameters for the A-KAZE detector. 
+ * @param options AKAZE options + */ +void setDefaultAKAZEOptions(AKAZEOptions& options); + +// Inline functions +void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, + int nbits, int pattern_size, int nchannels); +float get_angle(float x, float y); +float gaussian(float x, float y, float sigma); +void check_descriptor_limits(int& x, int& y, const int width, const int height); +int fRound(float flt); + +//************************************************************************************* +//************************************************************************************* + +#endif diff --git a/modules/features2d/src/akaze/config.h b/modules/features2d/src/akaze/config.h new file mode 100644 index 0000000000..331c89275f --- /dev/null +++ b/modules/features2d/src/akaze/config.h @@ -0,0 +1,155 @@ +#ifndef _CONFIG_H_ +#define _CONFIG_H_ + +// STL +#include +#include +#include +#include +#include + +// OpenCV +#include "precomp.hpp" + +// OpenMP +#ifdef _OPENMP +# include +#endif + +// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right +const float gauss25[7][7] = { + {0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f}, + {0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f}, + {0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f}, + {0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f}, + {0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f}, + {0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f}, + {0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f} +}; + + +// Scale Space parameters +const float DEFAULT_SCALE_OFFSET = 1.60f; // Base scale offset (sigma units) +const float DEFAULT_FACTOR_SIZE = 1.5f; // Factor for the multiscale derivatives +const int DEFAULT_OCTAVE_MIN = 0; // Initial octave level (-1 means that the size of the input image is duplicated) +const int DEFAULT_OCTAVE_MAX = 4; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) +const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level +const int DEFAULT_DIFFUSIVITY_TYPE = 1; +const float KCONTRAST_PERCENTILE = 0.7f; +const int KCONTRAST_NBINS = 300; +const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; +const float DEFAULT_KCONTRAST = .01f; + + +// Detector Parameters +const float DEFAULT_DETECTOR_THRESHOLD = 0.001f; // Detector response threshold to accept point +const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001f; // Minimum Detector response threshold to accept point +const int DEFAULT_LDB_DESCRIPTOR_SIZE = 0; // Use 0 for the full descriptor, or the number of bits +const int DEFAULT_LDB_PATTERN_SIZE = 10; // Actual patch size is 2*pattern_size*point.scale; +const int DEFAULT_LDB_CHANNELS = 3; + +// Descriptor Parameters +enum DESCRIPTOR_TYPE +{ + SURF_UPRIGHT = 0, // Upright descriptors, not invariant to rotation + SURF = 1, + MSURF_UPRIGHT = 2, // Upright descriptors, not invariant to rotation + MSURF = 3, + MLDB_UPRIGHT = 4, // Upright descriptors, not invariant to rotation + MLDB = 5 +}; + +const int DEFAULT_DESCRIPTOR = MLDB; + +// Some debugging options +const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images +const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) +const bool 
DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios +const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints + +// Options structure +struct AKAZEOptions +{ + int omin; + int omax; + int nsublevels; + int img_width; + int img_height; + int diffusivity; + float soffset; + float sderivatives; + float dthreshold; + float dthreshold2; + int descriptor; + int descriptor_size; + int descriptor_channels; + int descriptor_pattern_size; + bool save_scale_space; + bool save_keypoints; + bool verbosity; + + AKAZEOptions() + { + // Load the default options + soffset = DEFAULT_SCALE_OFFSET; + omax = DEFAULT_OCTAVE_MAX; + nsublevels = DEFAULT_NSUBLEVELS; + dthreshold = DEFAULT_DETECTOR_THRESHOLD; + diffusivity = DEFAULT_DIFFUSIVITY_TYPE; + descriptor = DEFAULT_DESCRIPTOR; + descriptor_size = DEFAULT_LDB_DESCRIPTOR_SIZE; + descriptor_channels = DEFAULT_LDB_CHANNELS; + descriptor_pattern_size = DEFAULT_LDB_PATTERN_SIZE; + sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; + save_scale_space = DEFAULT_SAVE_SCALE_SPACE; + save_keypoints = DEFAULT_SAVE_KEYPOINTS; + verbosity = DEFAULT_VERBOSITY; + } + + friend std::ostream& operator<<(std::ostream& os, + const AKAZEOptions& akaze_options) + { + os << std::left; +#define CHECK_AKAZE_OPTION(option) \ + os << std::setw(33) << #option << " = " << option << std::endl + + // Scale-space parameters. + CHECK_AKAZE_OPTION(akaze_options.omax); + CHECK_AKAZE_OPTION(akaze_options.nsublevels); + CHECK_AKAZE_OPTION(akaze_options.soffset); + CHECK_AKAZE_OPTION(akaze_options.sderivatives); + CHECK_AKAZE_OPTION(akaze_options.diffusivity); + // Detection parameters. + CHECK_AKAZE_OPTION(akaze_options.dthreshold); + // Descriptor parameters. + CHECK_AKAZE_OPTION(akaze_options.descriptor); + CHECK_AKAZE_OPTION(akaze_options.descriptor_channels); + CHECK_AKAZE_OPTION(akaze_options.descriptor_size); + // Save scale-space + CHECK_AKAZE_OPTION(akaze_options.save_scale_space); + // Verbose option for debug. + CHECK_AKAZE_OPTION(akaze_options.verbosity); +#undef CHECK_AKAZE_OPTIONS + + return os; + } +}; + +struct tevolution +{ + cv::Mat Lx, Ly; // First order spatial derivatives + cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives + cv::Mat Lflow; // Diffusivity image + cv::Mat Lt; // Evolution image + cv::Mat Lsmooth; // Smoothed image + cv::Mat Lstep; // Evolution step update + cv::Mat Ldet; // Detector response + float etime; // Evolution time + float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 + int octave; // Image octave + int sublevel; // Image sublevel in each octave + int sigma_size; // Integer sigma. 
For computing the feature detector responses +}; + + +#endif \ No newline at end of file diff --git a/modules/features2d/src/akaze/fed.h b/modules/features2d/src/akaze/fed.h new file mode 100644 index 0000000000..4ac82f68e3 --- /dev/null +++ b/modules/features2d/src/akaze/fed.h @@ -0,0 +1,26 @@ +#ifndef FED_H +#define FED_H + +//****************************************************************************** +//****************************************************************************** + +// Includes +#include +#include + +//************************************************************************************* +//************************************************************************************* + +// Declaration of functions +int fed_tau_by_process_time(const float& T, const int& M, const float& tau_max, + const bool& reordering, std::vector& tau); +int fed_tau_by_cycle_time(const float& t, const float& tau_max, + const bool& reordering, std::vector &tau) ; +int fed_tau_internal(const int& n, const float& scale, const float& tau_max, + const bool& reordering, std::vector &tau); +bool fed_is_prime_internal(const int& number); + +//************************************************************************************* +//************************************************************************************* + +#endif // FED_H diff --git a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp new file mode 100644 index 0000000000..0699e92ca0 --- /dev/null +++ b/modules/features2d/src/akaze/nldiffusion_functions.cpp @@ -0,0 +1,431 @@ +//============================================================================= +// +// nldiffusion_functions.cpp +// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2) +// Institutions: Georgia Institute of Technology (1) +// TrueVision Solutions (2) +// Date: 15/09/2013 +// Email: pablofdezalc@gmail.com +// +// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file nldiffusion_functions.cpp + * @brief Functions for nonlinear diffusion filtering applications + * @date Sep 15, 2013 + * @author Pablo F. 
Alcantarilla, Jesus Nuevo + */ + +#include "nldiffusion_functions.h" + +using namespace std; +using namespace cv; + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function smoothes an image with a Gaussian kernel + * @param src Input image + * @param dst Output image + * @param ksize_x Kernel size in X-direction (horizontal) + * @param ksize_y Kernel size in Y-direction (vertical) + * @param sigma Kernel standard deviation + */ +void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, + const size_t& ksize_y, const float& sigma) { + + size_t ksize_x_ = 0, ksize_y_ = 0; + + // Compute an appropriate kernel size according to the specified sigma + if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { + ksize_x_ = ceil(2.0*(1.0 + (sigma-0.8)/(0.3))); + ksize_y_ = ksize_x_; + } + + // The kernel size must be and odd number + if ((ksize_x_ % 2) == 0) { + ksize_x_ += 1; + } + + if ((ksize_y_ % 2) == 0) { + ksize_y_ += 1; + } + + // Perform the Gaussian Smoothing with border replication + GaussianBlur(src,dst,Size(ksize_x_,ksize_y_),sigma,sigma,BORDER_REPLICATE); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes image derivatives with Scharr kernel + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @note Scharr operator approximates better rotation invariance than + * other stencils such as Sobel. 
See Weickert and Scharr, + * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, + * Journal of Visual Communication and Image Representation 2002 + */ +void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, + const size_t& xorder, const size_t& yorder) { + Scharr(src,dst,CV_32F,xorder,yorder,1.0,0,BORDER_DEFAULT); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the Perona and Malik conductivity coefficient g1 + * g1 = exp(-|dL|^2/k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ +void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + exp(-(Lx.mul(Lx)+Ly.mul(Ly))/(k*k),dst); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the Perona and Malik conductivity coefficient g2 + * g2 = 1 / (1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ +void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + dst = 1.0/(1.0+(Lx.mul(Lx)+Ly.mul(Ly))/(k*k)); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes Weickert conductivity coefficient gw + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ +void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + Mat modg; + pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg); + cv::exp(-3.315/modg, dst); + dst = 1.0 - dst; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes Charbonnier conductivity coefficient gc + * gc = 1 / sqrt(1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. 
Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ +void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + Mat den; + cv::sqrt(1.0+(Lx.mul(Lx)+Ly.mul(Ly))/(k*k),den); + dst = 1.0/ den; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes a good empirical value for the k contrast factor + * given an input image, the percentile (0-1), the gradient scale and the number of + * bins in the histogram + * @param img Input image + * @param perc Percentile of the image gradient histogram (0-1) + * @param gscale Scale for computing the image gradient histogram + * @param nbins Number of histogram bins + * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel + * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel + * @return k contrast factor + */ +float compute_k_percentile(const cv::Mat& img, const float& perc, const float& gscale, + const size_t& nbins, const size_t& ksize_x, const size_t& ksize_y) { + + size_t nbin = 0, nelements = 0, nthreshold = 0, k = 0; + float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; + float npoints = 0.0; + float hmax = 0.0; + + // Create the array for the histogram + float *hist = new float[nbins]; + + // Create the matrices + Mat gaussian = Mat::zeros(img.rows,img.cols,CV_32F); + Mat Lx = Mat::zeros(img.rows,img.cols,CV_32F); + Mat Ly = Mat::zeros(img.rows,img.cols,CV_32F); + + // Set the histogram to zero, just in case + for (size_t i = 0; i < nbins; i++) { + hist[i] = 0.0; + } + + // Perform the Gaussian convolution + gaussian_2D_convolution(img,gaussian,ksize_x,ksize_y,gscale); + + // Compute the Gaussian derivatives Lx and Ly + image_derivatives_scharr(gaussian,Lx,1,0); + image_derivatives_scharr(gaussian,Ly,0,1); + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows-1; i++) { + for (int j = 1; j < gaussian.cols-1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); + + // Get the maximum + if (modg > hmax) { + hmax = modg; + } + } + } + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows-1; i++) { + for (int j = 1; j < gaussian.cols-1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); + + // Find the correspondent bin + if (modg != 0.0) { + nbin = floor(nbins*(modg/hmax)); + + if (nbin == nbins) { + nbin--; + } + + hist[nbin]++; + npoints++; + } + } + } + + // Now find the perc of the histogram percentile + nthreshold = (size_t)(npoints*perc); + + for (k = 0; nelements < nthreshold && k < nbins; k++) { + nelements = nelements + hist[k]; + } + + if (nelements < nthreshold) { + kperc = 0.03; + } + else { + kperc = hmax*((float)(k)/(float)nbins); + } + + delete [] hist; + return kperc; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes Scharr image derivatives + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @param scale Scale factor for the derivative size + */ +void 
compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, const size_t& xorder, + const size_t& yorder, const size_t& scale) { + + Mat kx, ky; + compute_derivative_kernels(kx, ky, xorder,yorder,scale); + sepFilter2D(src,dst,CV_32F,kx,ky); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function performs a scalar non-linear diffusion step + * @param Ld2 Output image in the evolution + * @param c Conductivity image + * @param Lstep Previous image in the evolution + * @param stepsize The step size in time units + * @note Forward Euler Scheme 3x3 stencil + * The function c is a scalar value that depends on the gradient norm + * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy + */ +void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize) { + +#ifdef _OPENMP +#pragma omp parallel for schedule(dynamic) +#endif + for (int i = 1; i < Lstep.rows-1; i++) { + for (int j = 1; j < Lstep.cols-1; j++) { + float xpos = ((*(c.ptr(i)+j))+(*(c.ptr(i)+j+1)))*((*(Ld.ptr(i)+j+1))-(*(Ld.ptr(i)+j))); + float xneg = ((*(c.ptr(i)+j-1))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i)+j-1))); + + float ypos = ((*(c.ptr(i)+j))+(*(c.ptr(i+1)+j)))*((*(Ld.ptr(i+1)+j))-(*(Ld.ptr(i)+j))); + float yneg = ((*(c.ptr(i-1)+j))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i-1)+j))); + + *(Lstep.ptr(i)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + } + + for (int j = 1; j < Lstep.cols-1; j++) { + float xpos = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j+1)))*((*(Ld.ptr(0)+j+1))-(*(Ld.ptr(0)+j))); + float xneg = ((*(c.ptr(0)+j-1))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j-1))); + + float ypos = ((*(c.ptr(0)+j))+(*(c.ptr(1)+j)))*((*(Ld.ptr(1)+j))-(*(Ld.ptr(0)+j))); + float yneg = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j))); + + *(Lstep.ptr(0)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + for (int j = 1; j < Lstep.cols-1; j++) { + float xpos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j+1)))*((*(Ld.ptr(Lstep.rows-1)+j+1))-(*(Ld.ptr(Lstep.rows-1)+j))); + float xneg = ((*(c.ptr(Lstep.rows-1)+j-1))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j-1))); + + float ypos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j))); + float yneg = ((*(c.ptr(Lstep.rows-2)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-2)+j))); + + *(Lstep.ptr(Lstep.rows-1)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + for (int i = 1; i < Lstep.rows-1; i++) { + float xpos = ((*(c.ptr(i)))+(*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1))-(*(Ld.ptr(i)))); + float xneg = ((*(c.ptr(i)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i)))); + + float ypos = ((*(c.ptr(i)))+(*(c.ptr(i+1))))*((*(Ld.ptr(i+1)))-(*(Ld.ptr(i)))); + float yneg = ((*(c.ptr(i-1)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i-1)))); + + *(Lstep.ptr(i)) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + for (int i = 1; i < Lstep.rows-1; i++) { + float xpos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); + float xneg = ((*(c.ptr(i)+Lstep.cols-2))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-2))); + + float ypos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i+1)+Lstep.cols-1)))*((*(Ld.ptr(i+1)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); + float yneg = 
((*(c.ptr(i-1)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i-1)+Lstep.cols-1))); + + *(Lstep.ptr(i)+Lstep.cols-1) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + Ld = Ld + Lstep; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function downsamples the input image with the kernel [1/4,1/2,1/4] + * @param img Input image to be downsampled + * @param dst Output image with half of the resolution of the input image + */ +void downsample_image(const cv::Mat& src, cv::Mat& dst) { + + int i1 = 0, j1 = 0, i2 = 0, j2 = 0; + + for (i1 = 1; i1 < src.rows; i1+=2) { + j2 = 0; + for (j1 = 1; j1 < src.cols; j1+=2) { + *(dst.ptr(i2)+j2) = 0.5*(*(src.ptr(i1)+j1))+0.25*(*(src.ptr(i1)+j1-1) + *(src.ptr(i1)+j1+1)); + j2++; + } + + i2++; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function downsamples the input image using OpenCV resize + * @param img Input image to be downsampled + * @param dst Output image with half of the resolution of the input image + */ +void halfsample_image(const cv::Mat& src, cv::Mat& dst) { + + // Make sure the destination image is of the right size + CV_Assert(src.cols/2==dst.cols); + CV_Assert(src.rows / 2 == dst.rows); + resize(src,dst,dst.size(),0,0,cv::INTER_AREA); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief Compute Scharr derivative kernels for sizes different than 3 + * @param kx_ The derivative kernel in x-direction + * @param ky_ The derivative kernel in y-direction + * @param dx The derivative order in x-direction + * @param dy The derivative order in y-direction + * @param scale The kernel size + */ +void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, + const size_t& dx, const size_t& dy, const size_t& scale) { + + const int ksize = 3 + 2*(scale-1); + + // The usual Scharr kernel + if (scale == 1) { + getDerivKernels(kx_,ky_,dx,dy,0,true,CV_32F); + return; + } + + kx_.create(ksize,1,CV_32F,-1,true); + ky_.create(ksize,1,CV_32F,-1,true); + Mat kx = kx_.getMat(); + Mat ky = ky_.getMat(); + + float w = 10.0/3.0; + float norm = 1.0/(2.0*scale*(w+2.0)); + + for (int k = 0; k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? 
dx : dy; + float kerI[1000]; + + for (int t = 0; t < ksize; t++) { + kerI[t] = 0; + } + + if (order == 0) { + kerI[0] = norm, kerI[ksize/2] = w*norm, kerI[ksize-1] = norm; + } + else { + kerI[0] = -1, kerI[ksize/2] = 0, kerI[ksize-1] = 1; + } + + Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); + temp.copyTo(*kernel); + } +} diff --git a/modules/features2d/src/akaze/nldiffusion_functions.h b/modules/features2d/src/akaze/nldiffusion_functions.h new file mode 100644 index 0000000000..172fa25f3e --- /dev/null +++ b/modules/features2d/src/akaze/nldiffusion_functions.h @@ -0,0 +1,41 @@ +#ifndef _NLDIFFUSION_FUNCTIONS_H_ +#define _NLDIFFUSION_FUNCTIONS_H_ + +//****************************************************************************** +//****************************************************************************** + +// Includes +#include "precomp.hpp" + +// OpenMP Includes +#ifdef _OPENMP +# include <omp.h> +#endif + +//************************************************************************************* +//************************************************************************************* + +// Declaration of functions +void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, + const size_t& ksize_y, const float& sigma); +void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, + const size_t& xorder, const size_t& yorder); +void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); +void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); +void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); +void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); +float compute_k_percentile(const cv::Mat& img, const float& perc, const float& gscale, + const size_t& nbins, const size_t& ksize_x, const size_t& ksize_y); +void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, const size_t& xorder, + const size_t& yorder, const size_t& scale); +void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); +void downsample_image(const cv::Mat& src, cv::Mat& dst); +void halfsample_image(const cv::Mat& src, cv::Mat& dst); +void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, + const size_t& dx, const size_t& dy, const size_t& scale); + +//************************************************************************************* +//************************************************************************************* + + +#endif diff --git a/modules/features2d/src/akaze/utils.cpp b/modules/features2d/src/akaze/utils.cpp new file mode 100644 index 0000000000..eb14abcd51 --- /dev/null +++ b/modules/features2d/src/akaze/utils.cpp @@ -0,0 +1,196 @@ +//============================================================================= +// +// utils.cpp +// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2) +// Institutions: Georgia Institute of Technology (1) +// TrueVision Solutions (2) +// +// Date: 15/09/2013 +// Email: pablofdezalc@gmail.com +// +// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file utils.cpp + * @brief Some utility functions + * @date Sep 15, 2013 + * @author Pablo F.
Alcantarilla, Jesus Nuevo + */ + +#include "precomp.hpp" +#include "utils.h" + +// Namespaces +using namespace std; +using namespace cv; + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the minimum value of a float image + * @param src Input image + * @param value Minimum value + */ +void compute_min_32F(const cv::Mat &src, float &value) { + + float aux = 1000.0; + + for (int i = 0; i < src.rows; i++) { + for (int j = 0; j < src.cols; j++) { + if (src.at(i,j) < aux) { + aux = src.at(i,j); + } + } + } + + value = aux; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the maximum value of a float image + * @param src Input image + * @param value Maximum value + */ +void compute_max_32F(const cv::Mat &src, float &value) { + + float aux = 0.0; + + for (int i = 0; i < src.rows; i++) { + for (int j = 0; j < src.cols; j++) { + if (src.at(i,j) > aux) { + aux = src.at(i,j); + } + } + } + + value = aux; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function converts the scale of the input image prior to visualization + * @param src Input/Output image + * @param value Maximum value + */ +void convert_scale(cv::Mat &src) { + + float min_val = 0, max_val = 0; + + compute_min_32F(src,min_val); + + src = src - min_val; + + compute_max_32F(src,max_val); + src = src / max_val; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function copies the input image and converts the scale of the copied + * image prior visualization + * @param src Input image + * @param dst Output image + */ +void copy_and_convert_scale(const cv::Mat &src, cv::Mat dst) { + + float min_val = 0, max_val = 0; + + src.copyTo(dst); + compute_min_32F(dst,min_val); + + dst = dst - min_val; + + compute_max_32F(dst,max_val); + dst = dst / max_val; +} + +//************************************************************************************* +//************************************************************************************* + +const size_t length = string("--descriptor_channels").size() + 2; +static inline std::ostream& cout_help() +{ cout << setw(length); return cout; } + +static inline std::string toUpper(std::string s) +{ + std::transform(s.begin(), s.end(), s.begin(), ::toupper); + return s; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function shows the possible command line configuration options + */ +void show_input_options_help(int example) { + + fflush(stdout); + cout << "A-KAZE Features" << endl; + cout << "Usage: "; + if (example == 0) { + cout << "./akaze_features -i img.jpg [options]" << endl; + } + else if (example == 1) { + cout << "./akaze_match img1.jpg img2.pgm homography.txt [options]" << endl; + } + else if (example == 2) { + cout << "./akaze_compare img1.jpg img2.pgm homography.txt [options]" << endl; + } + + 
cout << endl; + cout_help() << "Options below are not mandatory. Unless specified, default arguments are used." << endl << endl; + // Justify on the left + cout << left; + // Generalities + cout_help() << "--help" << "Show the command line options" << endl; + cout_help() << "--verbose " << "Verbosity is required" << endl; + cout_help() << endl; + // Scale-space parameters + cout_help() << "--soffset" << "Base scale offset (sigma units)" << endl; + cout_help() << "--omax" << "Maximum octave of image evolution" << endl; + cout_help() << "--nsublevels" << "Number of sublevels per octave" << endl; + cout_help() << "--diffusivity" << "Diffusivity function. Possible values:" << endl; + cout_help() << " " << "0 -> Perona-Malik, g1 = exp(-|dL|^2/k^2)" << endl; + cout_help() << " " << "1 -> Perona-Malik, g2 = 1 / (1 + dL^2 / k^2)" << endl; + cout_help() << " " << "2 -> Weickert diffusivity" << endl; + cout_help() << " " << "3 -> Charbonnier diffusivity" << endl; + cout_help() << endl; + // Feature detection parameters. + cout_help() << "--dthreshold" << "Feature detector threshold response for keypoints" << endl; + cout_help() << " " << "(0.001 can be a good value)" << endl; + cout_help() << endl; + // Descriptor parameters. + cout_help() << "--descriptor" << "Descriptor Type. Possible values:" << endl; + cout_help() << " " << "0 -> SURF_UPRIGHT" << endl; + cout_help() << " " << "1 -> SURF" << endl; + cout_help() << " " << "2 -> M-SURF_UPRIGHT," << endl; + cout_help() << " " << "3 -> M-SURF" << endl; + cout_help() << " " << "4 -> M-LDB_UPRIGHT" << endl; + cout_help() << " " << "5 -> M-LDB" << endl; + + cout_help() << "--descriptor_channels " << "Descriptor Channels for M-LDB. Valid values: " << endl; + cout_help() << " " << "1 -> intensity" << endl; + cout_help() << " " << "2 -> intensity + gradient magnitude" << endl; + cout_help() << " " << "3 -> intensity + X and Y gradients" < show detection results." << endl; + cout_help() << " " << "0 -> don't show detection results" << endl; + cout_help() << endl; +} diff --git a/modules/features2d/src/akaze/utils.h b/modules/features2d/src/akaze/utils.h new file mode 100644 index 0000000000..894c836ed0 --- /dev/null +++ b/modules/features2d/src/akaze/utils.h @@ -0,0 +1,54 @@ + +#ifndef _UTILS_H_ +#define _UTILS_H_ + +//****************************************************************************** +//****************************************************************************** + +// OpenCV Includes +#include "precomp.hpp" + +// System Includes +#include +#include +#include +#include +#include +#include +#include + +//****************************************************************************** +//****************************************************************************** + +// Stringify common types such as int, double and others. 
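
A brief usage sketch for the two stringify helpers declared just below, illustrative only and not part of the patch. It assumes the template parameter lists read template <typename T> and template <typename Integer> (they appear to have been lost in extraction) and that this header is reachable as "utils.h".

    #include <iostream>
    #include "utils.h"

    int main() {
      std::cout << to_string(3.5f) << std::endl;            // prints "3.5" via std::stringstream
      std::cout << to_formatted_string(19, 3) << std::endl; // prints "019", zero-padded to 3 digits
      return 0;
    }
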
+template +inline std::string to_string(const T& x) { + std::stringstream oss; + oss << x; + return oss.str(); +} + +//****************************************************************************** +//****************************************************************************** + +// Stringify and format integral types as follows: +// to_formatted_string( 1, 2) produces string: '01' +// to_formatted_string( 5, 2) produces string: '05' +// to_formatted_string( 19, 2) produces string: '19' +// to_formatted_string( 19, 3) produces string: '019' +template +inline std::string to_formatted_string(Integer x, int num_digits) { + std::stringstream oss; + oss << std::setfill('0') << std::setw(num_digits) << x; + return oss.str(); +} + +//****************************************************************************** +//****************************************************************************** + +void compute_min_32F(const cv::Mat& src, float& value); +void compute_max_32F(const cv::Mat& src, float& value); +void convert_scale(cv::Mat& src); +void copy_and_convert_scale(const cv::Mat& src, cv::Mat& dst); + +#endif diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp new file mode 100644 index 0000000000..09246e1670 --- /dev/null +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -0,0 +1,2801 @@ + +//============================================================================= +// +// KAZE.cpp +// Author: Pablo F. Alcantarilla +// Institution: University d'Auvergne +// Address: Clermont Ferrand, France +// Date: 21/01/2012 +// Email: pablofdezalc@gmail.com +// +// KAZE Features Copyright 2012, Pablo F. Alcantarilla +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file KAZE.cpp + * @brief Main class for detecting and describing features in a nonlinear + * scale space + * @date Jan 21, 2012 + * @author Pablo F. 
Alcantarilla + */ + +#include "KAZE.h" + +// Namespaces +using namespace std; +using namespace cv; + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief KAZE constructor with input options + * @param options KAZE configuration options + * @note The constructor allocates memory for the nonlinear scale space +*/ +KAZE::KAZE(KAZEOptions& options) { + + soffset_ = options.soffset; + sderivatives_ = options.sderivatives; + omax_ = options.omax; + nsublevels_ = options.nsublevels; + save_scale_space_ = options.save_scale_space; + verbosity_ = options.verbosity; + img_width_ = options.img_width; + img_height_ = options.img_height; + dthreshold_ = options.dthreshold; + diffusivity_ = options.diffusivity; + descriptor_mode_ = options.descriptor; + use_fed_ = options.use_fed; + use_upright_ = options.upright; + use_extended_ = options.extended; + kcontrast_ = DEFAULT_KCONTRAST; + ncycles_ = 0; + reordering_ = true; + tkcontrast_ = 0.0; + tnlscale_ = 0.0; + tdetector_ = 0.0; + tmderivatives_ = 0.0; + tdresponse_ = 0.0; + tdescriptor_ = 0.0; + + // Now allocate memory for the evolution + Allocate_Memory_Evolution(); +} + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief KAZE destructor +*/ +KAZE::~KAZE(void) { + + evolution_.clear(); +} + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief This method allocates the memory for the nonlinear diffusion evolution +*/ +void KAZE::Allocate_Memory_Evolution(void) { + + // Allocate the dimension of the matrices for the evolution + for (int i = 0; i <= omax_-1; i++) { + for (int j = 0; j <= nsublevels_-1; j++) { + + TEvolution aux; + aux.Lx = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Ly = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lxx = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lxy = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lyy = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lflow = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lt = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lsmooth = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Lstep = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.Ldet = cv::Mat::zeros(img_height_,img_width_,CV_32F); + aux.esigma = soffset_*pow((float)2.0,(float)(j)/(float)(nsublevels_) + i); + aux.etime = 0.5*(aux.esigma*aux.esigma); + aux.sigma_size = fRound(aux.esigma); + aux.octave = i; + aux.sublevel = j; + evolution_.push_back(aux); + } + } + + // Allocate memory for the FED number of cycles and time steps + if (use_fed_) { + for (size_t i = 1; i < evolution_.size(); i++) { + int naux = 0; + vector tau; + float ttime = 0.0; + ttime = evolution_[i].etime-evolution_[i-1].etime; + naux = fed_tau_by_process_time(ttime,1,0.25,reordering_,tau); + nsteps_.push_back(naux); + tsteps_.push_back(tau); + ncycles_++; + } + } + else { + // Allocate memory for the auxiliary variables that are used in the AOS scheme + Ltx_ = Mat::zeros(img_width_,img_height_,CV_32F); + Lty_ = Mat::zeros(img_height_,img_width_,CV_32F); + px_ = Mat::zeros(img_height_,img_width_,CV_32F); + py_ = Mat::zeros(img_height_,img_width_,CV_32F); + ax_ = Mat::zeros(img_height_,img_width_,CV_32F); + ay_ = 
Mat::zeros(img_height_,img_width_,CV_32F); + bx_ = Mat::zeros(img_height_-1,img_width_,CV_32F); + by_ = Mat::zeros(img_height_-1,img_width_,CV_32F); + qr_ = Mat::zeros(img_height_-1,img_width_,CV_32F); + qc_ = Mat::zeros(img_height_,img_width_-1,CV_32F); + } + +} + +//******************************************************************************* +//******************************************************************************* + +/** + * @brief This method creates the nonlinear scale space for a given image + * @param img Input image for which the nonlinear scale space needs to be created + * @return 0 if the nonlinear scale space was created successfully. -1 otherwise +*/ +int KAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img) { + + double t2 = 0.0, t1 = 0.0; + + if (evolution_.size() == 0) { + cout << "Error generating the nonlinear scale space!!" << endl; + cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl; + return -1; + } + + t1 = getTickCount(); + + // Copy the original image to the first level of the evolution + img.copyTo(evolution_[0].Lt); + gaussian_2D_convolution(evolution_[0].Lt,evolution_[0].Lt,0,0,soffset_); + gaussian_2D_convolution(evolution_[0].Lt,evolution_[0].Lsmooth,0,0,sderivatives_); + + // Firstly compute the kcontrast factor + Compute_KContrast(evolution_[0].Lt,KCONTRAST_PERCENTILE); + + t2 = getTickCount(); + tkcontrast_ = 1000.0*(t2-t1) / getTickFrequency(); + + if (verbosity_ == true) { + cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << + " Sigma: " << evolution_[0].esigma << endl; + } + + // Now generate the rest of evolution levels + for ( size_t i = 1; i < evolution_.size(); i++) { + + evolution_[i-1].Lt.copyTo(evolution_[i].Lt); + gaussian_2D_convolution(evolution_[i-1].Lt,evolution_[i].Lsmooth,0,0,sderivatives_); + + // Compute the Gaussian derivatives Lx and Ly + Scharr(evolution_[i].Lsmooth,evolution_[i].Lx,CV_32F,1,0,1,0,BORDER_DEFAULT); + Scharr(evolution_[i].Lsmooth,evolution_[i].Ly,CV_32F,0,1,1,0,BORDER_DEFAULT); + + // Compute the conductivity equation + if (diffusivity_ == 0) { + pm_g1(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + } + else if (diffusivity_ == 1) { + pm_g2(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + } + else if (diffusivity_ == 2) { + weickert_diffusivity(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + } + + // Perform FED n inner steps + if (use_fed_) { + for (int j = 0; j < nsteps_[i-1]; j++) { + nld_step_scalar(evolution_[i].Lt,evolution_[i].Lflow,evolution_[i].Lstep,tsteps_[i-1][j]); + } + } + else { + // Perform the evolution step with AOS + AOS_Step_Scalar(evolution_[i].Lt,evolution_[i-1].Lt,evolution_[i].Lflow, + evolution_[i].etime-evolution_[i-1].etime); + } + + if (verbosity_ == true) { + cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime << + " Sigma: " << evolution_[i].esigma << endl; + } + } + + t2 = getTickCount(); + tnlscale_ = 1000.0*(t2-t1) / getTickFrequency(); + + return 0; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the k contrast factor + * @param img Input image + * @param kpercentile Percentile of the gradient histogram +*/ +void KAZE::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { + + if (verbosity_ == true) { + cout << "Computing Kcontrast 
factor." << endl; + } + + if (COMPUTE_KCONTRAST == true) { + kcontrast_ = compute_k_percentile(img,kpercentile,sderivatives_,KCONTRAST_NBINS,0,0); + } + + if (verbosity_ == true) { + cout << "kcontrast = " << kcontrast_ << endl; + cout << endl << "Now computing the nonlinear scale space!!" << endl; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the multiscale derivatives for the nonlinear scale space +*/ +void KAZE::Compute_Multiscale_Derivatives(void) +{ + double t2 = 0.0, t1 = 0.0; + t1 = getTickCount(); + +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < evolution_.size(); i++) { + + if (verbosity_ == true) { + cout << "Computing multiscale derivatives. Evolution time: " << evolution_[i].etime + << " Step (pixels): " << evolution_[i].sigma_size << endl; + } + + // Compute multiscale derivatives for the detector + compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Lx,1,0,evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Ly,0,1,evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxx,1,0,evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Ly,evolution_[i].Lyy,0,1,evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxy,0,1,evolution_[i].sigma_size); + + evolution_[i].Lx = evolution_[i].Lx*((evolution_[i].sigma_size)); + evolution_[i].Ly = evolution_[i].Ly*((evolution_[i].sigma_size)); + evolution_[i].Lxx = evolution_[i].Lxx*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); + evolution_[i].Lxy = evolution_[i].Lxy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); + evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); + } + + t2 = getTickCount(); + tmderivatives_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the feature detector response for the nonlinear scale space + * @note We use the Hessian determinant as feature detector +*/ +void KAZE::Compute_Detector_Response(void) { + + double t2 = 0.0, t1 = 0.0; + float lxx = 0.0, lxy = 0.0, lyy = 0.0; + + t1 = getTickCount(); + + // Firstly compute the multiscale derivatives + Compute_Multiscale_Derivatives(); + + for (size_t i = 0; i < evolution_.size(); i++) { + + // Determinant of the Hessian + if (verbosity_ == true) { + cout << "Computing detector response. Determinant of Hessian. 
Evolution time: " << evolution_[i].etime << endl; + } + + for (int ix = 0; ix < img_height_; ix++) { + for (int jx = 0; jx < img_width_; jx++) { + lxx = *(evolution_[i].Lxx.ptr(ix)+jx); + lxy = *(evolution_[i].Lxy.ptr(ix)+jx); + lyy = *(evolution_[i].Lyy.ptr(ix)+jx); + *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy-lxy*lxy); + } + } + } + + t2 = getTickCount(); + tdresponse_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method selects interesting keypoints through the nonlinear scale space + * @param kpts Vector of keypoints +*/ +void KAZE::Feature_Detection(std::vector& kpts) { + + double t2 = 0.0, t1 = 0.0; + t1 = getTickCount(); + + // Firstly compute the detector response for each pixel and scale level + Compute_Detector_Response(); + + // Find scale space extrema + Determinant_Hessian_Parallel(kpts); + + // Perform some subpixel refinement + Do_Subpixel_Refinement(kpts); + + t2 = getTickCount(); + tdetector_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs the detection of keypoints by using the normalized + * score of the Hessian determinant through the nonlinear scale space + * @param kpts Vector of keypoints + * @note We compute features for each of the nonlinear scale space level in a different processing thread +*/ +void KAZE::Determinant_Hessian_Parallel(std::vector& kpts) { + + int level = 0; + float dist = 0.0, smax = 3.0; + int npoints = 0, id_repeated = 0; + int left_x = 0, right_x = 0, up_y = 0, down_y = 0; + bool is_extremum = false, is_repeated = false, is_out = false; + + // Delete the memory of the vector of keypoints vectors + // In case we use the same kaze object for multiple images + for (size_t i = 0; i < kpts_par_.size(); i++) { + vector().swap(kpts_par_[i]); + } + kpts_par_.clear(); + vector aux; + + // Allocate memory for the vector of vectors + for (size_t i = 1; i < evolution_.size()-1; i++) { + kpts_par_.push_back(aux); + } + +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 1; i < evolution_.size()-1; i++) { + Find_Extremum_Threading(i); + } + + // Now fill the vector of keypoints!!! 
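
For reference, an illustrative one-pixel version of the detector response written into Ldet by Compute_Detector_Response above; this is an editorial sketch rather than code from the patch.

    // Determinant of the Hessian at a single pixel, from the scale-normalized
    // second derivatives Lxx, Lyy, Lxy produced by Compute_Multiscale_Derivatives.
    static inline float hessian_determinant_response(float lxx, float lyy, float lxy) {
      return lxx*lyy - lxy*lxy;
    }
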
+ for (size_t i = 0; i < kpts_par_.size(); i++) { + for (size_t j = 0; j < kpts_par_[i].size(); j++) { + level = i+1; + is_extremum = true; + is_repeated = false; + is_out = false; + + // Check in case we have the same point as maxima in previous evolution levels + for (size_t ik = 0; ik < kpts.size(); ik++) { + if (kpts[ik].class_id == level || kpts[ik].class_id == level+1 || kpts[ik].class_id == level-1) { + dist = pow(kpts_par_[i][j].pt.x-kpts[ik].pt.x,2)+pow(kpts_par_[i][j].pt.y-kpts[ik].pt.y,2); + + if (dist < evolution_[level].sigma_size*evolution_[level].sigma_size) { + if (kpts_par_[i][j].response > kpts[ik].response) { + id_repeated = ik; + is_repeated = true; + } + else { + is_extremum = false; + } + + break; + } + } + } + + if (is_extremum == true) { + // Check that the point is under the image limits for the descriptor computation + left_x = fRound(kpts_par_[i][j].pt.x-smax*kpts_par_[i][j].size); + right_x = fRound(kpts_par_[i][j].pt.x+smax*kpts_par_[i][j].size); + up_y = fRound(kpts_par_[i][j].pt.y-smax*kpts_par_[i][j].size); + down_y = fRound(kpts_par_[i][j].pt.y+smax*kpts_par_[i][j].size); + + if (left_x < 0 || right_x >= evolution_[level].Ldet.cols || + up_y < 0 || down_y >= evolution_[level].Ldet.rows) { + is_out = true; + } + + if (is_out == false) { + if (is_repeated == false) { + kpts.push_back(kpts_par_[i][j]); + npoints++; + } + else { + kpts[id_repeated] = kpts_par_[i][j]; + } + } + } + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method is called by the thread which is responsible for finding extrema + * at a given nonlinear scale level + * @param level Index in the nonlinear scale space evolution +*/ +void KAZE::Find_Extremum_Threading(const int& level) { + + float value = 0.0; + bool is_extremum = false; + + for (int ix = 1; ix < img_height_-1; ix++) { + for (int jx = 1; jx < img_width_-1; jx++) { + + is_extremum = false; + value = *(evolution_[level].Ldet.ptr<float>(ix)+jx); + + // Filter the points with the detector threshold + if (value > dthreshold_ && value >= DEFAULT_MIN_DETECTOR_THRESHOLD) { + if (value >= *(evolution_[level].Ldet.ptr<float>(ix)+jx-1)) { + // First check on the same scale + if (check_maximum_neighbourhood(evolution_[level].Ldet,1,value,ix,jx,1)) { + // Now check on the lower scale + if (check_maximum_neighbourhood(evolution_[level-1].Ldet,1,value,ix,jx,0)) { + // Now check on the upper scale + if (check_maximum_neighbourhood(evolution_[level+1].Ldet,1,value,ix,jx,0)) { + is_extremum = true; + } + } + } + } + } + + // Add the point of interest!!
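
An illustrative sketch of the kind of 3x3 strict-maximum test that check_maximum_neighbourhood() is used for in Find_Extremum_Threading above; this is editorial, not the helper's actual implementation. The same_img flag mirrors the idea of skipping the candidate pixel when testing its own scale level, and bounds checking is assumed to be handled by the caller.

    #include <opencv2/core/core.hpp>

    static bool is_strict_local_max_3x3(const cv::Mat& Ldet, float value, int row, int col, bool same_img) {
      for (int r = row-1; r <= row+1; r++) {
        for (int c = col-1; c <= col+1; c++) {
          if (same_img && r == row && c == col)
            continue;                                 // do not compare the candidate with itself
          if (*(Ldet.ptr<float>(r)+c) >= value)
            return false;                             // a neighbour is as strong or stronger
        }
      }
      return true;
    }
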
+ if (is_extremum == true) { + KeyPoint point; + point.pt.x = jx; + point.pt.y = ix; + point.response = fabs(value); + point.size = evolution_[level].esigma; + point.octave = evolution_[level].octave; + point.class_id = level; + + // We use the angle field for the sublevel value + // Then, we will replace this angle field with the main orientation + point.angle = evolution_[level].sublevel; + kpts_par_[level-1].push_back(point); + } + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs subpixel refinement of the detected keypoints + * @param kpts Vector of detected keypoints +*/ +void KAZE::Do_Subpixel_Refinement(std::vector &kpts) { + + int step = 1; + int x = 0, y = 0; + float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0; + float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0; + Mat A = Mat::zeros(3,3,CV_32F); + Mat b = Mat::zeros(3,1,CV_32F); + Mat dst = Mat::zeros(3,1,CV_32F); + double t2 = 0.0, t1 = 0.0; + + t1 = cv::getTickCount(); + vector kpts_(kpts); + + for (size_t i = 0; i < kpts_.size(); i++) { + + x = kpts_[i].pt.x; + y = kpts_[i].pt.y; + + // Compute the gradient + Dx = (1.0/(2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x+step) + -*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x-step)); + Dy = (1.0/(2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x) + -*(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x)); + Ds = 0.5*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x) + -*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x)); + + // Compute the Hessian + Dxx = (1.0/(step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x+step) + + *(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x-step) + -2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); + + Dyy = (1.0/(step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x) + + *(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x) + -2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); + + Dss = *(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x) + + *(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x) + -2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x)); + + Dxy = (1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x+step) + +(*(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x-step))) + -(1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x+step) + +(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x-step))); + + Dxs = (1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x+step) + +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x-step))) + -(1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x-step) + +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x+step))); + + Dys = (1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y+step)+x) + +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y-step)+x))) + -(1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y-step)+x) + +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y+step)+x))); + + // Solve the linear system + *(A.ptr(0)) = Dxx; + *(A.ptr(1)+1) = Dyy; + *(A.ptr(2)+2) = Dss; + + *(A.ptr(0)+1) = *(A.ptr(1)) = Dxy; + *(A.ptr(0)+2) = *(A.ptr(2)) = Dxs; + *(A.ptr(1)+2) = *(A.ptr(2)+1) = Dys; + + *(b.ptr(0)) = -Dx; + *(b.ptr(1)) = -Dy; + *(b.ptr(2)) = -Ds; + + solve(A,b,dst,DECOMP_LU); + + if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0 && fabs(*(dst.ptr(2))) <= 1.0) { + kpts_[i].pt.x += *(dst.ptr(0)); + kpts_[i].pt.y += 
*(dst.ptr(1)); + dsc = kpts_[i].octave + (kpts_[i].angle+*(dst.ptr(2)))/((float)(nsublevels_)); + + // In OpenCV the size of a keypoint is the diameter!! + kpts_[i].size = 2.0*soffset_*pow((float)2.0,dsc); + kpts_[i].angle = 0.0; + } + // Set the points to be deleted after the for loop + else { + kpts_[i].response = -1; + } + } + + // Clear the vector of keypoints + kpts.clear(); + + for (size_t i = 0; i < kpts_.size(); i++) { + if (kpts_[i].response != -1) { + kpts.push_back(kpts_[i]); + } + } + + t2 = getTickCount(); + tsubpixel_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs feature suppression based on 2D distance + * @param kpts Vector of keypoints + * @param mdist Maximum distance in pixels +*/ +void KAZE::Feature_Suppression_Distance(std::vector& kpts, const float& mdist) { + + vector aux; + vector to_delete; + float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; + bool found = false; + + for (size_t i = 0; i < kpts.size(); i++) { + x1 = kpts[i].pt.x; + y1 = kpts[i].pt.y; + + for (size_t j = i+1; j < kpts.size(); j++) { + x2 = kpts[j].pt.x; + y2 = kpts[j].pt.y; + dist = sqrt(pow(x1-x2,2)+pow(y1-y2,2)); + + if (dist < mdist) { + if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { + to_delete.push_back(j); + } + else { + to_delete.push_back(i); + break; + } + } + } + } + + for (size_t i = 0; i < kpts.size(); i++) { + found = false; + + for (size_t j = 0; j < to_delete.size(); j++) { + if(i == (size_t)(to_delete[j])) { + found = true; + break; + } + } + + if (found == false) { + aux.push_back(kpts[i]); + } + } + + kpts.clear(); + kpts = aux; + aux.clear(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the set of descriptors through the nonlinear scale space + * @param kpts Vector of keypoints + * @param desc Matrix with the feature descriptors +*/ +void KAZE::Feature_Description(std::vector &kpts, cv::Mat &desc) { + + double t2 = 0.0, t1 = 0.0; + t1 = getTickCount(); + + // Allocate memory for the matrix of descriptors + if (use_extended_ == true) { + desc = Mat::zeros(kpts.size(),128,CV_32FC1); + } + else { + desc = Mat::zeros(kpts.size(),64,CV_32FC1); + } + + if (use_upright_ == true) { + if (use_extended_ == false) { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_SURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_MSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_GSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + } + else + { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++ ) { + kpts[i].angle = 0.0; + Get_SURF_Upright_Descriptor_128(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < 
kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_MSURF_Upright_Descriptor_128(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_GSURF_Upright_Descriptor_128(kpts[i],desc.ptr(i)); + } + } + } + } + else { + if (use_extended_ == false) { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_SURF_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_MSURF_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_GSURF_Descriptor_64(kpts[i],desc.ptr(i)); + } + } + } + else { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_SURF_Descriptor_128(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for(size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_MSURF_Descriptor_128(kpts[i],desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for( size_t i = 0; i < kpts.size(); i++ ) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_GSURF_Descriptor_128(kpts[i],desc.ptr(i)); + } + } + } + } + + t2 = getTickCount(); + tdescriptor_ = 1000.0*(t2-t1) / getTickFrequency(); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the main orientation for a given keypoint + * @param kpt Input keypoint + * @note The orientation is computed using a similar approach as described in the + * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 +*/ +void KAZE::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) +{ + int ix = 0, iy = 0, idx = 0, s = 0, level = 0; + float xf = 0.0, yf = 0.0, gweight = 0.0; + vector resX(109), resY(109), Ang(109); + + // Variables for computing the dominant direction + float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; + + // Get the information from the keypoint + xf = kpt.pt.x; + yf = kpt.pt.y; + level = kpt.class_id; + s = fRound(kpt.size/2.0); + + // Calculate derivatives responses for points within radius of 6*scale + for (int i = -6; i <= 6; ++i) { + for (int j = -6; j <= 6; ++j) { + if (i*i + j*j < 36) { + iy = fRound(yf + j*s); + ix = fRound(xf + i*s); + + if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_ ) { + gweight = gaussian(iy-yf,ix-xf,2.5*s); + resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); + resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); + } + else { + resX[idx] = 0.0; + resY[idx] = 0.0; + } + + Ang[idx] = getAngle(resX[idx],resY[idx]); + ++idx; + } + } + } + + // Loop slides pi/3 window around feature point + for (ang1 = 0; ang1 < 2.0*CV_PI; ang1+=0.15f) { + ang2 =(ang1+CV_PI/3.0f > 2.0*CV_PI ? 
ang1-5.0f*CV_PI/3.0f : ang1+CV_PI/3.0f); + sumX = sumY = 0.f; + + for (size_t k = 0; k < Ang.size(); ++k) { + // Get angle from the x-axis of the sample point + const float & ang = Ang[k]; + + // Determine whether the point is within the window + if (ang1 < ang2 && ang1 < ang && ang < ang2) { + sumX+=resX[k]; + sumY+=resY[k]; + } + else if (ang2 < ang1 && + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { + sumX+=resX[k]; + sumY+=resY[k]; + } + } + + // if the vector produced from this window is longer than all + // previous vectors then this forms the new dominant direction + if (sumX*sumX + sumY*sumY > max) { + // store largest orientation + max = sumX*sumX + sumY*sumY; + kpt.angle = getAngle(sumX, sumY); + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright descriptor (no rotation invariant) + * of the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional + * Gaussian weighting is performed. The descriptor is inspired from Bay et al., + * Speeded Up Robust Features, ECCV, 2006 +*/ +void KAZE::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +{ + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + level = kpt.class_id; + scale = fRound(kpt.size/2.0); + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dx=dy=mdx=mdy=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + sample_y = k*scale + yf; + sample_x = l*scale + xf; + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; + } + } + + // convert to unit vector + len = 
sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional + * Gaussian weighting is performed. The descriptor is inspired from Bay et al., + * Speeded Up Robust Features, ECCV, 2006 +*/ +void KAZE::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { + + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + dx=dy=mdx=mdy=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + 
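+//*************************************************************************************
+//*************************************************************************************
+
+/**
+ * Editorial sketch, not part of the original KAZE sources: every SURF-, M-SURF- and
+ * G-SURF-style routine in this file samples the evolution images Lx, Ly (and Lxx, Lxy,
+ * Lyy) with the same four-neighbour bilinear interpolation, written inline through
+ * res1..res4 and the fractional offsets fx, fy. The helper below is a hypothetical,
+ * self-contained summary of that recurring pattern; it only assumes the CV_32F
+ * evolution images used throughout this file and is provided for illustration.
+*/
+static float Sample_Bilinear_Sketch(const cv::Mat& img, const float& sample_x, const float& sample_y) {
+
+  // Integer coordinates bracketing the sample point (same rounding as the code above)
+  int x1 = (int)(sample_x-.5);
+  int y1 = (int)(sample_y-.5);
+  int x2 = (int)(sample_x+.5);
+  int y2 = (int)(sample_y+.5);
+
+  // Clamp to the image domain, mirroring what checkDescriptorLimits() does above
+  if (x1 < 0) x1 = 0;
+  if (y1 < 0) y1 = 0;
+  if (x2 < 0) x2 = 0;
+  if (y2 < 0) y2 = 0;
+  if (x1 > img.cols-1) x1 = img.cols-1;
+  if (y1 > img.rows-1) y1 = img.rows-1;
+  if (x2 > img.cols-1) x2 = img.cols-1;
+  if (y2 > img.rows-1) y2 = img.rows-1;
+
+  // Fractional offsets and bilinear blend of the four neighbouring responses
+  float fx = sample_x-x1;
+  float fy = sample_y-y1;
+  float res1 = img.at<float>(y1,x1);
+  float res2 = img.at<float>(y1,x2);
+  float res3 = img.at<float>(y2,x1);
+  float res4 = img.at<float>(y2,x2);
+  return (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
+}
+
+// Hypothetical usage, equivalent to the inline sampling in the descriptor methods:
+//   float rx = Sample_Bilinear_Sketch(evolution_[level].Lx, sample_x, sample_y);
+//   float ry = Sample_Bilinear_Sketch(evolution_[level].Ly, sample_x, sample_y);
+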
+//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright descriptor (not rotation invariant) of + * the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 +*/ +void KAZE::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +{ + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int dsize = 0, scale = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + level = kpt.class_id; + + i = -8; + + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + j = -8; + i = i-4; + + cx += 1.0; + cy = -0.5; + + while (j < pattern_size) { + + dx=dy=mdx=mdy=0.0; + cy += 1.0; + j = j-4; + + ky = i + sample_step; + kx = j + sample_step; + + ys = yf + (ky*scale); + xs = xf + (kx*scale); + + for (int k = i; k < i+9; k++) { + for (int l = j; l < j+9; l++) { + + sample_y = k*scale + yf; + sample_x = l*scale + xf; + + //Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + rx = gauss_s1*rx; + ry = gauss_s1*ry; + + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); + + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } 
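+  // Added note: clippingDescriptor() (defined near the end of this file) iteratively
+  // truncates every component to +-ratio/sqrt(dsize) and renormalizes the vector, so
+  // no single bin can dominate the final unit-length descriptor.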
+} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the descriptor of the provided keypoint given the + * main orientation of the keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 +*/ +void KAZE::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +{ + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); + + i = -8; + + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + + j = -8; + i = i-4; + + cx += 1.0; + cy = -0.5; + + while (j < pattern_size) { + + dx=dy=mdx=mdy=0.0; + cy += 1.0; + j = j - 4; + + ky = i + sample_step; + kx = j + sample_step; + + xs = xf + (-kx*scale*si + ky*scale*co); + ys = yf + (kx*scale*co + ky*scale*si); + + for (int k = i; k < i + 9; ++k) { + for (int l = j; l < j + 9; ++l) { + + // Get coords of sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + // Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); + y1 = fRound(sample_y-.5); + x1 = fRound(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); + + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + len += 
(dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + j += 9; + } + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright G-SURF descriptor of the provided keypoint + * given the main orientation + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional + * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and + * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 +*/ +void KAZE::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +{ + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float lvv = 0.0, lww = 0.0, modg = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + level = kpt.class_id; + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dx=dy=mdx=mdy=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + modg = pow(rx,2) + pow(ry,2); + + if (modg != 0.0) { + + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = 
*(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); + } + else { + lww = 0.0; + lvv = 0.0; + } + + // Sum the derivatives to the cumulative descriptor + dx += lww; + dy += lvv; + mdx += fabs(lww); + mdy += fabs(lvv); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the G-SURF descriptor of the provided keypoint given the + * main orientation + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional + * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and + * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 +*/ +void KAZE::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +{ + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float lvv = 0.0, lww = 0.0, modg = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dx=dy=mdx=mdy=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = 
*(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + modg = pow(rx,2) + pow(ry,2); + + if (modg != 0.0) { + + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); + } + else { + lww = 0.0; + lvv = 0.0; + } + + // Sum the derivatives to the cumulative descriptor + dx += lww; + dy += lvv; + mdx += fabs(lww); + mdy += fabs(lvv); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } + +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the upright extended descriptor (no rotation invariant) + * of the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional + * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., + * Speeded Up Robust Features, ECCV, 2006 +*/ +void KAZE::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +{ + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + level = kpt.class_id; + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dxp=dxn=mdxp=mdxn=0.0; + dyp=dyn=mdyp=mdyn=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + sample_y = k*scale + yf; + sample_x = l*scale + xf; + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Sum the derivatives to the cumulative descriptor + if (ry >= 0.0) { + dxp += rx; + mdxp += fabs(rx); + } + else { + dxn += rx; + mdxn += fabs(rx); + } + + if (rx >= 0.0) { + dyp += ry; + mdyp += fabs(ry); + } + else { + dyn += ry; + mdyn += fabs(ry); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the extended descriptor of the provided keypoint given the + * main orientation + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional + * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., + * Speeded Up Robust Features, ECCV, 2006 +*/ +void KAZE::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +{ + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dxp=dxn=mdxp=mdxn=0.0; + dyp=dyn=mdyp=mdyn=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + + // Sum the derivatives to the cumulative descriptor + if (rry >= 0.0) { + dxp += rrx; + mdxp += fabs(rrx); + } + else { + dxn += rrx; + mdxn += fabs(rrx); + } + + if (rrx >= 0.0) { + dyp += rry; + mdyp += fabs(rry); + } + else { + dyn += rry; + mdyn += fabs(rry); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the extended upright descriptor (not rotation invariant) of + * the 
provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 +*/ +void KAZE::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { + + float gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int dsize = 0, scale = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; + + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + level = kpt.class_id; + + i = -8; + + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + + j = -8; + i = i-4; + + cx += 1.0; + cy = -0.5; + + while (j < pattern_size) { + + dxp=dxn=mdxp=mdxn=0.0; + dyp=dyn=mdyp=mdyn=0.0; + + cy += 1.0; + j = j-4; + + ky = i + sample_step; + kx = j + sample_step; + + ys = yf + (ky*scale); + xs = xf + (kx*scale); + + for (int k = i; k < i+9; k++) { + for (int l = j; l < j+9; l++) { + + sample_y = k*scale + yf; + sample_x = l*scale + xf; + + //Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.50*scale); + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + rx = gauss_s1*rx; + ry = gauss_s1*ry; + + // Sum the derivatives to the cumulative descriptor + if (ry >= 0.0) { + dxp += rx; + mdxp += fabs(rx); + } + else { + dxn += rx; + mdxn += fabs(rx); + } + + if (rx >= 0.0) { + dyp += ry; + mdyp += fabs(ry); + } + else { + dyn += ry; + mdyn += fabs(ry); + } + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); + + desc[dcount++] = dxp*gauss_s2; + desc[dcount++] = dxn*gauss_s2; + desc[dcount++] = mdxp*gauss_s2; + desc[dcount++] = mdxn*gauss_s2; + desc[dcount++] = dyp*gauss_s2; + desc[dcount++] = dyn*gauss_s2; + desc[dcount++] = mdyp*gauss_s2; + desc[dcount++] = mdyn*gauss_s2; + + // Store the current length^2 of the vector + len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = 
sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the extended G-SURF descriptor of the provided keypoint + * given the main orientation of the keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired + * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, + * ECCV 2008 +*/ +void KAZE::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { + + float gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; + + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 12; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); + + i = -8; + + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + + j = -8; + i = i-4; + + cx += 1.0; + cy = -0.5; + + while (j < pattern_size) { + + dxp=dxn=mdxp=mdxn=0.0; + dyp=dyn=mdyp=mdyn=0.0; + + cy += 1.0f; + j = j - 4; + + ky = i + sample_step; + kx = j + sample_step; + + xs = xf + (-kx*scale*si + ky*scale*co); + ys = yf + (kx*scale*co + ky*scale*si); + + for (int k = i; k < i + 9; ++k) { + for (int l = j; l < j + 9; ++l) { + + // Get coords of sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + // Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); + + y1 = fRound(sample_y-.5); + x1 = fRound(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); + + // Sum the derivatives to 
the cumulative descriptor + if (rry >= 0.0) { + dxp += rrx; + mdxp += fabs(rrx); + } + else { + dxn += rrx; + mdxn += fabs(rrx); + } + + if (rrx >= 0.0) { + dyp += rry; + mdyp += fabs(rry); + } + else { + dyn += rry; + mdyn += fabs(rry); + } + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); + + desc[dcount++] = dxp*gauss_s2; + desc[dcount++] = dxn*gauss_s2; + desc[dcount++] = mdxp*gauss_s2; + desc[dcount++] = mdxn*gauss_s2; + desc[dcount++] = dyp*gauss_s2; + desc[dcount++] = dyn*gauss_s2; + desc[dcount++] = mdyp*gauss_s2; + desc[dcount++] = mdyn*gauss_s2; + + // Store the current length^2 of the vector + len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; + + j += 9; + } + + i += 9; + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the G-SURF upright extended descriptor + * (no rotation invariant) of the provided keypoint + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional + * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and + * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 +*/ +void KAZE::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +{ + float len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0, lvv = 0.0, lww = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + level = kpt.class_id; + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for(int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dxp=dxn=mdxp=mdxn=0.0; + dyp=dyn=mdyp=mdyn=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + sample_y = k*scale + yf; + sample_x = l*scale + xf; + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = 
*(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + modg = pow(rx,2) + pow(ry,2); + + if (modg != 0.0) { + + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); + } + else { + lww = 0.0; + lvv = 0.0; + } + + // Sum the derivatives to the cumulative descriptor + if (lww >= 0.0) { + dxp += lvv; + mdxp += fabs(lvv); + } + else { + dxn += lvv; + mdxn += fabs(lvv); + } + + if (lvv >= 0.0) { + dyp += lww; + mdyp += fabs(lww); + } + else { + dyn += lww; + mdyn += fabs(lww); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method computes the extended descriptor of the provided keypoint given the + * main orientation + * @param kpt Input keypoint + * @param desc Descriptor vector + * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional + * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and + * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 +*/ +void KAZE::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { + + float len = 0.0, xf = 0.0, yf = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + float lvv = 0.0, lww = 0.0, modg = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; + + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; + + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size/2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); + + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i+=sample_step) { + for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + + dxp=dxn=mdxp=mdxn=0.0; + dyp=dyn=mdyp=mdyn=0.0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = (int)(sample_y-.5); + x1 = (int)(sample_x-.5); + + checkDescriptorLimits(x1,y1,img_width_,img_height_); + + y2 = (int)(sample_y+.5); + x2 = (int)(sample_x+.5); + + checkDescriptorLimits(x2,y2,img_width_,img_height_); + + fx = sample_x-x1; + fy = sample_y-y1; + + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + modg = pow(rx,2) + pow(ry,2); + + if (modg != 0.0) { + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); + } + else { + lww = 0.0; + lvv = 0.0; + } + + // Sum the derivatives to the cumulative descriptor + if (lww >= 0.0) { + dxp += lvv; + mdxp += 
fabs(lvv); + } + else { + dxn += lvv; + mdxn += fabs(lvv); + } + + if (lvv >= 0.0) { + dyp += lww; + mdyp += fabs(lww); + } + else { + dyn += lww; + mdyn += fabs(lww); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; + } + } + + // convert to unit vector + len = sqrt(len); + + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } + + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs a scalar non-linear diffusion step using AOS schemes + * @param Ld Image at a given evolution step + * @param Ldprev Image at a previous evolution step + * @param c Conductivity image + * @param stepsize Stepsize for the nonlinear diffusion evolution + * @note If c is constant, the diffusion will be linear + * If c is a matrix of the same size as Ld, the diffusion will be nonlinear + * The stepsize can be arbitrarilly large +*/ +void KAZE::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { + +#ifdef _OPENMP +#pragma omp sections + { +#pragma omp section + { + AOS_Rows(Ldprev,c,stepsize); + } +#pragma omp section + { + AOS_Columns(Ldprev,c,stepsize); + } + } +#else + AOS_Rows(Ldprev,c,stepsize); + AOS_Columns(Ldprev,c,stepsize); +#endif + + Ld = 0.5*(Lty_+Ltx_.t()); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs performs 1D-AOS for the image rows + * @param Ldprev Image at a previous evolution step + * @param c Conductivity image + * @param stepsize Stepsize for the nonlinear diffusion evolution +*/ +void KAZE::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { + + // Operate on rows + for (int i = 0; i < qr_.rows; i++) { + for (int j = 0; j < qr_.cols; j++) { + *(qr_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i+1)+j); + } + } + + for (int j = 0; j < py_.cols; j++) { + *(py_.ptr(0)+j) = *(qr_.ptr(0)+j); + } + + for (int j = 0; j < py_.cols; j++) { + *(py_.ptr(py_.rows-1)+j) = *(qr_.ptr(qr_.rows-1)+j); + } + + for (int i = 1; i < py_.rows-1; i++) { + for (int j = 0; j < py_.cols; j++) { + *(py_.ptr(i)+j) = *(qr_.ptr(i-1)+j) + *(qr_.ptr(i)+j); + } + } + + // a = 1 + t.*p; (p is -1*p) + // b = -t.*q; + ay_ = 1.0 + stepsize*py_; // p is -1*p + by_ = -stepsize*qr_; + + // Do Thomas algorithm to solve the linear system of equations + Thomas(ay_,by_,Ldprev,Lty_); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method performs performs 1D-AOS for the image columns + * @param Ldprev Image at a previous evolution step + * @param c Conductivity image + * @param stepsize Stepsize for the nonlinear diffusion evolution +*/ +void KAZE::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, 
const float& stepsize) { + + // Operate on columns + for (int j = 0; j < qc_.cols; j++) { + for (int i = 0; i < qc_.rows; i++) { + *(qc_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i)+j+1); + } + } + + for (int i = 0; i < px_.rows; i++) { + *(px_.ptr(i)) = *(qc_.ptr(i)); + } + + for (int i = 0; i < px_.rows; i++) { + *(px_.ptr(i)+px_.cols-1) = *(qc_.ptr(i)+qc_.cols-1); + } + + for (int j = 1; j < px_.cols-1; j++) { + for (int i = 0; i < px_.rows; i++) { + *(px_.ptr(i)+j) = *(qc_.ptr(i)+j-1) + *(qc_.ptr(i)+j); + } + } + + // a = 1 + t.*p'; + ax_ = 1.0 + stepsize*px_.t(); + + // b = -t.*q'; + bx_ = -stepsize*qc_.t(); + + // But take care since we need to transpose the solution!! + Mat Ldprevt = Ldprev.t(); + + // Do Thomas algorithm to solve the linear system of equations + Thomas(ax_,bx_,Ldprevt,Ltx_); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This method does the Thomas algorithm for solving a tridiagonal linear system + * @note The matrix A must be strictly diagonally dominant for a stable solution +*/ +void KAZE::Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x) { + + // Auxiliary variables + int n = a.rows; + Mat m = cv::Mat::zeros(a.rows,a.cols,CV_32F); + Mat l = cv::Mat::zeros(b.rows,b.cols,CV_32F); + Mat y = cv::Mat::zeros(Ld.rows,Ld.cols,CV_32F); + + /** A*x = d; */ + /** / a1 b1 0 0 0 ... 0 \ / x1 \ = / d1 \ */ + /** | c1 a2 b2 0 0 ... 0 | | x2 | = | d2 | */ + /** | 0 c2 a3 b3 0 ... 0 | | x3 | = | d3 | */ + /** | : : : : 0 ... 0 | | : | = | : | */ + /** | : : : : 0 cn-1 an | | xn | = | dn | */ + + /** 1. LU decomposition + / L = / 1 \ U = / m1 r1 \ + / | l1 1 | | m2 r2 | + / | l2 1 | | m3 r3 | + / | : : : | | : : : | + / \ ln-1 1 / \ mn / */ + + for (int j = 0; j < m.cols; j++) { + *(m.ptr(0)+j) = *(a.ptr(0)+j); + } + + for (int j = 0; j < y.cols; j++) { + *(y.ptr(0)+j) = *(Ld.ptr(0)+j); + } + + // 1. Forward substitution L*y = d for y + for (int k = 1; k < n; k++) { + for (int j=0; j < l.cols; j++) { + *(l.ptr(k-1)+j) = *(b.ptr(k-1)+j) / *(m.ptr(k-1)+j); + } + + for (int j=0; j < m.cols; j++) { + *(m.ptr(k)+j) = *(a.ptr(k)+j) - *(l.ptr(k-1)+j)*(*(b.ptr(k-1)+j)); + } + + for (int j=0; j < y.cols; j++) { + *(y.ptr(k)+j) = *(Ld.ptr(k)+j) - *(l.ptr(k-1)+j)*(*(y.ptr(k-1)+j)); + } + } + + // 2. Backward substitution U*x = y + for (int j=0; j < y.cols; j++) { + *(x.ptr(n-1)+j) = (*(y.ptr(n-1)+j))/(*(m.ptr(n-1)+j)); + } + + for (int i = n-2; i >= 0; i--) { + for(int j = 0; j < x.cols; j++) { + *(x.ptr(i)+j) = (*(y.ptr(i)+j) - (*(b.ptr(i)+j))*(*(x.ptr(i+1)+j)))/(*(m.ptr(i)+j)); + } + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the angle from the vector given by (X Y). 
From 0 to 2*Pi +*/ +inline float getAngle(const float& x, const float& y) { + + if (x >= 0 && y >= 0) + { + return atan(y/x); + } + + if (x < 0 && y >= 0) { + return CV_PI - atan(-y/x); + } + + if(x < 0 && y < 0) { + return CV_PI + atan(y/x); + } + + if(x >= 0 && y < 0) { + return 2.0*CV_PI - atan(-y/x); + } + + return 0; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function performs descriptor clipping + * @param desc_ Pointer to the descriptor vector + * @param dsize Size of the descriptor vector + * @param iter Number of iterations + * @param ratio Clipping ratio +*/ +inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio) { + + float cratio = ratio / sqrt(dsize); + float len = 0.0; + + for (int i = 0; i < niter; i++) { + len = 0.0; + for (int j = 0; j < dsize; j++) { + if (desc[j] > cratio) { + desc[j] = cratio; + } + else if (desc[j] < -cratio) { + desc[j] = -cratio; + } + len += desc[j]*desc[j]; + } + + // Normalize again + len = sqrt(len); + + for (int j = 0; j < dsize; j++) { + desc[j] = desc[j] / len; + } + } +} + +//************************************************************************************** +//************************************************************************************** + +/** + * @brief This function computes the value of a 2D Gaussian function + * @param x X Position + * @param y Y Position + * @param sig Standard Deviation +*/ +inline float gaussian(const float& x, const float& y, const float& sig) { + return exp(-(x*x+y*y)/(2.0f*sig*sig)); +} + +//************************************************************************************** +//************************************************************************************** + +/** + * @brief This function checks descriptor limits + * @param x X Position + * @param y Y Position + * @param width Image width + * @param height Image height +*/ +inline void checkDescriptorLimits(int &x, int &y, const int& width, const int& height) { + + if (x < 0) { + x = 0; + } + + if (y < 0) { + y = 0; + } + + if (x > width-1) { + x = width-1; + } + + if (y > height-1) { + y = height-1; + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This funtion rounds float to nearest integer + * @param flt Input float + * @return dst Nearest integer + */ +inline int fRound(const float& flt) +{ + return (int)(flt+0.5f); +} diff --git a/modules/features2d/src/kaze/KAZE.h b/modules/features2d/src/kaze/KAZE.h new file mode 100755 index 0000000000..9d489c0480 --- /dev/null +++ b/modules/features2d/src/kaze/KAZE.h @@ -0,0 +1,294 @@ + +/** + * @file KAZE.h + * @brief Main program for detecting and computing descriptors in a nonlinear + * scale space + * @date Jan 21, 2012 + * @author Pablo F. 
Alcantarilla + */ + +#ifndef KAZE_H_ +#define KAZE_H_ + +//************************************************************************************* +//************************************************************************************* + +// Includes +#include "config.h" +#include "nldiffusion_functions.h" +#include "fed.h" +#include "utils.h" + +//************************************************************************************* +//************************************************************************************* + +// KAZE Class Declaration +class KAZE { + +private: + + // Parameters of the Nonlinear diffusion class + float soffset_; // Base scale offset + float sderivatives_; // Standard deviation of the Gaussian for the nonlinear diff. derivatives + int omax_; // Maximum octave level + int nsublevels_; // Number of sublevels per octave level + int img_width_; // Width of the original image + int img_height_; // Height of the original image + bool save_scale_space_; // For saving scale space images + bool verbosity_; // Verbosity level + std::vector evolution_; // Vector of nonlinear diffusion evolution + float kcontrast_; // The contrast parameter for the scalar nonlinear diffusion + float dthreshold_; // Feature detector threshold response + int diffusivity_; // Diffusivity type, 0->PM G1, 1->PM G2, 2-> Weickert + int descriptor_mode_; // Descriptor mode + bool use_fed_; // Set to true in case we want to use FED for the nonlinear diffusion filtering. Set false for using AOS + bool use_upright_; // Set to true in case we want to use the upright version of the descriptors + bool use_extended_; // Set to true in case we want to use the extended version of the descriptors + + // Vector of keypoint vectors for finding extrema in multiple threads + std::vector > kpts_par_; + + // FED parameters + int ncycles_; // Number of cycles + bool reordering_; // Flag for reordering time steps + std::vector > tsteps_; // Vector of FED dynamic time steps + std::vector nsteps_; // Vector of number of steps per cycle + + // Computation times variables in ms + double tkcontrast_; // Kcontrast factor computation + double tnlscale_; // Nonlinear Scale space generation + double tdetector_; // Feature detector + double tmderivatives_; // Multiscale derivatives computation + double tdresponse_; // Detector response computation + double tdescriptor_; // Feature descriptor + double tsubpixel_; // Subpixel refinement + + // Some auxiliary variables used in the AOS step + cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_; + +public: + + // Constructor + KAZE(KAZEOptions& options); + + // Destructor + ~KAZE(void); + + // Public methods for KAZE interface + void Allocate_Memory_Evolution(void); + int Create_Nonlinear_Scale_Space(const cv::Mat& img); + void Feature_Detection(std::vector& kpts); + void Feature_Description(std::vector& kpts, cv::Mat& desc); + + // Methods for saving the scale space set of images and detector responses + void Save_Nonlinear_Scale_Space(void); + void Save_Detector_Responses(void); + void Save_Flow_Responses(void); + +private: + + // Feature Detection Methods + void Compute_KContrast(const cv::Mat& img, const float& kper); + void Compute_Multiscale_Derivatives(void); + void Compute_Detector_Response(void); + void Determinant_Hessian_Parallel(std::vector& kpts); + void Find_Extremum_Threading(const int& level); + void Do_Subpixel_Refinement(std::vector& kpts); + void Feature_Suppression_Distance(std::vector& kpts, const float& mdist); + + // AOS Methods + void 
AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); + void AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); + void AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); + void Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x); + + // Feature Description methods + void Compute_Main_Orientation_SURF(cv::KeyPoint& kpt); + + // Descriptor Mode -> 0 SURF 64 + void Get_SURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc); + void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc); + + // Descriptor Mode -> 0 SURF 128 + void Get_SURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); + void Get_SURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc); + + // Descriptor Mode -> 1 M-SURF 64 + void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc); + void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc); + + // Descriptor Mode -> 1 M-SURF 128 + void Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); + void Get_MSURF_Descriptor_128(const cv::KeyPoint& kpt, float *desc); + + // Descriptor Mode -> 2 G-SURF 64 + void Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc); + void Get_GSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc); + + // Descriptor Mode -> 2 G-SURF 128 + void Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); + void Get_GSURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc); + +public: + + // Setters + void Set_Scale_Offset(float soffset) { + soffset_ = soffset; + } + + void Set_SDerivatives(float sderivatives) { + sderivatives_ = sderivatives; + } + + void Set_Octave_Max(int omax) { + omax_ = omax; + } + + void Set_NSublevels(int nsublevels) { + nsublevels_ = nsublevels; + } + + void Set_Save_Scale_Space_Flag(bool save_scale_space) { + save_scale_space_ = save_scale_space; + } + + void Set_Image_Width(int img_width) { + img_width_ = img_width; + } + + void Set_Image_Height(int img_height) { + img_height_ = img_height; + } + + void Set_Verbosity_Level(bool verbosity) { + verbosity_ = verbosity; + } + + void Set_KContrast(float kcontrast) { + kcontrast_ = kcontrast; + } + + void Set_Detector_Threshold(float dthreshold) { + dthreshold_ = dthreshold; + } + + void Set_Diffusivity_Type(int diffusivity) { + diffusivity_ = diffusivity; + } + + void Set_Descriptor_Mode(int descriptor_mode) { + descriptor_mode_ = descriptor_mode; + } + + void Set_Use_FED(bool use_fed) { + use_fed_ = use_fed; + } + + void Set_Upright(bool use_upright) { + use_upright_ = use_upright; + } + + void Set_Extended(bool use_extended) { + use_extended_ = use_extended; + } + + // Getters + float Get_Scale_Offset(void) { + return soffset_; + } + + float Get_SDerivatives(void) { + return sderivatives_; + } + + int Get_Octave_Max(void) { + return omax_; + } + + int Get_NSublevels(void) { + return nsublevels_; + } + + bool Get_Save_Scale_Space_Flag(void) { + return save_scale_space_; + } + + int Get_Image_Width(void) { + return img_width_; + } + + int Get_Image_Height(void) { + return img_height_; + } + + bool Get_Verbosity_Level(void) { + return verbosity_; + } + + float Get_KContrast(void) { + return kcontrast_; + } + + float Get_Detector_Threshold(void) { + return dthreshold_; + } + + int Get_Diffusivity_Type(void) { + return diffusivity_; + } + + int Get_Descriptor_Mode(void) { + return descriptor_mode_; + } + + bool Get_Upright(void) { + return use_upright_; + } + + bool 
Get_Extended(void) { + return use_extended_; + } + + float Get_Time_KContrast(void) { + return tkcontrast_; + } + + float Get_Time_NLScale(void) { + return tnlscale_; + } + + float Get_Time_Detector(void) { + return tdetector_; + } + + float Get_Time_Multiscale_Derivatives(void) { + return tmderivatives_; + } + + float Get_Time_Detector_Response(void) { + return tdresponse_; + } + + float Get_Time_Descriptor(void) { + return tdescriptor_; + } + + float Get_Time_Subpixel(void) { + return tsubpixel_; + } +}; + +//************************************************************************************* +//************************************************************************************* + +// Inline functions +float getAngle(const float& x, const float& y); +float gaussian(const float& x, const float& y, const float& sig); +void checkDescriptorLimits(int &x, int &y, const int& width, const int& height); +void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio); +int fRound(const float& flt); + +//************************************************************************************* +//************************************************************************************* + +#endif // KAZE_H_ diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/config.h new file mode 100644 index 0000000000..ffb41ce826 --- /dev/null +++ b/modules/features2d/src/kaze/config.h @@ -0,0 +1,129 @@ + +/** + * @file config.h + * @brief Configuration file + * @date Dec 27, 2011 + * @author Pablo F. Alcantarilla + */ + +#ifndef _CONFIG_H_ +#define _CONFIG_H_ + +//****************************************************************************** +//****************************************************************************** + +// System Includes +#include +#include +#include +#include +#include +#include +#include + +// OpenCV Includes +#include "precomp.hpp" + +// OpenMP Includes +#ifdef _OPENMP +#include +#else +#define omp_get_thread_num() 0 +#endif + +//************************************************************************************* +//************************************************************************************* + +// Some defines +#define NMAX_CHAR 400 + +// Some default options +const float DEFAULT_SCALE_OFFSET = 1.60; // Base scale offset (sigma units) +const float DEFAULT_OCTAVE_MAX = 4.0; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) +const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level +const float DEFAULT_DETECTOR_THRESHOLD = 0.001; // Detector response threshold to accept point +const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001; // Minimum Detector response threshold to accept point +const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-SURF +const bool DEFAULT_USE_FED = true; // 0->AOS, 1->FED +const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation +const bool DEFAULT_EXTENDED = false; // Extended descriptor, dimension 128 +const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images +const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) +const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios +const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints + +// Some important configuration variables +const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0; +const float DEFAULT_KCONTRAST = .01; +const float 
KCONTRAST_PERCENTILE = 0.7; +const int KCONTRAST_NBINS = 300; +const bool COMPUTE_KCONTRAST = true; +const int DEFAULT_DIFFUSIVITY_TYPE = 1; // 0 -> PM G1, 1 -> PM G2, 2 -> Weickert +const bool USE_CLIPPING_NORMALIZATION = false; +const float CLIPPING_NORMALIZATION_RATIO = 1.6; +const int CLIPPING_NORMALIZATION_NITER = 5; + +//************************************************************************************* +//************************************************************************************* + +struct KAZEOptions { + + KAZEOptions() { + // Load the default options + soffset = DEFAULT_SCALE_OFFSET; + omax = DEFAULT_OCTAVE_MAX; + nsublevels = DEFAULT_NSUBLEVELS; + dthreshold = DEFAULT_DETECTOR_THRESHOLD; + use_fed = DEFAULT_USE_FED; + upright = DEFAULT_UPRIGHT; + extended = DEFAULT_EXTENDED; + descriptor = DEFAULT_DESCRIPTOR_MODE; + diffusivity = DEFAULT_DIFFUSIVITY_TYPE; + sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; + save_scale_space = DEFAULT_SAVE_SCALE_SPACE; + save_keypoints = DEFAULT_SAVE_KEYPOINTS; + verbosity = DEFAULT_VERBOSITY; + show_results = DEFAULT_SHOW_RESULTS; + } + + float soffset; + int omax; + int nsublevels; + int img_width; + int img_height; + int diffusivity; + float sderivatives; + float dthreshold; + bool use_fed; + bool upright; + bool extended; + int descriptor; + bool save_scale_space; + bool save_keypoints; + bool verbosity; + bool show_results; +}; + +struct TEvolution { + cv::Mat Lx, Ly; // First order spatial derivatives + cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives + cv::Mat Lflow; // Diffusivity image + cv::Mat Lt; // Evolution image + cv::Mat Lsmooth; // Smoothed image + cv::Mat Lstep; // Evolution step update + cv::Mat Ldet; // Detector response + float etime; // Evolution time + float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 + float octave; // Image octave + float sublevel; // Image sublevel in each octave + int sigma_size; // Integer esigma. For computing the feature detector responses +}; + +//************************************************************************************* +//************************************************************************************* + +#endif + + + + diff --git a/modules/features2d/src/kaze/fed.cpp b/modules/features2d/src/kaze/fed.cpp new file mode 100644 index 0000000000..0bd228673a --- /dev/null +++ b/modules/features2d/src/kaze/fed.cpp @@ -0,0 +1,192 @@ +//============================================================================= +// +// fed.cpp +// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2) +// Institutions: Georgia Institute of Technology (1) +// TrueVision Solutions (2) +// Date: 15/09/2013 +// Email: pablofdezalc@gmail.com +// +// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file fed.cpp + * @brief Functions for performing Fast Explicit Diffusion and building the + * nonlinear scale space + * @date Sep 15, 2013 + * @author Pablo F. Alcantarilla, Jesus Nuevo + * @note This code is derived from FED/FJ library from Grewenig et al., + * The FED/FJ library allows solving more advanced problems + * Please look at the following papers for more information about FED: + * [1] S. Grewenig, J. Weickert, C. Schroers, A. Bruhn. Cyclic Schemes for + * PDE-Based Image Analysis. Technical Report No. 
327, Department of Mathematics, + * Saarland University, Saarbrücken, Germany, March 2013 + * [2] S. Grewenig, J. Weickert, A. Bruhn. From box filtering to fast explicit diffusion. + * DAGM, 2010 + * +*/ + +#include "fed.h" + +using namespace std; + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function allocates an array of the least number of time steps such + * that a certain stopping time for the whole process can be obtained and fills + * it with the respective FED time step sizes for one cycle + * The function returns the number of time steps per cycle or 0 on failure + * @param T Desired process stopping time + * @param M Desired number of cycles + * @param tau_max Stability limit for the explicit scheme + * @param reordering Reordering flag + * @param tau The vector with the dynamic step sizes + */ +int fed_tau_by_process_time(const float& T, const int& M, const float& tau_max, + const bool& reordering, std::vector& tau) { + // All cycles have the same fraction of the stopping time + return fed_tau_by_cycle_time(T/(float)M,tau_max,reordering,tau); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function allocates an array of the least number of time steps such + * that a certain stopping time for the whole process can be obtained and fills it + * it with the respective FED time step sizes for one cycle + * The function returns the number of time steps per cycle or 0 on failure + * @param t Desired cycle stopping time + * @param tau_max Stability limit for the explicit scheme + * @param reordering Reordering flag + * @param tau The vector with the dynamic step sizes + */ +int fed_tau_by_cycle_time(const float& t, const float& tau_max, + const bool& reordering, std::vector &tau) { + int n = 0; // Number of time steps + float scale = 0.0; // Ratio of t we search to maximal t + + // Compute necessary number of time steps + n = (int)(ceilf(sqrtf(3.0*t/tau_max+0.25f)-0.5f-1.0e-8f)+ 0.5f); + scale = 3.0*t/(tau_max*(float)(n*(n+1))); + + // Call internal FED time step creation routine + return fed_tau_internal(n,scale,tau_max,reordering,tau); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function allocates an array of time steps and fills it with FED + * time step sizes + * The function returns the number of time steps per cycle or 0 on failure + * @param n Number of internal steps + * @param scale Ratio of t we search to maximal t + * @param tau_max Stability limit for the explicit scheme + * @param reordering Reordering flag + * @param tau The vector with the dynamic step sizes + */ +int fed_tau_internal(const int& n, const float& scale, const float& tau_max, + const bool& reordering, std::vector &tau) { + float c = 0.0, d = 0.0; // Time savers + vector tauh; // Helper vector for unsorted taus + + if (n <= 0) { + return 0; + } + + // Allocate memory for the time step size + tau = vector(n); + + if (reordering) { + tauh = vector(n); + } + + // Compute time saver + c = 1.0f / (4.0f * (float)n + 2.0f); + d = scale * tau_max / 2.0f; + + // Set up originally ordered tau vector + for (int k = 0; k < n; ++k) { + float h = 
cosf(CV_PI * (2.0f * (float)k + 1.0f) * c); + + if (reordering) { + tauh[k] = d / (h * h); + } + else { + tau[k] = d / (h * h); + } + } + + // Permute list of time steps according to chosen reordering function + int kappa = 0, prime = 0; + + if (reordering == true) { + // Choose kappa cycle with k = n/2 + // This is a heuristic. We can use Leja ordering instead!! + kappa = n / 2; + + // Get modulus for permutation + prime = n + 1; + + while (!fed_is_prime_internal(prime)) { + prime++; + } + + // Perform permutation + for (int k = 0, l = 0; l < n; ++k, ++l) { + int index = 0; + while ((index = ((k+1)*kappa) % prime - 1) >= n) { + k++; + } + + tau[l] = tauh[index]; + } + } + + return n; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function checks if a number is prime or not + * @param number Number to check if it is prime or not + * @return true if the number is prime + */ +bool fed_is_prime_internal(const int& number) { + bool is_prime = false; + + if (number <= 1) { + return false; + } + else if (number == 1 || number == 2 || number == 3 || number == 5 || number == 7) { + return true; + } + else if ((number % 2) == 0 || (number % 3) == 0 || (number % 5) == 0 || (number % 7) == 0) { + return false; + } + else { + is_prime = true; + int upperLimit = sqrt(number+1.0); + int divisor = 11; + + while (divisor <= upperLimit ) { + if (number % divisor == 0) + { + is_prime = false; + } + + divisor +=2; + } + + return is_prime; + } +} diff --git a/modules/features2d/src/kaze/fed.h b/modules/features2d/src/kaze/fed.h new file mode 100644 index 0000000000..d9e8c49924 --- /dev/null +++ b/modules/features2d/src/kaze/fed.h @@ -0,0 +1,30 @@ +#ifndef FED_H +#define FED_H + +//****************************************************************************** +//****************************************************************************** + +// Includes +#include +#include +#include +#include +#include +#include + +//************************************************************************************* +//************************************************************************************* + +// Declaration of functions +int fed_tau_by_process_time(const float& T, const int& M, const float& tau_max, + const bool& reordering, std::vector& tau); +int fed_tau_by_cycle_time(const float& t, const float& tau_max, + const bool& reordering, std::vector &tau) ; +int fed_tau_internal(const int& n, const float& scale, const float& tau_max, + const bool& reordering, std::vector &tau); +bool fed_is_prime_internal(const int& number); + +//************************************************************************************* +//************************************************************************************* + +#endif // FED_H diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp new file mode 100644 index 0000000000..41a7749058 --- /dev/null +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -0,0 +1,386 @@ + +//============================================================================= +// +// nldiffusion_functions.cpp +// Author: Pablo F. Alcantarilla +// Institution: University d'Auvergne +// Address: Clermont Ferrand, France +// Date: 27/12/2011 +// Email: pablofdezalc@gmail.com +// +// KAZE Features Copyright 2012, Pablo F. 
Alcantarilla +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file nldiffusion_functions.cpp + * @brief Functions for non-linear diffusion applications: + * 2D Gaussian Derivatives + * Perona and Malik conductivity equations + * Perona and Malik evolution + * @date Dec 27, 2011 + * @author Pablo F. Alcantarilla + */ + +#include "nldiffusion_functions.h" + +// Namespaces +using namespace std; +using namespace cv; + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function smoothes an image with a Gaussian kernel + * @param src Input image + * @param dst Output image + * @param ksize_x Kernel size in X-direction (horizontal) + * @param ksize_y Kernel size in Y-direction (vertical) + * @param sigma Kernel standard deviation + */ +void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, + int ksize_x, int ksize_y, float sigma) { + + size_t ksize_x_ = 0, ksize_y_ = 0; + + // Compute an appropriate kernel size according to the specified sigma + if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { + ksize_x_ = ceil(2.0*(1.0 + (sigma-0.8)/(0.3))); + ksize_y_ = ksize_x_; + } + + // The kernel size must be and odd number + if ((ksize_x_ % 2) == 0) { + ksize_x_ += 1; + } + + if ((ksize_y_ % 2) == 0) { + ksize_y_ += 1; + } + + // Perform the Gaussian Smoothing with border replication + GaussianBlur(src,dst,Size(ksize_x_,ksize_y_),sigma,sigma,cv::BORDER_REPLICATE); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the Perona and Malik conductivity coefficient g1 + * g1 = exp(-|dL|^2/k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ +void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + cv::exp(-(Lx.mul(Lx) + Ly.mul(Ly))/(k*k),dst); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes the Perona and Malik conductivity coefficient g2 + * g2 = 1 / (1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ +void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + dst = 1./(1. + (Lx.mul(Lx) + Ly.mul(Ly))/(k*k)); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes Weickert conductivity coefficient g3 + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. 
Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ +void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + Mat modg; + cv::pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg); + cv::exp(-3.315/modg, dst); + dst = 1.0 - dst; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes a good empirical value for the k contrast factor + * given an input image, the percentile (0-1), the gradient scale and the number of + * bins in the histogram + * @param img Input image + * @param perc Percentile of the image gradient histogram (0-1) + * @param gscale Scale for computing the image gradient histogram + * @param nbins Number of histogram bins + * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel + * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel + * @return k contrast factor + */ +float compute_k_percentile(const cv::Mat& img, float perc, float gscale, + int nbins, int ksize_x, int ksize_y) { + + int nbin = 0, nelements = 0, nthreshold = 0, k = 0; + float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; + float npoints = 0.0; + float hmax = 0.0; + + // Create the array for the histogram + float *hist = new float[nbins]; + + // Create the matrices + Mat gaussian = Mat::zeros(img.rows,img.cols,CV_32F); + Mat Lx = Mat::zeros(img.rows,img.cols,CV_32F); + Mat Ly = Mat::zeros(img.rows,img.cols,CV_32F); + + // Set the histogram to zero, just in case + for (int i = 0; i < nbins; i++) { + hist[i] = 0.0; + } + + // Perform the Gaussian convolution + gaussian_2D_convolution(img,gaussian,ksize_x,ksize_y,gscale); + + // Compute the Gaussian derivatives Lx and Ly + Scharr(gaussian,Lx,CV_32F,1,0,1,0,cv::BORDER_DEFAULT); + Scharr(gaussian,Ly,CV_32F,0,1,1,0,cv::BORDER_DEFAULT); + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows-1; i++) { + for (int j = 1; j < gaussian.cols-1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); + + // Get the maximum + if (modg > hmax) { + hmax = modg; + } + } + } + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows-1; i++) { + for (int j = 1; j < gaussian.cols-1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); + + // Find the correspondent bin + if (modg != 0.0) { + nbin = floor(nbins*(modg/hmax)); + + if (nbin == nbins) { + nbin--; + } + + hist[nbin]++; + npoints++; + } + } + } + + // Now find the perc of the histogram percentile + nthreshold = (size_t)(npoints*perc); + + + for (k = 0; nelements < nthreshold && k < nbins; k++) { + nelements = nelements + hist[k]; + } + + if (nelements < nthreshold) { + kperc = 0.03; + } + else { + kperc = hmax*((float)(k)/(float)nbins); + } + + delete hist; + return kperc; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function computes Scharr image derivatives + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @param scale Scale factor or derivative size + */ +void 
compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, + int xorder, int yorder, int scale) { + Mat kx, ky; + compute_derivative_kernels(kx,ky,xorder,yorder,scale); + sepFilter2D(src,dst,CV_32F,kx,ky); +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief Compute derivative kernels for sizes different than 3 + * @param _kx Horizontal kernel values + * @param _ky Vertical kernel values + * @param dx Derivative order in X-direction (horizontal) + * @param dy Derivative order in Y-direction (vertical) + * @param scale_ Scale factor or derivative size + */ +void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, + int dx, int dy, int scale) { + + int ksize = 3 + 2*(scale-1); + + // The standard Scharr kernel + if (scale == 1) { + getDerivKernels(_kx,_ky,dx,dy,0,true,CV_32F); + return; + } + + _kx.create(ksize,1,CV_32F,-1,true); + _ky.create(ksize,1,CV_32F,-1,true); + Mat kx = _kx.getMat(); + Mat ky = _ky.getMat(); + + float w = 10.0/3.0; + float norm = 1.0/(2.0*scale*(w+2.0)); + + for (int k = 0; k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? dx : dy; + std::vector kerI(ksize); + + for (int t=0; trows,kernel->cols,CV_32F,&kerI[0]); + temp.copyTo(*kernel); + } +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function performs a scalar non-linear diffusion step + * @param Ld2 Output image in the evolution + * @param c Conductivity image + * @param Lstep Previous image in the evolution + * @param stepsize The step size in time units + * @note Forward Euler Scheme 3x3 stencil + * The function c is a scalar value that depends on the gradient norm + * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy + */ +void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { + +#ifdef _OPENMP +#pragma omp parallel for schedule(dynamic) +#endif + for (int i = 1; i < Lstep.rows-1; i++) { + for (int j = 1; j < Lstep.cols-1; j++) { + float xpos = ((*(c.ptr(i)+j))+(*(c.ptr(i)+j+1)))*((*(Ld.ptr(i)+j+1))-(*(Ld.ptr(i)+j))); + float xneg = ((*(c.ptr(i)+j-1))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i)+j-1))); + float ypos = ((*(c.ptr(i)+j))+(*(c.ptr(i+1)+j)))*((*(Ld.ptr(i+1)+j))-(*(Ld.ptr(i)+j))); + float yneg = ((*(c.ptr(i-1)+j))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i-1)+j))); + *(Lstep.ptr(i)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + } + + for (int j = 1; j < Lstep.cols-1; j++) { + float xpos = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j+1)))*((*(Ld.ptr(0)+j+1))-(*(Ld.ptr(0)+j))); + float xneg = ((*(c.ptr(0)+j-1))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j-1))); + float ypos = ((*(c.ptr(0)+j))+(*(c.ptr(1)+j)))*((*(Ld.ptr(1)+j))-(*(Ld.ptr(0)+j))); + float yneg = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j))); + *(Lstep.ptr(0)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + for (int j = 1; j < Lstep.cols-1; j++) { + float xpos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j+1)))*((*(Ld.ptr(Lstep.rows-1)+j+1))-(*(Ld.ptr(Lstep.rows-1)+j))); + float xneg = ((*(c.ptr(Lstep.rows-1)+j-1))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j-1))); + float ypos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j))); 
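+    // The ypos term just computed is identically zero: both conductivity samples and
+    // both image samples are taken at the same bottom-row pixel, so no flux crosses
+    // the lower image border (a homogeneous Neumann boundary for this explicit step).
+    // The top-row, left-column and right-column loops cancel their outward flux term
+    // in the same way.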
+ float yneg = ((*(c.ptr(Lstep.rows-2)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-2)+j))); + *(Lstep.ptr(Lstep.rows-1)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + for (int i = 1; i < Lstep.rows-1; i++) { + float xpos = ((*(c.ptr(i)))+(*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1))-(*(Ld.ptr(i)))); + float xneg = ((*(c.ptr(i)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i)))); + float ypos = ((*(c.ptr(i)))+(*(c.ptr(i+1))))*((*(Ld.ptr(i+1)))-(*(Ld.ptr(i)))); + float yneg = ((*(c.ptr(i-1)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i-1)))); + *(Lstep.ptr(i)) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + for (int i = 1; i < Lstep.rows-1; i++) { + float xpos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); + float xneg = ((*(c.ptr(i)+Lstep.cols-2))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-2))); + float ypos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i+1)+Lstep.cols-1)))*((*(Ld.ptr(i+1)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); + float yneg = ((*(c.ptr(i-1)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i-1)+Lstep.cols-1))); + *(Lstep.ptr(i)+Lstep.cols-1) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + } + + Ld = Ld + Lstep; +} + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function checks if a given pixel is a maximum in a local neighbourhood + * @param img Input image where we will perform the maximum search + * @param dsize Half size of the neighbourhood + * @param value Response value at (x,y) position + * @param row Image row coordinate + * @param col Image column coordinate + * @param same_img Flag to indicate if the image value at (x,y) is in the input image + * @return 1->is maximum, 0->otherwise + */ +bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, + int row, int col, bool same_img) { + + bool response = true; + + for (int i = row-dsize; i <= row+dsize; i++) { + for (int j = col-dsize; j <= col+dsize; j++) { + if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) { + if (same_img == true) { + if (i != row || j != col) { + if ((*(img.ptr(i)+j)) > value) { + response = false; + return response; + } + } + } + else { + if ((*(img.ptr(i)+j)) > value) { + response = false; + return response; + } + } + } + } + } + + return response; +} diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h new file mode 100755 index 0000000000..d0ece89571 --- /dev/null +++ b/modules/features2d/src/kaze/nldiffusion_functions.h @@ -0,0 +1,51 @@ + +/** + * @file nldiffusion_functions.h + * @brief Functions for non-linear diffusion applications: + * 2D Gaussian Derivatives + * Perona and Malik conductivity equations + * Perona and Malik evolution + * @date Dec 27, 2011 + * @author Pablo F. 
Alcantarilla + */ + +#ifndef NLDIFFUSION_FUNCTIONS_H_ +#define NLDIFFUSION_FUNCTIONS_H_ + +//****************************************************************************** +//****************************************************************************** + +// Includes +#include "config.h" + +//************************************************************************************* +//************************************************************************************* + +// Gaussian 2D convolution +void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, + int ksize_x, int ksize_y, float sigma); + +// Diffusivity functions +void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); +void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); +void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); +float compute_k_percentile(const cv::Mat& img, float perc, float gscale, + int nbins, int ksize_x, int ksize_y); + +// Image derivatives +void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, + int xorder, int yorder, int scale); +void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, + int dx, int dy, int scale); + +// Nonlinear diffusion filtering scalar step +void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); + +// For non-maxima suppresion +bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, + int row, int col, bool same_img); + +//************************************************************************************* +//************************************************************************************* + +#endif // NLDIFFUSION_FUNCTIONS_H_ diff --git a/modules/features2d/src/kaze/utils.cpp b/modules/features2d/src/kaze/utils.cpp new file mode 100644 index 0000000000..7b55ac45f7 --- /dev/null +++ b/modules/features2d/src/kaze/utils.cpp @@ -0,0 +1,92 @@ + +//============================================================================= +// +// utils.cpp +// Author: Pablo F. Alcantarilla +// Institution: University d'Auvergne +// Address: Clermont Ferrand, France +// Date: 29/12/2011 +// Email: pablofdezalc@gmail.com +// +// KAZE Features Copyright 2012, Pablo F. Alcantarilla +// All Rights Reserved +// See LICENSE for the license information +//============================================================================= + +/** + * @file utils.cpp + * @brief Some useful functions + * @date Dec 29, 2011 + * @author Pablo F. 
Alcantarilla + */ + +#include "utils.h" + +using namespace std; +using namespace cv; + +//************************************************************************************* +//************************************************************************************* + +/** + * @brief This function copies the input image and converts the scale of the copied + * image prior visualization + * @param src Input image + * @param dst Output image + */ +void copy_and_convert_scale(const cv::Mat& src, cv::Mat& dst) { + + float min_val = 0, max_val = 0; + + src.copyTo(dst); + compute_min_32F(dst,min_val); + + dst = dst - min_val; + + compute_max_32F(dst,max_val); + dst = dst / max_val; +} + +//************************************************************************************* +//************************************************************************************* + +/* +void show_input_options_help(int example) { + + fflush(stdout); + + cout << endl; + cout << endl; + cout << "KAZE Features" << endl; + cout << "***********************************************************" << endl; + cout << "For running the program you need to type in the command line the following arguments: " << endl; + + if (example == 0) { + cout << "./kaze_features img.jpg [options]" << endl; + } + else if (example == 1) { + cout << "./kaze_match img1.jpg img2.pgm homography.txt [options]" << endl; + } + else if (example == 2) { + cout << "./kaze_compare img1.jpg img2.pgm homography.txt [options]" << endl; + } + + cout << endl; + cout << "The options are not mandatory. In case you do not specify additional options, default arguments will be used" << endl << endl; + cout << "Here is a description of the additional options: " << endl; + cout << "--verbose " << "\t\t if verbosity is required" << endl; + cout << "--help" << "\t\t for showing the command line options" << endl; + cout << "--soffset" << "\t\t the base scale offset (sigma units)" << endl; + cout << "--omax" << "\t\t maximum octave evolution of the image 2^sigma (coarsest scale)" << endl; + cout << "--nsublevels" << "\t\t number of sublevels per octave" << endl; + cout << "--dthreshold" << "\t\t Feature detector threshold response for accepting points (0.001 can be a good value)" << endl; + cout << "--descriptor" << "\t\t Descriptor Type 0 -> SURF, 1 -> M-SURF, 2 -> G-SURF" << endl; + cout << "--use_fed" "\t\t 1 -> Use FED, 0 -> Use AOS for the nonlinear diffusion filtering" << endl; + cout << "--upright" << "\t\t 0 -> Rotation Invariant, 1 -> No Rotation Invariant" << endl; + cout << "--extended" << "\t\t 0 -> Normal Descriptor (64), 1 -> Extended Descriptor (128)" << endl; + cout << "--output keypoints.txt" << "\t\t For saving the detected keypoints into a .txt file" << endl; + cout << "--save_scale_space" << "\t\t 1 in case we want to save the nonlinear scale space images. 0 otherwise" << endl; + cout << "--show_results" << "\t\t 1 in case we want to show detection results. 0 otherwise" << endl; + cout << endl; +} +*/ \ No newline at end of file diff --git a/modules/features2d/src/kaze/utils.h b/modules/features2d/src/kaze/utils.h new file mode 100644 index 0000000000..848bfe3f53 --- /dev/null +++ b/modules/features2d/src/kaze/utils.h @@ -0,0 +1,41 @@ + +/** + * @file utils.h + * @brief Some useful functions + * @date Dec 29, 2011 + * @author Pablo F. 
Alcantarilla + */ + +#ifndef UTILS_H_ +#define UTILS_H_ + +//****************************************************************************** +//****************************************************************************** + +// OPENCV Includes +#include "precomp.hpp" + +// System Includes +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//************************************************************************************* +//************************************************************************************* + +// Declaration of Functions +void compute_min_32F(const cv::Mat& src, float& value); +void compute_max_32F(const cv::Mat& src, float& value); +void convert_scale(cv::Mat& src); +void copy_and_convert_scale(const cv::Mat &src, cv::Mat& dst); + +//************************************************************************************* +//************************************************************************************* + +#endif // UTILS_H_ From 703e012a5b9c7a225e79f284f2adbcd0788397fa Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 5 Apr 2014 10:24:27 +0300 Subject: [PATCH 02/52] Prepare KAZE and AKAZE sources for integration --- modules/features2d/src/akaze/AKAZE.cpp | 56 +++++++++--------------- modules/features2d/src/akaze/AKAZE.h | 11 ++--- modules/features2d/src/akaze/config.h | 46 ++++++++++---------- modules/features2d/src/kaze/KAZE.cpp | 60 +++++++++++++------------- modules/features2d/src/kaze/KAZE.h | 11 ++--- modules/features2d/src/kaze/config.h | 48 ++++++++++----------- 6 files changed, 103 insertions(+), 129 deletions(-) mode change 100755 => 100644 modules/features2d/src/kaze/KAZE.h diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 5a110ac175..379e0ba1aa 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -33,7 +33,7 @@ using namespace cv; * @param options AKAZE configuration options * @note This constructor allocates memory for the nonlinear scale space */ -AKAZE::AKAZE(const AKAZEOptions& options) { +AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) { soffset_ = options.soffset; factor_size_ = DEFAULT_FACTOR_SIZE; @@ -75,7 +75,7 @@ AKAZE::AKAZE(const AKAZEOptions& options) { /** * @brief AKAZE destructor */ -AKAZE::~AKAZE(void) { +AKAZEFeatures::~AKAZEFeatures(void) { evolution_.clear(); } @@ -86,7 +86,7 @@ AKAZE::~AKAZE(void) { /** * @brief This method allocates the memory for the nonlinear diffusion evolution */ -void AKAZE::Allocate_Memory_Evolution(void) { +void AKAZEFeatures::Allocate_Memory_Evolution(void) { float rfactor = 0.0; int level_height = 0, level_width = 0; @@ -145,7 +145,7 @@ void AKAZE::Allocate_Memory_Evolution(void) { * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully, -1 otherwise */ -int AKAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img) { +int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { double t1 = 0.0, t2 = 0.0; @@ -222,7 +222,7 @@ int AKAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img) { * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of detected keypoints */ -void AKAZE::Feature_Detection(std::vector& kpts) { +void AKAZEFeatures::Feature_Detection(std::vector& kpts) { double t1 = 0.0, t2 = 0.0; @@ -242,7 +242,7 @@ void AKAZE::Feature_Detection(std::vector& kpts) { /** * @brief This method computes 
the multiscale derivatives for the nonlinear scale space */ -void AKAZE::Compute_Multiscale_Derivatives(void) { +void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { double t1 = 0.0, t2 = 0.0; @@ -279,7 +279,7 @@ void AKAZE::Compute_Multiscale_Derivatives(void) { * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as the feature detector response */ -void AKAZE::Compute_Determinant_Hessian_Response(void) { +void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { // Firstly compute the multiscale derivatives Compute_Multiscale_Derivatives(); @@ -307,7 +307,7 @@ void AKAZE::Compute_Determinant_Hessian_Response(void) { * @brief This method finds extrema in the nonlinear scale space * @param kpts Vector of detected keypoints */ -void AKAZE::Find_Scale_Space_Extrema(std::vector& kpts) { +void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { double t1 = 0.0, t2 = 0.0; float value = 0.0; @@ -418,7 +418,7 @@ void AKAZE::Find_Scale_Space_Extrema(std::vector& kpts) { * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints */ -void AKAZE::Do_Subpixel_Refinement(std::vector& kpts) { +void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { double t1 = 0.0, t2 = 0.0; float Dx = 0.0, Dy = 0.0, ratio = 0.0; @@ -493,7 +493,7 @@ void AKAZE::Do_Subpixel_Refinement(std::vector& kpts) { * @param kpts Vector of keypoints * @param mdist Maximum distance in pixels */ -void AKAZE::Feature_Suppression_Distance(std::vector& kpts, float mdist) { +void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, float mdist) { vector aux; vector to_delete; @@ -545,7 +545,7 @@ void AKAZE::Feature_Suppression_Distance(std::vector& kpts, float * @param kpts Vector of detected keypoints * @param desc Matrix to store the descriptors */ -void AKAZE::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { +void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { double t1 = 0.0, t2 = 0.0; @@ -653,7 +653,7 @@ void AKAZE::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) * @note The orientation is computed using a similar approach as described in the * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void AKAZE::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { +void AKAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; @@ -728,7 +728,7 @@ void AKAZE::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void AKAZE::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -819,7 +819,7 @@ void AKAZE::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void AKAZE::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -918,7 +918,7 @@ void AKAZE::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void AKAZE::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1041,7 +1041,7 @@ void AKAZE::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void AKAZE::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1165,7 +1165,7 @@ void AKAZE::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZE::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { +void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { float di = 0.0, dx = 0.0, dy = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; @@ -1378,7 +1378,7 @@ void AKAZE::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZE::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { +void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; @@ -1680,7 +1680,7 @@ void AKAZE::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *des * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZE::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { +void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { float di, dx, dy; float rx, ry; @@ -1772,7 +1772,7 @@ void AKAZE::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *d * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZE::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { +void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { float di = 0.0f, dx = 0.0f, dy = 0.0f; float rx = 0.0f, ry = 0.0f; @@ -1851,22 +1851,6 @@ void AKAZE::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned //************************************************************************************* //************************************************************************************* -/** - * @brief This method displays the computation 
times -*/ -void AKAZE::Show_Computation_Times(void) { - - cout << "(*) Time Scale Space: " << tscale_ << endl; - cout << "(*) Time Detector: " << tdetector_ << endl; - cout << " - Time Derivatives: " << tderivatives_ << endl; - cout << " - Time Extrema: " << textrema_ << endl; - cout << " - Time Subpixel: " << tsubpixel_ << endl; - cout << "(*) Time Descriptor: " << tdescriptor_ << endl; -} - -//************************************************************************************* -//************************************************************************************* - /** * @brief This function computes a (quasi-random) list of bits to be taken * from the full descriptor. To speed the extraction, the function creates diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h index fd1ec07fa3..9785b0c42f 100644 --- a/modules/features2d/src/akaze/AKAZE.h +++ b/modules/features2d/src/akaze/AKAZE.h @@ -22,7 +22,7 @@ //************************************************************************************* // AKAZE Class Declaration -class AKAZE { +class AKAZEFeatures { private: @@ -72,10 +72,10 @@ private: public: // Constructor - AKAZE(const AKAZEOptions &options); + AKAZEFeatures(const AKAZEOptions &options); // Destructor - ~AKAZE(void); + ~AKAZEFeatures(void); // Setters void Set_Octave_Max(const int& omax) { @@ -144,11 +144,6 @@ public: void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc); void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc); void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc); - - // Methods for saving some results and showing computation times - void Save_Scale_Space(void); - void Save_Detector_Responses(void); - void Show_Computation_Times(void); }; //************************************************************************************* diff --git a/modules/features2d/src/akaze/config.h b/modules/features2d/src/akaze/config.h index 331c89275f..bb704bb182 100644 --- a/modules/features2d/src/akaze/config.h +++ b/modules/features2d/src/akaze/config.h @@ -1,5 +1,5 @@ -#ifndef _CONFIG_H_ -#define _CONFIG_H_ +#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_HPP__ +#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_HPP__ // STL #include @@ -17,7 +17,7 @@ #endif // Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right -const float gauss25[7][7] = { +static const float gauss25[7][7] = { {0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f}, {0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f}, {0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f}, @@ -29,24 +29,24 @@ const float gauss25[7][7] = { // Scale Space parameters -const float DEFAULT_SCALE_OFFSET = 1.60f; // Base scale offset (sigma units) -const float DEFAULT_FACTOR_SIZE = 1.5f; // Factor for the multiscale derivatives -const int DEFAULT_OCTAVE_MIN = 0; // Initial octave level (-1 means that the size of the input image is duplicated) -const int DEFAULT_OCTAVE_MAX = 4; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) -const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level -const int DEFAULT_DIFFUSIVITY_TYPE = 1; -const float KCONTRAST_PERCENTILE = 0.7f; -const int KCONTRAST_NBINS = 300; -const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; -const float DEFAULT_KCONTRAST = .01f; +static const float DEFAULT_SCALE_OFFSET = 
1.60f; // Base scale offset (sigma units) +static const float DEFAULT_FACTOR_SIZE = 1.5f; // Factor for the multiscale derivatives +static const int DEFAULT_OCTAVE_MIN = 0; // Initial octave level (-1 means that the size of the input image is duplicated) +static const int DEFAULT_OCTAVE_MAX = 4; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) +static const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level +static const int DEFAULT_DIFFUSIVITY_TYPE = 1; +static const float KCONTRAST_PERCENTILE = 0.7f; +static const int KCONTRAST_NBINS = 300; +static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; +static const float DEFAULT_KCONTRAST = .01f; // Detector Parameters -const float DEFAULT_DETECTOR_THRESHOLD = 0.001f; // Detector response threshold to accept point -const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001f; // Minimum Detector response threshold to accept point -const int DEFAULT_LDB_DESCRIPTOR_SIZE = 0; // Use 0 for the full descriptor, or the number of bits -const int DEFAULT_LDB_PATTERN_SIZE = 10; // Actual patch size is 2*pattern_size*point.scale; -const int DEFAULT_LDB_CHANNELS = 3; +static const float DEFAULT_DETECTOR_THRESHOLD = 0.001f; // Detector response threshold to accept point +static const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001f; // Minimum Detector response threshold to accept point +static const int DEFAULT_LDB_DESCRIPTOR_SIZE = 0; // Use 0 for the full descriptor, or the number of bits +static const int DEFAULT_LDB_PATTERN_SIZE = 10; // Actual patch size is 2*pattern_size*point.scale; +static const int DEFAULT_LDB_CHANNELS = 3; // Descriptor Parameters enum DESCRIPTOR_TYPE @@ -59,13 +59,13 @@ enum DESCRIPTOR_TYPE MLDB = 5 }; -const int DEFAULT_DESCRIPTOR = MLDB; +static const int DEFAULT_DESCRIPTOR = MLDB; // Some debugging options -const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images -const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) -const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios -const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints +static const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images +static const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) +static const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios +static const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints // Options structure struct AKAZEOptions diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp index 09246e1670..f43d267e0d 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -35,7 +35,7 @@ using namespace cv; * @param options KAZE configuration options * @note The constructor allocates memory for the nonlinear scale space */ -KAZE::KAZE(KAZEOptions& options) { +KAZEFeatures::KAZEFeatures(KAZEOptions& options) { soffset_ = options.soffset; sderivatives_ = options.sderivatives; @@ -71,7 +71,7 @@ KAZE::KAZE(KAZEOptions& options) { /** * @brief KAZE destructor */ -KAZE::~KAZE(void) { +KAZEFeatures::~KAZEFeatures(void) { evolution_.clear(); } @@ -82,7 +82,7 @@ KAZE::~KAZE(void) { /** * @brief This method allocates the memory for the nonlinear diffusion evolution */ -void KAZE::Allocate_Memory_Evolution(void) { +void KAZEFeatures::Allocate_Memory_Evolution(void) { // 
Allocate the dimension of the matrices for the evolution for (int i = 0; i <= omax_-1; i++) { @@ -145,7 +145,7 @@ void KAZE::Allocate_Memory_Evolution(void) { * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully. -1 otherwise */ -int KAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img) { +int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { double t2 = 0.0, t1 = 0.0; @@ -226,7 +226,7 @@ int KAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img) { * @param img Input image * @param kpercentile Percentile of the gradient histogram */ -void KAZE::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { +void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { if (verbosity_ == true) { cout << "Computing Kcontrast factor." << endl; @@ -248,7 +248,7 @@ void KAZE::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { /** * @brief This method computes the multiscale derivatives for the nonlinear scale space */ -void KAZE::Compute_Multiscale_Derivatives(void) +void KAZEFeatures::Compute_Multiscale_Derivatives(void) { double t2 = 0.0, t1 = 0.0; t1 = getTickCount(); @@ -288,7 +288,7 @@ void KAZE::Compute_Multiscale_Derivatives(void) * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as feature detector */ -void KAZE::Compute_Detector_Response(void) { +void KAZEFeatures::Compute_Detector_Response(void) { double t2 = 0.0, t1 = 0.0; float lxx = 0.0, lxy = 0.0, lyy = 0.0; @@ -326,7 +326,7 @@ void KAZE::Compute_Detector_Response(void) { * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of keypoints */ -void KAZE::Feature_Detection(std::vector& kpts) { +void KAZEFeatures::Feature_Detection(std::vector& kpts) { double t2 = 0.0, t1 = 0.0; t1 = getTickCount(); @@ -353,7 +353,7 @@ void KAZE::Feature_Detection(std::vector& kpts) { * @param kpts Vector of keypoints * @note We compute features for each of the nonlinear scale space level in a different processing thread */ -void KAZE::Determinant_Hessian_Parallel(std::vector& kpts) { +void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) { int level = 0; float dist = 0.0, smax = 3.0; @@ -444,7 +444,7 @@ void KAZE::Determinant_Hessian_Parallel(std::vector& kpts) { * at a given nonlinear scale level * @param level Index in the nonlinear scale space evolution */ -void KAZE::Find_Extremum_Threading(const int& level) { +void KAZEFeatures::Find_Extremum_Threading(const int& level) { float value = 0.0; bool is_extremum = false; @@ -497,7 +497,7 @@ void KAZE::Find_Extremum_Threading(const int& level) { * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints */ -void KAZE::Do_Subpixel_Refinement(std::vector &kpts) { +void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { int step = 1; int x = 0, y = 0; @@ -603,7 +603,7 @@ void KAZE::Do_Subpixel_Refinement(std::vector &kpts) { * @param kpts Vector of keypoints * @param mdist Maximum distance in pixels */ -void KAZE::Feature_Suppression_Distance(std::vector& kpts, const float& mdist) { +void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, const float& mdist) { vector aux; vector to_delete; @@ -659,7 +659,7 @@ void KAZE::Feature_Suppression_Distance(std::vector& kpts, const f * @param kpts Vector of keypoints * @param desc Matrix with the 
feature descriptors */ -void KAZE::Feature_Description(std::vector &kpts, cv::Mat &desc) { +void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) { double t2 = 0.0, t1 = 0.0; t1 = getTickCount(); @@ -807,7 +807,7 @@ void KAZE::Feature_Description(std::vector &kpts, cv::Mat &desc) { * @note The orientation is computed using a similar approach as described in the * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void KAZE::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) +void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) { int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0; @@ -888,7 +888,7 @@ void KAZE::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void KAZE::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; @@ -987,7 +987,7 @@ void KAZE::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void KAZE::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { +void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -1094,7 +1094,7 @@ void KAZE::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1226,7 +1226,7 @@ void KAZE::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1359,7 +1359,7 @@ void KAZE::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZE::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -1494,7 +1494,7 @@ void KAZE::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. 
Bergasa and * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZE::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -1633,7 +1633,7 @@ void KAZE::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void KAZE::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; @@ -1752,7 +1752,7 @@ void KAZE::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void KAZE::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; @@ -1880,7 +1880,7 @@ void KAZE::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { +void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -2036,7 +2036,7 @@ void KAZE::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZE::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { +void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -2197,7 +2197,7 @@ void KAZE::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZE::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { float len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; @@ -2350,7 +2350,7 @@ void KAZE::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZE::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { +void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { float len = 0.0, xf = 0.0, yf = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; @@ -2509,7 +2509,7 @@ void KAZE::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { * If c is a matrix of the same size as Ld, the diffusion will be nonlinear * The stepsize can be arbitrarilly large */ -void KAZE::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { +void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { #ifdef _OPENMP #pragma omp sections @@ -2540,7 +2540,7 @@ void KAZE::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, * @param c Conductivity image * @param stepsize Stepsize for the nonlinear diffusion evolution */ -void KAZE::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { +void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { // Operate on rows for (int i = 0; i < qr_.rows; i++) { @@ -2581,7 +2581,7 @@ void KAZE::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsi * @param c Conductivity image * @param stepsize Stepsize for the nonlinear diffusion evolution */ -void KAZE::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { +void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { // Operate on columns for (int j = 0; j < qc_.cols; j++) { @@ -2624,7 +2624,7 @@ void KAZE::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& ste * @brief This method does the Thomas algorithm for solving a tridiagonal linear system * @note The matrix A must be strictly diagonally dominant for a stable solution */ -void KAZE::Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x) { +void KAZEFeatures::Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x) { // Auxiliary variables int n = a.rows; diff --git a/modules/features2d/src/kaze/KAZE.h b/modules/features2d/src/kaze/KAZE.h old mode 100755 new mode 100644 index 9d489c0480..1d7fb0beb1 --- a/modules/features2d/src/kaze/KAZE.h +++ b/modules/features2d/src/kaze/KAZE.h @@ -23,7 +23,7 @@ //************************************************************************************* // KAZE Class Declaration -class KAZE { +class KAZEFeatures { private: @@ -69,10 +69,10 @@ private: public: // Constructor - KAZE(KAZEOptions& options); + KAZEFeatures(KAZEOptions& options); // Destructor - ~KAZE(void); + ~KAZEFeatures(void); // Public methods for KAZE interface void Allocate_Memory_Evolution(void); @@ -80,11 +80,6 @@ public: void Feature_Detection(std::vector& kpts); void Feature_Description(std::vector& kpts, cv::Mat& desc); - // Methods for saving the scale space set of images and detector responses - void Save_Nonlinear_Scale_Space(void); - void Save_Detector_Responses(void); - void Save_Flow_Responses(void); - private: // Feature Detection Methods diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/config.h index ffb41ce826..88fcba5960 100644 --- a/modules/features2d/src/kaze/config.h +++ b/modules/features2d/src/kaze/config.h @@ -6,8 +6,8 @@ * @author Pablo F. 
Alcantarilla */ -#ifndef _CONFIG_H_ -#define _CONFIG_H_ +#ifndef __OPENCV_FEATURES_2D_KAZE_CONFIG_HPP__ +#define __OPENCV_FEATURES_2D_KAZE_CONFIG_HPP__ //****************************************************************************** //****************************************************************************** @@ -38,30 +38,30 @@ #define NMAX_CHAR 400 // Some default options -const float DEFAULT_SCALE_OFFSET = 1.60; // Base scale offset (sigma units) -const float DEFAULT_OCTAVE_MAX = 4.0; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) -const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level -const float DEFAULT_DETECTOR_THRESHOLD = 0.001; // Detector response threshold to accept point -const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001; // Minimum Detector response threshold to accept point -const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-SURF -const bool DEFAULT_USE_FED = true; // 0->AOS, 1->FED -const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation -const bool DEFAULT_EXTENDED = false; // Extended descriptor, dimension 128 -const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images -const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) -const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios -const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints +static const float DEFAULT_SCALE_OFFSET = 1.60; // Base scale offset (sigma units) +static const float DEFAULT_OCTAVE_MAX = 4.0; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) +static const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level +static const float DEFAULT_DETECTOR_THRESHOLD = 0.001; // Detector response threshold to accept point +static const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001; // Minimum Detector response threshold to accept point +static const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-SURF +static const bool DEFAULT_USE_FED = true; // 0->AOS, 1->FED +static const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation +static const bool DEFAULT_EXTENDED = false; // Extended descriptor, dimension 128 +static const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images +static const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) +static const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios +static const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints // Some important configuration variables -const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0; -const float DEFAULT_KCONTRAST = .01; -const float KCONTRAST_PERCENTILE = 0.7; -const int KCONTRAST_NBINS = 300; -const bool COMPUTE_KCONTRAST = true; -const int DEFAULT_DIFFUSIVITY_TYPE = 1; // 0 -> PM G1, 1 -> PM G2, 2 -> Weickert -const bool USE_CLIPPING_NORMALIZATION = false; -const float CLIPPING_NORMALIZATION_RATIO = 1.6; -const int CLIPPING_NORMALIZATION_NITER = 5; +static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0; +static const float DEFAULT_KCONTRAST = .01; +static const float KCONTRAST_PERCENTILE = 0.7; +static const int KCONTRAST_NBINS = 300; +static const bool COMPUTE_KCONTRAST = true; +static const int DEFAULT_DIFFUSIVITY_TYPE = 1; // 0 -> PM G1, 1 -> PM G2, 2 -> Weickert +static const 
bool USE_CLIPPING_NORMALIZATION = false; +static const float CLIPPING_NORMALIZATION_RATIO = 1.6; +static const int CLIPPING_NORMALIZATION_NITER = 5; //************************************************************************************* //************************************************************************************* From 137ff7eccbd48f1f3d60ca423d96c721d83599e7 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 5 Apr 2014 10:25:46 +0300 Subject: [PATCH 03/52] Added KAZE and AKAZE wrappers --- .../features2d/include/opencv2/features2d.hpp | 66 ++++++++ modules/features2d/src/akaze.cpp | 149 ++++++++++++++++++ modules/features2d/src/features2d_init.cpp | 20 ++- modules/features2d/src/kaze.cpp | 120 ++++++++++++++ 4 files changed, 353 insertions(+), 2 deletions(-) create mode 100644 modules/features2d/src/akaze.cpp diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 190e8ac665..e45c17771f 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -887,7 +887,73 @@ protected: PixelTestFn test_fn_; }; +/*! +KAZE implementation +*/ +class CV_EXPORTS_W KAZE : public Feature2D +{ +public: + CV_WRAP explicit KAZE(bool _extended = false); + virtual ~KAZE(); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + // returns the default norm type + int defaultNorm() const; + + AlgorithmInfo* info() const; + + void operator()(InputArray image, InputArray mask, + std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints) const; + +protected: + void detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const; + void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; + + CV_PROP bool extended; +}; + +/*! 
+AKAZE implementation +*/ +class CV_EXPORTS_W AKAZE : public Feature2D +{ +public: + CV_WRAP explicit AKAZE(int _descriptor = 5, int _descriptor_size = 0, int _descriptor_channels = 3); + + virtual ~AKAZE(); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + // returns the default norm type + int defaultNorm() const; + + // Compute the AKAZE features on an image + void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; + + // Compute the BRISK features and descriptors on an image + void operator()(InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints = false) const; + + AlgorithmInfo* info() const; + +protected: + + void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; + void detectImpl(InputArray image, std::vector& keypoints, InputArray mask = noArray()) const; + + CV_PROP int descriptor_channels; + CV_PROP int descriptor; + CV_PROP int descriptor_size; + +}; /****************************************************************************************\ * Distance * \****************************************************************************************/ diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp new file mode 100644 index 0000000000..8cba3b6d20 --- /dev/null +++ b/modules/features2d/src/akaze.cpp @@ -0,0 +1,149 @@ +#include "precomp.hpp" +#include "akaze/AKAZE.h" + +namespace cv +{ + + AKAZE::AKAZE(int _descriptor, int _descriptor_size, int _descriptor_channels) + : descriptor_channels(_descriptor_channels) + , descriptor(_descriptor) + , descriptor_size(_descriptor_size) + { + + } + + AKAZE::~AKAZE() + { + + } + + // returns the descriptor size in bytes + int AKAZE::descriptorSize() const + { + if (descriptor < MLDB_UPRIGHT) + { + return 64; + } + else + { + // We use the full length binary descriptor -> 486 bits + if (descriptor_size == 0) + { + int t = (6 + 36 + 120) * descriptor_channels; + return ceil(t / 8.); + } + else + { + // We use the random bit selection length binary descriptor + return ceil(descriptor_size / 8.); + } + } + } + + // returns the descriptor type + int AKAZE::descriptorType() const + { + if (descriptor < MLDB_UPRIGHT) + { + return CV_32FC1; + } + else + { + return CV_8UC1; + } + } + + // returns the default norm type + int AKAZE::defaultNorm() const + { + if (descriptor < MLDB_UPRIGHT) + { + return NORM_L2; + } + else + { + return NORM_HAMMING; + } + } + + + void AKAZE::operator()(InputArray image, InputArray mask, + std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints) const + { + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + cv::Mat& desc = descriptors.getMatRef(); + + AKAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + + AKAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + + if (!useProvidedKeypoints) + { + impl.Feature_Detection(keypoints); + } + + if (!mask.empty()) + { + cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + + impl.Compute_Descriptors(keypoints, desc); + } + + void AKAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const + { + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + 
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + AKAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + + AKAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + impl.Feature_Detection(keypoints); + + if (!mask.empty()) + { + cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + } + + void AKAZE::computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const + { + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + cv::Mat& desc = descriptors.getMatRef(); + + AKAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + + AKAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + impl.Compute_Descriptors(keypoints, desc); + + CV_Assert(!desc.rows || desc.cols == descriptorSize() && "Descriptor size does not match expected"); + CV_Assert(!desc.rows || (desc.type() & descriptorType()) && "Descriptor type does not match expected"); + } +} \ No newline at end of file diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp index 889c5b64ca..e3a3b3c363 100644 --- a/modules/features2d/src/features2d_init.cpp +++ b/modules/features2d/src/features2d_init.cpp @@ -125,6 +125,20 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT", /////////////////////////////////////////////////////////////////////////////////////////////////////////// +CV_INIT_ALGORITHM(KAZE, "Feature2D.KAZE", + obj.info()->addParam(obj, "extended", obj.extended)) + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +CV_INIT_ALGORITHM(AKAZE, "Feature2D.AKAZE", + obj.info()->addParam(obj, "descriptor_channels", obj.descriptor_channels); + obj.info()->addParam(obj, "descriptor", obj.descriptor); + obj.info()->addParam(obj, "descriptor_size", obj.descriptor_size)) + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + + + CV_INIT_ALGORITHM(SimpleBlobDetector, "Feature2D.SimpleBlob", obj.info()->addParam(obj, "thresholdStep", obj.params.thresholdStep); obj.info()->addParam(obj, "minThreshold", obj.params.minThreshold); @@ -202,11 +216,13 @@ bool cv::initModule_features2d(void) all &= !FREAK_info_auto.name().empty(); all &= !ORB_info_auto.name().empty(); all &= !GFTTDetector_info_auto.name().empty(); - all &= !HarrisDetector_info_auto.name().empty(); + all &= !KAZE_info_auto.name().empty(); + all &= !AKAZE_info_auto.name().empty(); + all &= !HarrisDetector_info_auto.name().empty(); all &= !DenseFeatureDetector_info_auto.name().empty(); all &= !GridAdaptedFeatureDetector_info_auto.name().empty(); all &= !BFMatcher_info_auto.name().empty(); all &= !FlannBasedMatcher_info_auto.name().empty(); return all; -} +} \ No newline at end of file diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index e69de29bb2..1944f1e4e0 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -0,0 +1,120 @@ +#include "precomp.hpp" +#include "kaze/KAZE.h" + +namespace cv +{ + KAZE::KAZE(bool _extended /* = false */) + : extended(_extended) + { + } + + KAZE::~KAZE() + { + + } + + // returns the descriptor size in bytes + int KAZE::descriptorSize() const + { + return extended ? 
128 : 64; + } + + // returns the descriptor type + int KAZE::descriptorType() const + { + return CV_32F; + } + + // returns the default norm type + int KAZE::defaultNorm() const + { + return NORM_L2; + } + + + void KAZE::operator()(InputArray image, InputArray mask, + std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints) const + { + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + cv::Mat& desc = descriptors.getMatRef(); + + KAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + options.extended = extended; + + KAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + + if (!useProvidedKeypoints) + { + impl.Feature_Detection(keypoints); + } + + if (!mask.empty()) + { + cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + + impl.Feature_Description(keypoints, desc); + + CV_Assert(!desc.rows || desc.cols == descriptorSize() && "Descriptor size does not match expected"); + CV_Assert(!desc.rows || (desc.type() & descriptorType()) && "Descriptor type does not match expected"); + } + + void KAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const + { + Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + KAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + options.extended = extended; + + KAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + impl.Feature_Detection(keypoints); + + if (!mask.empty()) + { + cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat()); + } + } + + void KAZE::computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const + { + cv::Mat img = image.getMat(); + if (img.type() != CV_8UC1) + cvtColor(image, img, COLOR_BGR2GRAY); + + Mat img1_32; + img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); + + cv::Mat& desc = descriptors.getMatRef(); + + KAZEOptions options; + options.img_width = img.cols; + options.img_height = img.rows; + options.extended = extended; + + KAZEFeatures impl(options); + impl.Create_Nonlinear_Scale_Space(img1_32); + impl.Feature_Description(keypoints, desc); + + CV_Assert(!desc.rows || desc.cols == descriptorSize() && "Descriptor size does not match expected"); + CV_Assert(!desc.rows || (desc.type() & descriptorType()) && "Descriptor type does not match expected"); + } +} \ No newline at end of file From 17f305140bbfb358076db0e51b9d24b8997d02f6 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 5 Apr 2014 10:25:59 +0300 Subject: [PATCH 04/52] Added unit-tests for KAZE and AKAZE features --- modules/features2d/test/test_keypoints.cpp | 12 ++++++++++++ .../test/test_rotation_and_scale_invariance.cpp | 15 +++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index e15d4fa17f..f8163c1f36 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -166,3 +166,15 @@ TEST(Features2d_Detector_Keypoints_Dense, validation) CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.Dense")); test.safe_run(); } + +TEST(Features2d_Detector_Keypoints_KAZE, validation) +{ + CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.KAZE")); + test.safe_run(); +} + 
+TEST(Features2d_Detector_Keypoints_AKAZE, validation) +{ + CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.AKAZE")); + test.safe_run(); +} \ No newline at end of file diff --git a/modules/features2d/test/test_rotation_and_scale_invariance.cpp b/modules/features2d/test/test_rotation_and_scale_invariance.cpp index 2fe59ca7fc..07123bed13 100644 --- a/modules/features2d/test/test_rotation_and_scale_invariance.cpp +++ b/modules/features2d/test/test_rotation_and_scale_invariance.cpp @@ -652,6 +652,21 @@ TEST(Features2d_ScaleInvariance_Detector_BRISK, regression) test.safe_run(); } +TEST(Features2d_ScaleInvariance_Detector_KAZE, regression) +{ + DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.KAZE"), + 0.08f, + 0.49f); + test.safe_run(); +} + +TEST(Features2d_ScaleInvariance_Detector_AKAZE, regression) +{ + DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.AKAZE"), + 0.08f, + 0.49f); + test.safe_run(); +} //TEST(Features2d_ScaleInvariance_Detector_ORB, regression) //{ // DetectorScaleInvarianceTest test(Algorithm::create("Feature2D.ORB"), From 5848e751683ad1a352e51c27ee97f0ff8ff03bf1 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 5 Apr 2014 15:25:59 +0300 Subject: [PATCH 05/52] Clean-up from unused utils.h/utils/cpp --- modules/features2d/src/akaze/AKAZE.h | 1 - modules/features2d/src/akaze/utils.cpp | 196 ------------------------- modules/features2d/src/akaze/utils.h | 54 ------- modules/features2d/src/kaze/KAZE.h | 1 - modules/features2d/src/kaze/fed.cpp | 2 +- modules/features2d/src/kaze/utils.cpp | 92 ------------ modules/features2d/src/kaze/utils.h | 41 ------ 7 files changed, 1 insertion(+), 386 deletions(-) delete mode 100644 modules/features2d/src/akaze/utils.cpp delete mode 100644 modules/features2d/src/akaze/utils.h delete mode 100644 modules/features2d/src/kaze/utils.cpp delete mode 100644 modules/features2d/src/kaze/utils.h diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h index 9785b0c42f..ad0364e7a2 100644 --- a/modules/features2d/src/akaze/AKAZE.h +++ b/modules/features2d/src/akaze/AKAZE.h @@ -15,7 +15,6 @@ // Includes #include "config.h" #include "fed.h" -#include "utils.h" #include "nldiffusion_functions.h" //************************************************************************************* diff --git a/modules/features2d/src/akaze/utils.cpp b/modules/features2d/src/akaze/utils.cpp deleted file mode 100644 index eb14abcd51..0000000000 --- a/modules/features2d/src/akaze/utils.cpp +++ /dev/null @@ -1,196 +0,0 @@ -//============================================================================= -// -// utils.cpp -// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2) -// Institutions: Georgia Institute of Technology (1) -// TrueVision Solutions (2) -// -// Date: 15/09/2013 -// Email: pablofdezalc@gmail.com -// -// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo -// All Rights Reserved -// See LICENSE for the license information -//============================================================================= - -/** - * @file utils.cpp - * @brief Some utilities functions - * @date Sep 15, 2013 - * @author Pablo F. 
Alcantarilla, Jesus Nuevo - */ - -#include "precomp.hpp" -#include "utils.h" - -// Namespaces -using namespace std; -using namespace cv; - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function computes the minimum value of a float image - * @param src Input image - * @param value Minimum value - */ -void compute_min_32F(const cv::Mat &src, float &value) { - - float aux = 1000.0; - - for (int i = 0; i < src.rows; i++) { - for (int j = 0; j < src.cols; j++) { - if (src.at(i,j) < aux) { - aux = src.at(i,j); - } - } - } - - value = aux; -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function computes the maximum value of a float image - * @param src Input image - * @param value Maximum value - */ -void compute_max_32F(const cv::Mat &src, float &value) { - - float aux = 0.0; - - for (int i = 0; i < src.rows; i++) { - for (int j = 0; j < src.cols; j++) { - if (src.at(i,j) > aux) { - aux = src.at(i,j); - } - } - } - - value = aux; -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function converts the scale of the input image prior to visualization - * @param src Input/Output image - * @param value Maximum value - */ -void convert_scale(cv::Mat &src) { - - float min_val = 0, max_val = 0; - - compute_min_32F(src,min_val); - - src = src - min_val; - - compute_max_32F(src,max_val); - src = src / max_val; -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function copies the input image and converts the scale of the copied - * image prior visualization - * @param src Input image - * @param dst Output image - */ -void copy_and_convert_scale(const cv::Mat &src, cv::Mat dst) { - - float min_val = 0, max_val = 0; - - src.copyTo(dst); - compute_min_32F(dst,min_val); - - dst = dst - min_val; - - compute_max_32F(dst,max_val); - dst = dst / max_val; -} - -//************************************************************************************* -//************************************************************************************* - -const size_t length = string("--descriptor_channels").size() + 2; -static inline std::ostream& cout_help() -{ cout << setw(length); return cout; } - -static inline std::string toUpper(std::string s) -{ - std::transform(s.begin(), s.end(), s.begin(), ::toupper); - return s; -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function shows the possible command line configuration options - */ -void show_input_options_help(int example) { - - fflush(stdout); - cout << "A-KAZE Features" << endl; - cout << "Usage: "; - if (example == 0) { - cout << "./akaze_features -i img.jpg [options]" << endl; - } - else if (example == 1) { - cout << "./akaze_match img1.jpg img2.pgm homography.txt [options]" << endl; - } - else if (example == 2) { - cout << "./akaze_compare img1.jpg img2.pgm homography.txt [options]" << endl; - } - - 
cout << endl; - cout_help() << "Options below are not mandatory. Unless specified, default arguments are used." << endl << endl; - // Justify on the left - cout << left; - // Generalities - cout_help() << "--help" << "Show the command line options" << endl; - cout_help() << "--verbose " << "Verbosity is required" << endl; - cout_help() << endl; - // Scale-space parameters - cout_help() << "--soffset" << "Base scale offset (sigma units)" << endl; - cout_help() << "--omax" << "Maximum octave of image evolution" << endl; - cout_help() << "--nsublevels" << "Number of sublevels per octave" << endl; - cout_help() << "--diffusivity" << "Diffusivity function. Possible values:" << endl; - cout_help() << " " << "0 -> Perona-Malik, g1 = exp(-|dL|^2/k^2)" << endl; - cout_help() << " " << "1 -> Perona-Malik, g2 = 1 / (1 + dL^2 / k^2)" << endl; - cout_help() << " " << "2 -> Weickert diffusivity" << endl; - cout_help() << " " << "3 -> Charbonnier diffusivity" << endl; - cout_help() << endl; - // Feature detection parameters. - cout_help() << "--dthreshold" << "Feature detector threshold response for keypoints" << endl; - cout_help() << " " << "(0.001 can be a good value)" << endl; - cout_help() << endl; - // Descriptor parameters. - cout_help() << "--descriptor" << "Descriptor Type. Possible values:" << endl; - cout_help() << " " << "0 -> SURF_UPRIGHT" << endl; - cout_help() << " " << "1 -> SURF" << endl; - cout_help() << " " << "2 -> M-SURF_UPRIGHT," << endl; - cout_help() << " " << "3 -> M-SURF" << endl; - cout_help() << " " << "4 -> M-LDB_UPRIGHT" << endl; - cout_help() << " " << "5 -> M-LDB" << endl; - - cout_help() << "--descriptor_channels " << "Descriptor Channels for M-LDB. Valid values: " << endl; - cout_help() << " " << "1 -> intensity" << endl; - cout_help() << " " << "2 -> intensity + gradient magnitude" << endl; - cout_help() << " " << "3 -> intensity + X and Y gradients" < show detection results." << endl; - cout_help() << " " << "0 -> don't show detection results" << endl; - cout_help() << endl; -} diff --git a/modules/features2d/src/akaze/utils.h b/modules/features2d/src/akaze/utils.h deleted file mode 100644 index 894c836ed0..0000000000 --- a/modules/features2d/src/akaze/utils.h +++ /dev/null @@ -1,54 +0,0 @@ - -#ifndef _UTILS_H_ -#define _UTILS_H_ - -//****************************************************************************** -//****************************************************************************** - -// OpenCV Includes -#include "precomp.hpp" - -// System Includes -#include -#include -#include -#include -#include -#include -#include - -//****************************************************************************** -//****************************************************************************** - -// Stringify common types such as int, double and others. 
-template -inline std::string to_string(const T& x) { - std::stringstream oss; - oss << x; - return oss.str(); -} - -//****************************************************************************** -//****************************************************************************** - -// Stringify and format integral types as follows: -// to_formatted_string( 1, 2) produces string: '01' -// to_formatted_string( 5, 2) produces string: '05' -// to_formatted_string( 19, 2) produces string: '19' -// to_formatted_string( 19, 3) produces string: '019' -template -inline std::string to_formatted_string(Integer x, int num_digits) { - std::stringstream oss; - oss << std::setfill('0') << std::setw(num_digits) << x; - return oss.str(); -} - -//****************************************************************************** -//****************************************************************************** - -void compute_min_32F(const cv::Mat& src, float& value); -void compute_max_32F(const cv::Mat& src, float& value); -void convert_scale(cv::Mat& src); -void copy_and_convert_scale(const cv::Mat& src, cv::Mat& dst); - -#endif diff --git a/modules/features2d/src/kaze/KAZE.h b/modules/features2d/src/kaze/KAZE.h index 1d7fb0beb1..3e86ab2d86 100644 --- a/modules/features2d/src/kaze/KAZE.h +++ b/modules/features2d/src/kaze/KAZE.h @@ -17,7 +17,6 @@ #include "config.h" #include "nldiffusion_functions.h" #include "fed.h" -#include "utils.h" //************************************************************************************* //************************************************************************************* diff --git a/modules/features2d/src/kaze/fed.cpp b/modules/features2d/src/kaze/fed.cpp index 0bd228673a..f07d072d61 100644 --- a/modules/features2d/src/kaze/fed.cpp +++ b/modules/features2d/src/kaze/fed.cpp @@ -28,7 +28,7 @@ * DAGM, 2010 * */ - +#include "precomp.hpp" #include "fed.h" using namespace std; diff --git a/modules/features2d/src/kaze/utils.cpp b/modules/features2d/src/kaze/utils.cpp deleted file mode 100644 index 7b55ac45f7..0000000000 --- a/modules/features2d/src/kaze/utils.cpp +++ /dev/null @@ -1,92 +0,0 @@ - -//============================================================================= -// -// utils.cpp -// Author: Pablo F. Alcantarilla -// Institution: University d'Auvergne -// Address: Clermont Ferrand, France -// Date: 29/12/2011 -// Email: pablofdezalc@gmail.com -// -// KAZE Features Copyright 2012, Pablo F. Alcantarilla -// All Rights Reserved -// See LICENSE for the license information -//============================================================================= - -/** - * @file utils.cpp - * @brief Some useful functions - * @date Dec 29, 2011 - * @author Pablo F. 
Alcantarilla - */ - -#include "utils.h" - -using namespace std; -using namespace cv; - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function copies the input image and converts the scale of the copied - * image prior visualization - * @param src Input image - * @param dst Output image - */ -void copy_and_convert_scale(const cv::Mat& src, cv::Mat& dst) { - - float min_val = 0, max_val = 0; - - src.copyTo(dst); - compute_min_32F(dst,min_val); - - dst = dst - min_val; - - compute_max_32F(dst,max_val); - dst = dst / max_val; -} - -//************************************************************************************* -//************************************************************************************* - -/* -void show_input_options_help(int example) { - - fflush(stdout); - - cout << endl; - cout << endl; - cout << "KAZE Features" << endl; - cout << "***********************************************************" << endl; - cout << "For running the program you need to type in the command line the following arguments: " << endl; - - if (example == 0) { - cout << "./kaze_features img.jpg [options]" << endl; - } - else if (example == 1) { - cout << "./kaze_match img1.jpg img2.pgm homography.txt [options]" << endl; - } - else if (example == 2) { - cout << "./kaze_compare img1.jpg img2.pgm homography.txt [options]" << endl; - } - - cout << endl; - cout << "The options are not mandatory. In case you do not specify additional options, default arguments will be used" << endl << endl; - cout << "Here is a description of the additional options: " << endl; - cout << "--verbose " << "\t\t if verbosity is required" << endl; - cout << "--help" << "\t\t for showing the command line options" << endl; - cout << "--soffset" << "\t\t the base scale offset (sigma units)" << endl; - cout << "--omax" << "\t\t maximum octave evolution of the image 2^sigma (coarsest scale)" << endl; - cout << "--nsublevels" << "\t\t number of sublevels per octave" << endl; - cout << "--dthreshold" << "\t\t Feature detector threshold response for accepting points (0.001 can be a good value)" << endl; - cout << "--descriptor" << "\t\t Descriptor Type 0 -> SURF, 1 -> M-SURF, 2 -> G-SURF" << endl; - cout << "--use_fed" "\t\t 1 -> Use FED, 0 -> Use AOS for the nonlinear diffusion filtering" << endl; - cout << "--upright" << "\t\t 0 -> Rotation Invariant, 1 -> No Rotation Invariant" << endl; - cout << "--extended" << "\t\t 0 -> Normal Descriptor (64), 1 -> Extended Descriptor (128)" << endl; - cout << "--output keypoints.txt" << "\t\t For saving the detected keypoints into a .txt file" << endl; - cout << "--save_scale_space" << "\t\t 1 in case we want to save the nonlinear scale space images. 0 otherwise" << endl; - cout << "--show_results" << "\t\t 1 in case we want to show detection results. 0 otherwise" << endl; - cout << endl; -} -*/ \ No newline at end of file diff --git a/modules/features2d/src/kaze/utils.h b/modules/features2d/src/kaze/utils.h deleted file mode 100644 index 848bfe3f53..0000000000 --- a/modules/features2d/src/kaze/utils.h +++ /dev/null @@ -1,41 +0,0 @@ - -/** - * @file utils.h - * @brief Some useful functions - * @date Dec 29, 2011 - * @author Pablo F. 
Alcantarilla - */ - -#ifndef UTILS_H_ -#define UTILS_H_ - -//****************************************************************************** -//****************************************************************************** - -// OPENCV Includes -#include "precomp.hpp" - -// System Includes -#include -#include -#include -#include -#include -#include -#include -#include -#include - -//************************************************************************************* -//************************************************************************************* - -// Declaration of Functions -void compute_min_32F(const cv::Mat& src, float& value); -void compute_max_32F(const cv::Mat& src, float& value); -void convert_scale(cv::Mat& src); -void copy_and_convert_scale(const cv::Mat &src, cv::Mat& dst); - -//************************************************************************************* -//************************************************************************************* - -#endif // UTILS_H_ From d27ed856f227cb0fa4945e3b4fe23eb181e10b61 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Wed, 23 Apr 2014 22:44:03 +0100 Subject: [PATCH 06/52] Replace config with AKAZEConfig.h --- modules/features2d/src/akaze/AKAZEConfig.h | 189 +++++++++++++++++++++ modules/features2d/src/akaze/config.h | 155 ----------------- 2 files changed, 189 insertions(+), 155 deletions(-) create mode 100644 modules/features2d/src/akaze/AKAZEConfig.h delete mode 100644 modules/features2d/src/akaze/config.h diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/akaze/AKAZEConfig.h new file mode 100644 index 0000000000..444e07aac2 --- /dev/null +++ b/modules/features2d/src/akaze/AKAZEConfig.h @@ -0,0 +1,189 @@ +/** + * @file AKAZEConfig.h + * @brief AKAZE configuration file + * @date Feb 23, 2014 + * @author Pablo F. 
Alcantarilla, Jesus Nuevo + */ + +#pragma once + +/* ************************************************************************* */ +// OpenCV +#include +#include + +// OpenMP +#ifdef _OPENMP +# include +#endif + +// System Includes +#include +#include +#include +#include +#include + +/* ************************************************************************* */ +/// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right +const float gauss25[7][7] = { + {0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f}, + {0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f}, + {0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f}, + {0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f}, + {0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f}, + {0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f}, + {0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f} +}; + +/* ************************************************************************* */ +/// AKAZE Descriptor Type +enum DESCRIPTOR_TYPE { + SURF_UPRIGHT = 0, ///< Upright descriptors, not invariant to rotation + SURF = 1, + MSURF_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + MSURF = 3, + MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + MLDB = 5 +}; + +/* ************************************************************************* */ +/// AKAZE Diffusivities +enum DIFFUSIVITY_TYPE { + PM_G1 = 0, + PM_G2 = 1, + WEICKERT = 2, + CHARBONNIER = 3 +}; + +/* ************************************************************************* */ +/// AKAZE Timing structure +struct AKAZETiming { + + AKAZETiming() { + kcontrast = 0.0; + scale = 0.0; + derivatives = 0.0; + detector = 0.0; + extrema = 0.0; + subpixel = 0.0; + descriptor = 0.0; + } + + double kcontrast; ///< Contrast factor computation time in ms + double scale; ///< Nonlinear scale space computation time in ms + double derivatives; ///< Multiscale derivatives computation time in ms + double detector; ///< Feature detector computation time in ms + double extrema; ///< Scale space extrema computation time in ms + double subpixel; ///< Subpixel refinement computation time in ms + double descriptor; ///< Descriptors computation time in ms +}; + +/* ************************************************************************* */ +/// AKAZE configuration options structure +struct AKAZEOptions { + + AKAZEOptions() { + soffset = 1.6f; + derivative_factor = 1.5f; + omax = 4; + nsublevels = 4; + dthreshold = 0.001f; + min_dthreshold = 0.00001f; + + diffusivity = PM_G2; + descriptor = MLDB; + descriptor_size = 0; + descriptor_channels = 3; + descriptor_pattern_size = 10; + sderivatives = 1.0; + + kcontrast = 0.001f; + kcontrast_percentile = 0.7f; + kcontrast_nbins = 300; + + save_scale_space = false; + save_keypoints = false; + verbosity = false; + } + + int omin; ///< Initial octave level (-1 means that the size of the input image is duplicated) + int omax; ///< Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) + int nsublevels; ///< Default number of sublevels per scale level + int img_width; ///< Width of the input image + int img_height; ///< Height of the input image + float soffset; ///< Base scale offset (sigma units) + float derivative_factor; ///< 
Factor for the multiscale derivatives + float sderivatives; ///< Smoothing factor for the derivatives + DIFFUSIVITY_TYPE diffusivity; ///< Diffusivity type + + float dthreshold; ///< Detector response threshold to accept point + float min_dthreshold; ///< Minimum detector threshold to accept a point + + DESCRIPTOR_TYPE descriptor; ///< Type of descriptor + int descriptor_size; ///< Size of the descriptor in bits. 0->Full size + int descriptor_channels; ///< Number of channels in the descriptor (1, 2, 3) + int descriptor_pattern_size; ///< Actual patch size is 2*pattern_size*point.scale + + float kcontrast; ///< The contrast factor parameter + float kcontrast_percentile; ///< Percentile level for the contrast factor + size_t kcontrast_nbins; ///< Number of bins for the contrast factor histogram + + bool save_scale_space; ///< Set to true for saving the scale space images + bool save_keypoints; ///< Set to true for saving the detected keypoints and descriptors + bool verbosity; ///< Set to true for displaying verbosity information + + friend std::ostream& operator<<(std::ostream& os, + const AKAZEOptions& akaze_options) { + + os << std::left; +#define CHECK_AKAZE_OPTION(option) \ + os << std::setw(33) << #option << " = " << option << std::endl + + // Scale-space parameters. + CHECK_AKAZE_OPTION(akaze_options.omax); + CHECK_AKAZE_OPTION(akaze_options.nsublevels); + CHECK_AKAZE_OPTION(akaze_options.soffset); + CHECK_AKAZE_OPTION(akaze_options.sderivatives); + CHECK_AKAZE_OPTION(akaze_options.diffusivity); + // Detection parameters. + CHECK_AKAZE_OPTION(akaze_options.dthreshold); + // Descriptor parameters. + CHECK_AKAZE_OPTION(akaze_options.descriptor); + CHECK_AKAZE_OPTION(akaze_options.descriptor_channels); + CHECK_AKAZE_OPTION(akaze_options.descriptor_size); + // Save scale-space + CHECK_AKAZE_OPTION(akaze_options.save_scale_space); + // Verbose option for debug. + CHECK_AKAZE_OPTION(akaze_options.verbosity); +#undef CHECK_AKAZE_OPTIONS + + return os; + } +}; + +/* ************************************************************************* */ +/// AKAZE nonlinear diffusion filtering evolution +struct TEvolution { + + TEvolution() { + etime = 0.0f; + esigma = 0.0f; + octave = 0; + sublevel = 0; + sigma_size = 0; + } + + cv::Mat Lx, Ly; // First order spatial derivatives + cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives + cv::Mat Lflow; // Diffusivity image + cv::Mat Lt; // Evolution image + cv::Mat Lsmooth; // Smoothed image + cv::Mat Lstep; // Evolution step update + cv::Mat Ldet; // Detector response + float etime; // Evolution time + float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 + size_t octave; // Image octave + size_t sublevel; // Image sublevel in each octave + size_t sigma_size; // Integer sigma. 
For computing the feature detector responses +}; \ No newline at end of file diff --git a/modules/features2d/src/akaze/config.h b/modules/features2d/src/akaze/config.h deleted file mode 100644 index bb704bb182..0000000000 --- a/modules/features2d/src/akaze/config.h +++ /dev/null @@ -1,155 +0,0 @@ -#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_HPP__ -#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_HPP__ - -// STL -#include -#include -#include -#include -#include - -// OpenCV -#include "precomp.hpp" - -// OpenMP -#ifdef _OPENMP -# include -#endif - -// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right -static const float gauss25[7][7] = { - {0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f}, - {0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f}, - {0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f}, - {0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f}, - {0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f}, - {0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f}, - {0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f} -}; - - -// Scale Space parameters -static const float DEFAULT_SCALE_OFFSET = 1.60f; // Base scale offset (sigma units) -static const float DEFAULT_FACTOR_SIZE = 1.5f; // Factor for the multiscale derivatives -static const int DEFAULT_OCTAVE_MIN = 0; // Initial octave level (-1 means that the size of the input image is duplicated) -static const int DEFAULT_OCTAVE_MAX = 4; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) -static const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level -static const int DEFAULT_DIFFUSIVITY_TYPE = 1; -static const float KCONTRAST_PERCENTILE = 0.7f; -static const int KCONTRAST_NBINS = 300; -static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; -static const float DEFAULT_KCONTRAST = .01f; - - -// Detector Parameters -static const float DEFAULT_DETECTOR_THRESHOLD = 0.001f; // Detector response threshold to accept point -static const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001f; // Minimum Detector response threshold to accept point -static const int DEFAULT_LDB_DESCRIPTOR_SIZE = 0; // Use 0 for the full descriptor, or the number of bits -static const int DEFAULT_LDB_PATTERN_SIZE = 10; // Actual patch size is 2*pattern_size*point.scale; -static const int DEFAULT_LDB_CHANNELS = 3; - -// Descriptor Parameters -enum DESCRIPTOR_TYPE -{ - SURF_UPRIGHT = 0, // Upright descriptors, not invariant to rotation - SURF = 1, - MSURF_UPRIGHT = 2, // Upright descriptors, not invariant to rotation - MSURF = 3, - MLDB_UPRIGHT = 4, // Upright descriptors, not invariant to rotation - MLDB = 5 -}; - -static const int DEFAULT_DESCRIPTOR = MLDB; - -// Some debugging options -static const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images -static const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) -static const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios -static const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints - -// Options structure -struct AKAZEOptions -{ - int omin; - int omax; - int nsublevels; - int img_width; - int img_height; - int diffusivity; - float 
soffset; - float sderivatives; - float dthreshold; - float dthreshold2; - int descriptor; - int descriptor_size; - int descriptor_channels; - int descriptor_pattern_size; - bool save_scale_space; - bool save_keypoints; - bool verbosity; - - AKAZEOptions() - { - // Load the default options - soffset = DEFAULT_SCALE_OFFSET; - omax = DEFAULT_OCTAVE_MAX; - nsublevels = DEFAULT_NSUBLEVELS; - dthreshold = DEFAULT_DETECTOR_THRESHOLD; - diffusivity = DEFAULT_DIFFUSIVITY_TYPE; - descriptor = DEFAULT_DESCRIPTOR; - descriptor_size = DEFAULT_LDB_DESCRIPTOR_SIZE; - descriptor_channels = DEFAULT_LDB_CHANNELS; - descriptor_pattern_size = DEFAULT_LDB_PATTERN_SIZE; - sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; - save_scale_space = DEFAULT_SAVE_SCALE_SPACE; - save_keypoints = DEFAULT_SAVE_KEYPOINTS; - verbosity = DEFAULT_VERBOSITY; - } - - friend std::ostream& operator<<(std::ostream& os, - const AKAZEOptions& akaze_options) - { - os << std::left; -#define CHECK_AKAZE_OPTION(option) \ - os << std::setw(33) << #option << " = " << option << std::endl - - // Scale-space parameters. - CHECK_AKAZE_OPTION(akaze_options.omax); - CHECK_AKAZE_OPTION(akaze_options.nsublevels); - CHECK_AKAZE_OPTION(akaze_options.soffset); - CHECK_AKAZE_OPTION(akaze_options.sderivatives); - CHECK_AKAZE_OPTION(akaze_options.diffusivity); - // Detection parameters. - CHECK_AKAZE_OPTION(akaze_options.dthreshold); - // Descriptor parameters. - CHECK_AKAZE_OPTION(akaze_options.descriptor); - CHECK_AKAZE_OPTION(akaze_options.descriptor_channels); - CHECK_AKAZE_OPTION(akaze_options.descriptor_size); - // Save scale-space - CHECK_AKAZE_OPTION(akaze_options.save_scale_space); - // Verbose option for debug. - CHECK_AKAZE_OPTION(akaze_options.verbosity); -#undef CHECK_AKAZE_OPTIONS - - return os; - } -}; - -struct tevolution -{ - cv::Mat Lx, Ly; // First order spatial derivatives - cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives - cv::Mat Lflow; // Diffusivity image - cv::Mat Lt; // Evolution image - cv::Mat Lsmooth; // Smoothed image - cv::Mat Lstep; // Evolution step update - cv::Mat Ldet; // Detector response - float etime; // Evolution time - float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 - int octave; // Image octave - int sublevel; // Image sublevel in each octave - int sigma_size; // Integer sigma. For computing the feature detector responses -}; - - -#endif \ No newline at end of file From 6d500cc6f784f698c64734a7828b4fbc46599cea Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Wed, 23 Apr 2014 23:02:02 +0100 Subject: [PATCH 07/52] Merge KAZE and AKAZE features with most recent version --- modules/features2d/src/akaze/AKAZE.cpp | 586 ++++++++---------- modules/features2d/src/akaze/AKAZE.h | 181 ++---- modules/features2d/src/akaze/AKAZEConfig.h | 8 +- .../src/akaze/nldiffusion_functions.cpp | 419 ++++++------- .../src/akaze/nldiffusion_functions.h | 33 +- .../src/kaze/nldiffusion_functions.cpp | 6 +- .../src/kaze/nldiffusion_functions.h | 0 7 files changed, 535 insertions(+), 698 deletions(-) mode change 100755 => 100644 modules/features2d/src/kaze/nldiffusion_functions.h diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 379e0ba1aa..661a1cad8b 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -1,19 +1,5 @@ -//============================================================================= -// -// AKAZE.cpp -// Authors: Pablo F. 
Alcantarilla (1), Jesus Nuevo (2) -// Institutions: Georgia Institute of Technology (1) -// TrueVision Solutions (2) -// Date: 15/09/2013 -// Email: pablofdezalc@gmail.com -// -// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo -// All Rights Reserved -// See LICENSE for the license information -//============================================================================= - /** - * @file AKAZE.cpp + * @file AKAZEFeatures.cpp * @brief Main class for detecting and describing binary features in an * accelerated nonlinear scale space * @date Sep 15, 2013 @@ -21,68 +7,41 @@ */ #include "AKAZE.h" +#include "fed.h" +#include "nldiffusion_functions.h" using namespace std; using namespace cv; -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** - * @brief AKAZE constructor with input options - * @param options AKAZE configuration options + * @brief AKAZEFeatures constructor with input options + * @param options AKAZEFeatures configuration options * @note This constructor allocates memory for the nonlinear scale space */ -AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) { +AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { - soffset_ = options.soffset; - factor_size_ = DEFAULT_FACTOR_SIZE; - sderivatives_ = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; - omax_ = options.omax; - nsublevels_ = options.nsublevels; - dthreshold_ = options.dthreshold; - descriptor_ = options.descriptor; - diffusivity_ = options.diffusivity; - save_scale_space_ = options.save_scale_space; - verbosity_ = options.verbosity; - img_width_ = options.img_width; - img_height_ = options.img_height; - noctaves_ = omax_; ncycles_ = 0; reordering_ = true; - descriptor_size_ = options.descriptor_size; - descriptor_channels_ = options.descriptor_channels; - descriptor_pattern_size_ = options.descriptor_pattern_size; - tkcontrast_ = 0.0; - tscale_ = 0.0; - tderivatives_ = 0.0; - tdetector_ = 0.0; - textrema_ = 0.0; - tsubpixel_ = 0.0; - tdescriptor_ = 0.0; - if (descriptor_size_ > 0 && descriptor_ >= MLDB_UPRIGHT) { - generateDescriptorSubsample(descriptorSamples_,descriptorBits_,descriptor_size_, - descriptor_pattern_size_,descriptor_channels_); + if (options_.descriptor_size > 0 && options_.descriptor >= MLDB_UPRIGHT) { + generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, + options_.descriptor_pattern_size, options_.descriptor_channels); } Allocate_Memory_Evolution(); } -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** - * @brief AKAZE destructor + * @brief AKAZEFeatures destructor */ AKAZEFeatures::~AKAZEFeatures(void) { evolution_.clear(); } -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** * @brief This method allocates the memory for the nonlinear diffusion evolution */ @@ -92,132 +51,128 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { int level_height = 0, level_width = 0; // Allocate the dimension of the matrices for the evolution - for 
(int i = 0; i <= omax_-1 && i <= DEFAULT_OCTAVE_MAX; i++) { - rfactor = 1.0/pow(2.f,i); - level_height = (int)(img_height_*rfactor); - level_width = (int)(img_width_*rfactor); + for (int i = 0; i <= options_.omax-1; i++) { + rfactor = 1.0/pow(2.f, i); + level_height = (int)(options_.img_height*rfactor); + level_width = (int)(options_.img_width*rfactor); - // Smallest possible octave - if (level_width < 80 || level_height < 40) { - noctaves_ = i; - i = omax_; + // Smallest possible octave and allow one scale if the image is small + if ((level_width < 80 || level_height < 40) && i != 0) { + options_.omax = i; break; } - for (int j = 0; j < nsublevels_; j++) { - tevolution aux; - aux.Lx = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Ly = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Lxx = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Lxy = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Lyy = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Lt = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Ldet = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Lflow = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.Lstep = cv::Mat::zeros(level_height,level_width,CV_32F); - aux.esigma = soffset_*pow(2.f,(float)(j)/(float)(nsublevels_) + i); - aux.sigma_size = fRound(aux.esigma); - aux.etime = 0.5*(aux.esigma*aux.esigma); - aux.octave = i; - aux.sublevel = j; - evolution_.push_back(aux); + for (int j = 0; j < options_.nsublevels; j++) { + TEvolution step; + step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lxx = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lyy = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lflow = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F); + step.esigma = options_.soffset*pow(2.f, (float)(j)/(float)(options_.nsublevels) + i); + step.sigma_size = fRound(step.esigma); + step.etime = 0.5*(step.esigma*step.esigma); + step.octave = i; + step.sublevel = j; + evolution_.push_back(step); } } // Allocate memory for the number of cycles and time steps for (size_t i = 1; i < evolution_.size(); i++) { int naux = 0; - std::vector tau; + vector tau; float ttime = 0.0; ttime = evolution_[i].etime-evolution_[i-1].etime; - naux = fed_tau_by_process_time(ttime,1,0.25,reordering_,tau); + naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_,tau); nsteps_.push_back(naux); tsteps_.push_back(tau); ncycles_++; } } -//******************************************************************************* -//******************************************************************************* - +/* ************************************************************************* */ /** * @brief This method creates the nonlinear scale space for a given image * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully, -1 otherwise */ -int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { +int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { double t1 = 0.0, t2 = 0.0; if (evolution_.size() == 0) { - cout << "Error generating the nonlinear scale space!!" 
<< endl; - cout << "Firstly you need to call AKAZE::Allocate_Memory_Evolution()" << endl; + cerr << "Error generating the nonlinear scale space!!" << endl; + cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl; return -1; } - t1 = getTickCount(); + t1 = cv::getTickCount(); // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt,evolution_[0].Lt,0,0,soffset_); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); - // Firstly compute the kcontrast factor - kcontrast_ = compute_k_percentile(img,KCONTRAST_PERCENTILE,1.0,KCONTRAST_NBINS,0,0); + // First compute the kcontrast factor + options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, + 1.0, options_.kcontrast_nbins, 0, 0); - t2 = getTickCount(); - tkcontrast_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = cv::getTickCount(); + timing_.kcontrast = 1000.0*(t2-t1) / cv::getTickFrequency(); // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { if (evolution_[i].octave > evolution_[i-1].octave) { - halfsample_image(evolution_[i-1].Lt,evolution_[i].Lt); - kcontrast_ = kcontrast_*0.75; + halfsample_image(evolution_[i-1].Lt, evolution_[i].Lt); + options_.kcontrast = options_.kcontrast*0.75; } else { evolution_[i-1].Lt.copyTo(evolution_[i].Lt); } - gaussian_2D_convolution(evolution_[i].Lt,evolution_[i].Lsmooth,0,0,1.0); + gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0); // Compute the Gaussian derivatives Lx and Ly - image_derivatives_scharr(evolution_[i].Lsmooth,evolution_[i].Lx,1,0); - image_derivatives_scharr(evolution_[i].Lsmooth,evolution_[i].Ly,0,1); + image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); + image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1); // Compute the conductivity equation - switch (diffusivity_) { - case 0: - pm_g1(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + switch (options_.diffusivity) { + case PM_G1: + pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; - case 1: - pm_g2(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + case PM_G2: + pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; - case 2: - weickert_diffusivity(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + case WEICKERT: + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; - case 3: - charbonnier_diffusivity(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + case CHARBONNIER: + charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; default: - std::cerr << "Diffusivity: " << diffusivity_ << " is not supported" << std::endl; + cerr << "Diffusivity: " << options_.diffusivity << " is not supported" << endl; } // Perform FED n inner steps for (int j = 0; j < nsteps_[i-1]; j++) { - nld_step_scalar(evolution_[i].Lt,evolution_[i].Lflow,evolution_[i].Lstep,tsteps_[i-1][j]); + nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i-1][j]); } } - t2 = getTickCount(); - tscale_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = cv::getTickCount(); + timing_.scale = 1000.0*(t2-t1) / cv::getTickFrequency(); return 0; } 
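The rewritten Create_Nonlinear_Scale_Space() above is driven entirely through the public entry points this patch introduces: the constructor (which already calls Allocate_Memory_Evolution()), the scale-space builder, Feature_Detection() and Compute_Descriptors(). A minimal caller-side sketch follows, assuming only the AKAZEOptions fields and AKAZEFeatures methods shown in these hunks; the input file name and the [0,1] float conversion are illustrative assumptions (the upstream AKAZE demos normalize 8-bit images this way), and since AKAZE.h pulls in the module-internal precomp.hpp this is a sketch rather than a drop-in standalone sample.

/* ************************************************************************* */
// Caller-side sketch (illustrative only, not part of the patch): exercises the
// scale-space, detection and description entry points shown in the hunks above.
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>   // cv::imread (hypothetical input source)
#include "AKAZE.h"

int main() {
  // Hypothetical input; any 8-bit grayscale image works.
  cv::Mat img = cv::imread("input.png", 0);
  cv::Mat img_32;
  img.convertTo(img_32, CV_32F, 1.0f/255.0f);     // float image, assumed normalized to [0,1]

  AKAZEOptions options;                            // defaults from AKAZEConfig.h
  options.img_width  = img_32.cols;
  options.img_height = img_32.rows;

  AKAZEFeatures evolution(options);                // constructor calls Allocate_Memory_Evolution()
  evolution.Create_Nonlinear_Scale_Space(img_32);  // FED-based nonlinear diffusion shown above

  std::vector<cv::KeyPoint> kpts;
  cv::Mat desc;
  evolution.Feature_Detection(kpts);               // Hessian-determinant extrema + subpixel refinement
  evolution.Compute_Descriptors(kpts, desc);       // M-LDB binary descriptor with the default options

  return 0;
}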
-//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of detected keypoints @@ -226,19 +181,18 @@ void AKAZEFeatures::Feature_Detection(std::vector& kpts) { double t1 = 0.0, t2 = 0.0; - t1 = getTickCount(); + t1 = cv::getTickCount(); + vector().swap(kpts); Compute_Determinant_Hessian_Response(); Find_Scale_Space_Extrema(kpts); Do_Subpixel_Refinement(kpts); - t2 = getTickCount(); - tdetector_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = cv::getTickCount(); + timing_.detector = 1000.0*(t2-t1) / cv::getTickFrequency(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the multiscale derivatives for the nonlinear scale space */ @@ -246,20 +200,21 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { double t1 = 0.0, t2 = 0.0; - t1 = getTickCount(); + t1 = cv::getTickCount(); #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < evolution_.size(); i++) { - float ratio = pow(2.f,evolution_[i].octave); - int sigma_size_ = fRound(evolution_[i].esigma*factor_size_/ratio); + for (int i = 0; i < (int)(evolution_.size()); i++) { - compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Lx,1,0,sigma_size_); - compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Ly,0,1,sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxx,1,0,sigma_size_); - compute_scharr_derivatives(evolution_[i].Ly,evolution_[i].Lyy,0,1,sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxy,0,1,sigma_size_); + float ratio = pow(2.f,(float)evolution_[i].octave); + int sigma_size_ = fRound(evolution_[i].esigma*options_.derivative_factor/ratio); + + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); @@ -268,13 +223,11 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); } - t2 = getTickCount(); - tderivatives_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = cv::getTickCount(); + timing_.derivatives = 1000.0*(t2-t1) / cv::getTickFrequency(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as the feature detector response @@ -285,7 +238,7 @@ void 
AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { Compute_Multiscale_Derivatives(); for (size_t i = 0; i < evolution_.size(); i++) { - if (verbosity_ == true) { + if (options_.verbosity == true) { cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; } @@ -300,9 +253,7 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method finds extrema in the nonlinear scale space * @param kpts Vector of detected keypoints @@ -316,17 +267,18 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; bool is_extremum = false, is_repeated = false, is_out = false; cv::KeyPoint point; + vector kpts_aux; // Set maximum size - if (descriptor_ == SURF_UPRIGHT || descriptor_ == SURF || - descriptor_ == MLDB_UPRIGHT || descriptor_ == MLDB) { + if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF || + options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) { smax = 10.0*sqrtf(2.0); } - else if (descriptor_ == MSURF_UPRIGHT || descriptor_ == MSURF) { + else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) { smax = 12.0*sqrtf(2.0); } - t1 = getTickCount(); + t1 = cv::getTickCount(); for (size_t i = 0; i < evolution_.size(); i++) { for (int ix = 1; ix < evolution_[i].Ldet.rows-1; ix++) { @@ -337,7 +289,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { value = *(evolution_[i].Ldet.ptr(ix)+jx); // Filter the points with the detector threshold - if (value > dthreshold_ && value >= DEFAULT_MIN_DETECTOR_THRESHOLD && + if (value > options_.dthreshold && value >= options_.min_dthreshold && value > *(evolution_[i].Ldet.ptr(ix)+jx-1) && value > *(evolution_[i].Ldet.ptr(ix)+jx+1) && value > *(evolution_[i].Ldet.ptr(ix-1)+jx-1) && @@ -346,10 +298,10 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { value > *(evolution_[i].Ldet.ptr(ix+1)+jx-1) && value > *(evolution_[i].Ldet.ptr(ix+1)+jx) && value > *(evolution_[i].Ldet.ptr(ix+1)+jx+1)) { - is_extremum = true; + is_extremum = true; point.response = fabs(value); - point.size = evolution_[i].esigma*factor_size_; + point.size = evolution_[i].esigma*options_.derivative_factor; point.octave = evolution_[i].octave; point.class_id = i; ratio = pow(2.f,point.octave); @@ -357,13 +309,14 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { point.pt.x = jx; point.pt.y = ix; - for (size_t ik = 0; ik < kpts.size(); ik++) { - if (point.class_id == kpts[ik].class_id-1 || - point.class_id == kpts[ik].class_id || - point.class_id == kpts[ik].class_id+1) { - dist = sqrt(pow(point.pt.x*ratio-kpts[ik].pt.x,2)+pow(point.pt.y*ratio-kpts[ik].pt.y,2)); + // Compare response with the same and lower scale + for (size_t ik = 0; ik < kpts_aux.size(); ik++) { + + if ((point.class_id-1) == kpts_aux[ik].class_id || + point.class_id == kpts_aux[ik].class_id) { + dist = sqrt(pow(point.pt.x*ratio-kpts_aux[ik].pt.x,2)+pow(point.pt.y*ratio-kpts_aux[ik].pt.y,2)); if (dist <= point.size) { - if (point.response > kpts[ik].response) { + if (point.response > kpts_aux[ik].response) { id_repeated = ik; is_repeated = true; } @@ -377,6 +330,7 @@ void 
AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { // Check out of bounds if (is_extremum == true) { + // Check that the point is under the image limits for the descriptor computation left_x = fRound(point.pt.x-smax*sigma_size_)-1; right_x = fRound(point.pt.x+smax*sigma_size_) +1; @@ -392,13 +346,13 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { if (is_repeated == false) { point.pt.x *= ratio; point.pt.y *= ratio; - kpts.push_back(point); + kpts_aux.push_back(point); npoints++; } else { point.pt.x *= ratio; point.pt.y *= ratio; - kpts[id_repeated] = point; + kpts_aux[id_repeated] = point; } } // if is_out } //if is_extremum @@ -407,13 +361,34 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { } // for ix } // for i - t2 = getTickCount(); - textrema_ = 1000.0*(t2-t1) / getTickFrequency(); + // Now filter points with the upper scale level + for (size_t i = 0; i < kpts_aux.size(); i++) { + + is_repeated = false; + const cv::KeyPoint& point = kpts_aux[i]; + for (size_t j = i+1; j < kpts_aux.size(); j++) { + + // Compare response with the upper scale + if ((point.class_id+1) == kpts_aux[j].class_id) { + dist = sqrt(pow(point.pt.x-kpts_aux[j].pt.x,2)+pow(point.pt.y-kpts_aux[j].pt.y,2)); + if (dist <= point.size) { + if (point.response < kpts_aux[j].response) { + is_repeated = true; + break; + } + } + } + } + + if (is_repeated == false) + kpts.push_back(point); + } + + t2 = cv::getTickCount(); + timing_.extrema = 1000.0*(t2-t1) / cv::getTickFrequency(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints @@ -424,11 +399,11 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { float Dx = 0.0, Dy = 0.0, ratio = 0.0; float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; int x = 0, y = 0; - Mat A = Mat::zeros(2,2,CV_32F); - Mat b = Mat::zeros(2,1,CV_32F); - Mat dst = Mat::zeros(2,1,CV_32F); + cv::Mat A = cv::Mat::zeros(2, 2, CV_32F); + cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); + cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); - t1 = getTickCount(); + t1 = cv::getTickCount(); for (size_t i = 0; i < kpts.size(); i++) { ratio = pow(2.f,kpts[i].octave); @@ -462,7 +437,7 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { *(b.ptr(0)) = -Dx; *(b.ptr(1)) = -Dy; - solve(A,b,dst,DECOMP_LU); + cv::solve(A, b, dst, DECOMP_LU); if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0) { kpts[i].pt.x = x + (*(dst.ptr(0))); @@ -481,21 +456,19 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { } } - t2 = getTickCount(); - tsubpixel_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = cv::getTickCount(); + timing_.subpixel = 1000.0*(t2-t1) / cv::getTickFrequency(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method performs feature suppression based on 2D distance * @param kpts Vector of keypoints * @param mdist Maximum distance in pixels */ -void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, float mdist) { +void 
AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, float mdist) const { - vector aux; + vector aux; vector to_delete; float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; bool found = false; @@ -537,9 +510,7 @@ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts aux.clear(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of detected keypoints @@ -549,32 +520,32 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat double t1 = 0.0, t2 = 0.0; - t1 = getTickCount(); + t1 = cv::getTickCount(); // Allocate memory for the matrix with the descriptors - if (descriptor_ < MLDB_UPRIGHT) { - desc = cv::Mat::zeros(kpts.size(),64,CV_32FC1); + if (options_.descriptor < MLDB_UPRIGHT) { + desc = cv::Mat::zeros(kpts.size(), 64, CV_32FC1); } else { // We use the full length binary descriptor -> 486 bits - if (descriptor_size_ == 0) { - int t = (6+36+120)*descriptor_channels_; - desc = cv::Mat::zeros(kpts.size(),ceil(t/8.),CV_8UC1); + if (options_.descriptor_size == 0) { + int t = (6+36+120)*options_.descriptor_channels; + desc = cv::Mat::zeros(kpts.size(), ceil(t/8.), CV_8UC1); } else { // We use the random bit selection length binary descriptor - desc = cv::Mat::zeros(kpts.size(),ceil(descriptor_size_/8.),CV_8UC1); + desc = cv::Mat::zeros(kpts.size(), ceil(options_.descriptor_size/8.), CV_8UC1); } } - switch (descriptor_) - { + switch (options_.descriptor) { + case SURF_UPRIGHT : // Upright descriptors, not invariant to rotation { #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < kpts.size(); i++) { + for (int i = 0; i < (int)(kpts.size()); i++) { Get_SURF_Descriptor_Upright_64(kpts[i],desc.ptr(i)); } } @@ -584,8 +555,8 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); + for (int i = 0; i < (int)(kpts.size()); i++) { + Compute_Main_Orientation(kpts[i]); Get_SURF_Descriptor_64(kpts[i],desc.ptr(i)); } } @@ -595,7 +566,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < kpts.size(); i++) { + for (int i = 0; i < (int)(kpts.size()); i++) { Get_MSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); } } @@ -605,8 +576,8 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); + for (int i = 0; i < (int)(kpts.size()); i++) { + Compute_Main_Orientation(kpts[i]); Get_MSURF_Descriptor_64(kpts[i],desc.ptr(i)); } } @@ -616,11 +587,11 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < kpts.size(); i++) { - if (descriptor_size_ == 0) - Get_Upright_MLDB_Full_Descriptor(kpts[i],desc.ptr(i)); + for (int i = 0; i < (int)(kpts.size()); i++) { + if (options_.descriptor_size == 0) + Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); else - Get_Upright_MLDB_Descriptor_Subset(kpts[i],desc.ptr(i)); + Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); } } 
break; @@ -629,31 +600,29 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - if (descriptor_size_ == 0) - Get_MLDB_Full_Descriptor(kpts[i],desc.ptr(i)); + for (int i = 0; i < (int)(kpts.size()); i++) { + Compute_Main_Orientation(kpts[i]); + if (options_.descriptor_size == 0) + Get_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); else - Get_MLDB_Descriptor_Subset(kpts[i],desc.ptr(i)); + Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); } } break; } - t2 = getTickCount(); - tdescriptor_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = cv::getTickCount(); + timing_.descriptor = 1000.0*(t2-t1) / cv::getTickFrequency(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the main orientation for a given keypoint * @param kpt Input keypoint * @note The orientation is computed using a similar approach as described in the * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void AKAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { +void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt) const { int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; @@ -718,9 +687,7 @@ void AKAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the upright descriptor of the provided keypoint * @param kpt Input keypoint @@ -728,7 +695,7 @@ void AKAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint& kpt) { * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -808,8 +775,7 @@ void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, floa } } -//************************************************************************************* -//************************************************************************************* +/* ************************************************************************* */ /** * @brief This method computes the descriptor of the provided keypoint given the * main orientation @@ -819,7 +785,7 @@ void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, floa * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -906,9 +872,7 @@ void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the upright descriptor (not rotation invariant) of * the provided keypoint @@ -918,7 +882,7 @@ void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1029,9 +993,7 @@ void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, flo } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the descriptor of the provided keypoint given the * main orientation of the keypoint @@ -1041,7 +1003,7 @@ void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, flo * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) { +void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1156,16 +1118,14 @@ void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the rupright descriptor (not rotation invariant) of * the provided keypoint * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { +void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { float di = 0.0, dx = 0.0, dy = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; @@ -1175,9 +1135,9 @@ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, un int dcount1 = 0, dcount2 = 0; // Matrices for the M-LDB descriptor - Mat values_1 = 
Mat::zeros(4,descriptor_channels_,CV_32FC1); - Mat values_2 = Mat::zeros(9,descriptor_channels_,CV_32FC1); - Mat values_3 = Mat::zeros(16,descriptor_channels_,CV_32FC1); + cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); // Get the information from the keypoint ratio = (float)(1<(i)) > *(values_2.ptr(j))) { @@ -1318,6 +1280,7 @@ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, un for (int k = i; k < i + sample_step; k++) { for (int l = j; l < j + sample_step; l++) { + // Get the coordinates of the sample point sample_y = yf + l*scale; sample_x = xf + k*scale; @@ -1347,8 +1310,8 @@ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, un } } - dcount2 = 0; //Do binary comparison third level + dcount2 = 0; for (int i = 0; i < 16; i++) { for (int j = i+1; j < 16; j++) { if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { @@ -1369,16 +1332,14 @@ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, un } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the descriptor of the provided keypoint given the * main orientation of the keypoint * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) { +void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; @@ -1388,9 +1349,9 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c int dcount1 = 0, dcount2 = 0; // Matrices for the M-LDB descriptor - Mat values_1 = Mat::zeros(4,descriptor_channels_,CV_32FC1); - Mat values_2 = Mat::zeros(9,descriptor_channels_,CV_32FC1); - Mat values_3 = Mat::zeros(16,descriptor_channels_,CV_32FC1); + cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); // Get the information from the keypoint ratio = (float)(1<(dcount2)) = di; - if ( descriptor_channels_ > 1 ) { + if (options_.descriptor_channels > 1 ) { *(values_1.ptr(dcount2)+1) = dx; } - if ( descriptor_channels_ > 2 ) { + if (options_.descriptor_channels > 2 ) { *(values_1.ptr(dcount2)+2) = dy; } @@ -1469,7 +1431,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) { for (int i = 0; i < 4; i++) { for (int j = i+1; j < 4; j++) { if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { @@ -1481,7 +1443,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } - if (descriptor_channels_ > 2) { + if (options_.descriptor_channels > 2) { for (int i = 0; i < 4; i++) { for ( int j = i+1; j < 4; j++) { if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { @@ -1517,10 +1479,10 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c ry = 
*(evolution_[level].Ly.ptr(y1)+x1); di += ri; - if (descriptor_channels_ == 2) { + if (options_.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (descriptor_channels_ == 3) { + else if (options_.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; rrx = -rx*si + ry*co; @@ -1537,11 +1499,11 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c dy /= nsamples; *(values_2.ptr(dcount2)) = di; - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) { *(values_2.ptr(dcount2)+1) = dx; } - if (descriptor_channels_ > 2) { + if (options_.descriptor_channels > 2) { *(values_2.ptr(dcount2)+2) = dy; } @@ -1559,7 +1521,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) { for (int i = 0; i < 9; i++) { for (int j = i+1; j < 9; j++) { if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { @@ -1570,7 +1532,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } - if (descriptor_channels_ > 2) { + if (options_.descriptor_channels > 2) { for (int i = 0; i < 9; i++) { for (int j = i+1; j < 9; j++) { if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { @@ -1592,6 +1554,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c for (int k = i; k < i + sample_step; k++) { for (int l = j; l < j + sample_step; l++) { + // Get the coordinates of the sample point sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); @@ -1604,10 +1567,10 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c ry = *(evolution_[level].Ly.ptr(y1)+x1); di += ri; - if (descriptor_channels_ == 2) { + if (options_.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (descriptor_channels_ == 3) { + else if (options_.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; rrx = -rx*si + ry*co; @@ -1624,13 +1587,11 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c dy /= nsamples; *(values_3.ptr(dcount2)) = di; - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) *(values_3.ptr(dcount2)+1) = dx; - } - if (descriptor_channels_ > 2) { + if (options_.descriptor_channels > 2) *(values_3.ptr(dcount2)+2) = dy; - } dcount2++; } @@ -1646,7 +1607,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) { for (int i = 0; i < 16; i++) { for (int j = i+1; j < 16; j++) { if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { @@ -1657,8 +1618,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } - if (descriptor_channels_ > 2) - { + if (options_.descriptor_channels > 2) { for (int i = 0; i < 16; i++) { for (int j = i+1; j < 16; j++) { if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { @@ -1670,9 +1630,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the M-LDB descriptor of the provided keypoint given the * main orientation of the keypoint. 
The descriptor is computed based on a subset of @@ -1682,8 +1640,8 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c */ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { - float di, dx, dy; - float rx, ry; + float di = 0.f, dx = 0.f, dy = 0.f; + float rx = 0.f, ry = 0.f; float sample_x = 0.f, sample_y = 0.f; int x1 = 0, y1 = 0; @@ -1698,15 +1656,15 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned float si = sin(angle); // Allocate memory for the matrix of values - Mat values = cv::Mat_::zeros((4+9+16)*descriptor_channels_,1); + cv::Mat values = cv::Mat_::zeros((4+9+16)*options_.descriptor_channels, 1); // Sample everything, but only do the comparisons vector steps(3); - steps.at(0) = descriptor_pattern_size_; - steps.at(1) = ceil(2.f*descriptor_pattern_size_/3.f); - steps.at(2) = descriptor_pattern_size_/2; + steps.at(0) = options_.descriptor_pattern_size; + steps.at(1) = ceil(2.f*options_.descriptor_pattern_size/3.f); + steps.at(2) = options_.descriptor_pattern_size/2; - for (int i=0; i(i); int sample_step = steps.at(coords[0]); di=0.0f; @@ -1715,6 +1673,7 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned for (int k = coords[1]; k < coords[1] + sample_step; k++) { for (int l = coords[2]; l < coords[2] + sample_step; l++) { + // Get the coordinates of the sample point sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); @@ -1724,14 +1683,14 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned di += *(evolution_[level].Lt.ptr(y1)+x1); - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) { rx = *(evolution_[level].Lx.ptr(y1)+x1); ry = *(evolution_[level].Ly.ptr(y1)+x1); - if (descriptor_channels_ == 2) { + if (options_.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (descriptor_channels_ == 3) { + else if (options_.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis dx += rx*co + ry*si; dy += -rx*si + ry*co; @@ -1740,14 +1699,14 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned } } - *(values.ptr(descriptor_channels_*i)) = di; + *(values.ptr(options_.descriptor_channels*i)) = di; - if (descriptor_channels_ == 2) { - *(values.ptr(descriptor_channels_*i+1)) = dx; + if (options_.descriptor_channels == 2) { + *(values.ptr(options_.descriptor_channels*i+1)) = dx; } - else if (descriptor_channels_ == 3) { - *(values.ptr(descriptor_channels_*i+1)) = dx; - *(values.ptr(descriptor_channels_*i+2)) = dy; + else if (options_.descriptor_channels == 3) { + *(values.ptr(options_.descriptor_channels*i+1)) = dx; + *(values.ptr(options_.descriptor_channels*i+2)) = dy; } } @@ -1762,9 +1721,7 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This method computes the upright (not rotation invariant) M-LDB descriptor * of the provided keypoint given the main orientation of the keypoint. 
@@ -1787,22 +1744,21 @@ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, float xf = kpt.pt.x/ratio; // Allocate memory for the matrix of values - Mat values = cv::Mat_::zeros((4+9+16)*descriptor_channels_,1); + Mat values = cv::Mat_::zeros((4+9+16)*options_.descriptor_channels, 1); vector steps(3); - steps.at(0) = descriptor_pattern_size_; - steps.at(1) = ceil(2.f*descriptor_pattern_size_/3.f); - steps.at(2) = descriptor_pattern_size_/2; + steps.at(0) = options_.descriptor_pattern_size; + steps.at(1) = ceil(2.f*options_.descriptor_pattern_size/3.f); + steps.at(2) = options_.descriptor_pattern_size/2; for (int i=0; i < descriptorSamples_.rows; i++) { int *coords = descriptorSamples_.ptr(i); int sample_step = steps.at(coords[0]); - di=0.0f; - dx=0.0f; - dy=0.0f; + di=0.0f, dx=0.0f, dy=0.0f; for (int k = coords[1]; k < coords[1] + sample_step; k++) { for (int l = coords[2]; l < coords[2] + sample_step; l++) { + // Get the coordinates of the sample point sample_y = yf + l*scale; sample_x = xf + k*scale; @@ -1811,14 +1767,14 @@ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, x1 = fRound(sample_x); di += *(evolution_[level].Lt.ptr(y1)+x1); - if (descriptor_channels_ > 1) { + if (options_.descriptor_channels > 1) { rx = *(evolution_[level].Lx.ptr(y1)+x1); ry = *(evolution_[level].Ly.ptr(y1)+x1); - if (descriptor_channels_ == 2) { + if (options_.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (descriptor_channels_ == 3) { + else if (options_.descriptor_channels == 3) { dx += rx; dy += ry; } @@ -1826,14 +1782,14 @@ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, } } - *(values.ptr(descriptor_channels_*i)) = di; + *(values.ptr(options_.descriptor_channels*i)) = di; - if (descriptor_channels_ == 2) { - *(values.ptr(descriptor_channels_*i+1)) = dx; + if (options_.descriptor_channels == 2) { + *(values.ptr(options_.descriptor_channels*i+1)) = dx; } - else if (descriptor_channels_ == 3) { - *(values.ptr(descriptor_channels_*i+1)) = dx; - *(values.ptr(descriptor_channels_*i+2)) = dy; + else if (options_.descriptor_channels == 3) { + *(values.ptr(options_.descriptor_channels*i+1)) = dx; + *(values.ptr(options_.descriptor_channels*i+2)) = dy; } } @@ -1848,9 +1804,23 @@ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, } } -//************************************************************************************* -//************************************************************************************* + +/* ************************************************************************* */ +/** + * @brief This method displays the computation times +*/ +void AKAZEFeatures::Show_Computation_Times() const { + cout << "(*) Time Scale Space: " << timing_.scale << endl; + cout << "(*) Time Detector: " << timing_.detector << endl; + cout << " - Time Derivatives: " << timing_.derivatives << endl; + cout << " - Time Extrema: " << timing_.extrema << endl; + cout << " - Time Subpixel: " << timing_.subpixel << endl; + cout << "(*) Time Descriptor: " << timing_.descriptor << endl; + cout << endl; +} + +/* ************************************************************************* */ /** * @brief This function computes a (quasi-random) list of bits to be taken * from the full descriptor. 
To speed the extraction, the function creates @@ -1967,9 +1937,7 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int comparisons = comps.rowRange(0,nbits).clone(); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes the angle from the vector given by (X Y). From 0 to 2*Pi */ @@ -1994,9 +1962,7 @@ inline float get_angle(float x, float y) { return 0; } -//************************************************************************************** -//************************************************************************************** - +/* ************************************************************************* */ /** * @brief This function computes the value of a 2D Gaussian function * @param x X Position @@ -2004,13 +1970,10 @@ inline float get_angle(float x, float y) { * @param sig Standard Deviation */ inline float gaussian(float x, float y, float sigma) { - return expf(-(x*x+y*y)/(2.0f*sigma*sigma)); } -//************************************************************************************** -//************************************************************************************** - +/* ************************************************************************* */ /** * @brief This function checks descriptor limits * @param x X Position @@ -2018,7 +1981,7 @@ inline float gaussian(float x, float y, float sigma) { * @param width Image width * @param height Image height */ -inline void check_descriptor_limits(int &x, int &y, const int width, const int height) { +inline void check_descriptor_limits(int &x, int &y, int width, int height) { if (x < 0) { x = 0; @@ -2037,16 +2000,13 @@ inline void check_descriptor_limits(int &x, int &y, const int width, const int h } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This funtion rounds float to nearest integer * @param flt Input float * @return dst Nearest integer */ -inline int fRound(float flt) -{ +inline int fRound(float flt) { return (int)(flt+0.5f); } diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h index ad0364e7a2..c4929571f4 100644 --- a/modules/features2d/src/akaze/AKAZE.h +++ b/modules/features2d/src/akaze/AKAZE.h @@ -6,148 +6,84 @@ * @author Pablo F. 
Alcantarilla, Jesus Nuevo */ -#ifndef _AKAZE_H_ -#define _AKAZE_H_ - -//************************************************************************************* -//************************************************************************************* +#pragma once +/* ************************************************************************* */ // Includes -#include "config.h" -#include "fed.h" -#include "nldiffusion_functions.h" - -//************************************************************************************* -//************************************************************************************* +#include "precomp.hpp" +#include "AKAZEConfig.h" +/* ************************************************************************* */ // AKAZE Class Declaration class AKAZEFeatures { private: - // Parameters of the AKAZE class - int omax_; // Maximum octave level - int noctaves_; // Number of octaves - int nsublevels_; // Number of sublevels per octave level - int img_width_; // Width of the original image - int img_height_; // Height of the original image - float soffset_; // Base scale offset - float factor_size_; // Factor for the multiscale derivatives - float sderivatives_; // Standard deviation of the Gaussian for the nonlinear diff. derivatives - float kcontrast_; // The contrast parameter for the scalar nonlinear diffusion - float dthreshold_; // Feature detector threshold response - int diffusivity_; // Diffusivity type, 0->PM G1, 1->PM G2, 2-> Weickert, 3->Charbonnier - int descriptor_; // Descriptor mode: - // 0-> SURF_UPRIGHT, 1->SURF - // 2-> M-SURF_UPRIGHT, 3->M-SURF - // 4-> M-LDB_UPRIGHT, 5->M-LDB - int descriptor_size_; // Size of the descriptor in bits. Use 0 for the full descriptor - int descriptor_pattern_size_; // Size of the pattern. Actual size sampled is 2*pattern_size - int descriptor_channels_; // Number of channels to consider in the M-LDB descriptor - bool save_scale_space_; // For saving scale space images - bool verbosity_; // Verbosity level - std::vector evolution_; // Vector of nonlinear diffusion evolution + AKAZEOptions options_; ///< Configuration options for AKAZE + std::vector evolution_; ///< Vector of nonlinear diffusion evolution - // FED parameters - int ncycles_; // Number of cycles - bool reordering_; // Flag for reordering time steps - std::vector > tsteps_; // Vector of FED dynamic time steps - std::vector nsteps_; // Vector of number of steps per cycle + /// FED parameters + int ncycles_; ///< Number of cycles + bool reordering_; ///< Flag for reordering time steps + std::vector > tsteps_; ///< Vector of FED dynamic time steps + std::vector nsteps_; ///< Vector of number of steps per cycle - // Some matrices for the M-LDB descriptor computation - cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. - cv::Mat descriptorBits_; - cv::Mat bitMask_; + /// Matrices for the M-LDB descriptor computation + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ cv::Mat descriptorBits_; + cv::Mat bitMask_; - // Computation times variables in ms - double tkcontrast_; // Kcontrast factor computation - double tscale_; // Nonlinear Scale space generation - double tderivatives_; // Multiscale derivatives - double tdetector_; // Feature detector - double textrema_; // Scale Space extrema - double tsubpixel_; // Subpixel refinement - double tdescriptor_; // Feature descriptors + /// Computation times variables in ms + AKAZETiming timing_; public: - // Constructor - AKAZEFeatures(const AKAZEOptions &options); + /// Constructor with input arguments + AKAZEFeatures(const AKAZEOptions& options); - // Destructor - ~AKAZEFeatures(void); + /// Destructor + ~AKAZEFeatures(); - // Setters - void Set_Octave_Max(const int& omax) { - omax_ = omax; - } - void Set_NSublevels(const int& nsublevels) { - nsublevels_ = nsublevels; - } - void Set_Save_Scale_Space_Flag(const bool& save_scale_space) { - save_scale_space_ = save_scale_space; - } - void Set_Image_Width(const int& img_width) { - img_width_ = img_width; - } - void Set_Image_Height(const int& img_height) { - img_height_ = img_height; - } + /// Scale Space methods + void Allocate_Memory_Evolution(); + int Create_Nonlinear_Scale_Space(const cv::Mat& img); + void Feature_Detection(std::vector& kpts); + void Compute_Determinant_Hessian_Response(void); + void Compute_Multiscale_Derivatives(void); + void Find_Scale_Space_Extrema(std::vector& kpts); + void Do_Subpixel_Refinement(std::vector& kpts); + void Feature_Suppression_Distance(std::vector& kpts, float mdist) const; - // Getters - int Get_Image_Width(void) { - return img_width_; - } - int Get_Image_Height(void) { - return img_height_; - } - double Get_Time_KContrast(void) { - return tkcontrast_; - } - double Get_Time_Scale_Space(void) { - return tscale_; - } - double Get_Time_Derivatives(void) { - return tderivatives_; - } - double Get_Time_Detector(void) { - return tdetector_; - } - double Get_Time_Descriptor(void) { - return tdescriptor_; - } + // Feature description methods + void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); + void Compute_Main_Orientation(cv::KeyPoint& kpt) const; - // Scale Space methods - void Allocate_Memory_Evolution(void); - int Create_Nonlinear_Scale_Space(const cv::Mat& img); - void Feature_Detection(std::vector& kpts); - void Compute_Determinant_Hessian_Response(void); - void Compute_Multiscale_Derivatives(void); - void Find_Scale_Space_Extrema(std::vector& kpts); - void Do_Subpixel_Refinement(std::vector& kpts); - void Feature_Suppression_Distance(std::vector& kpts, float mdist); + // SURF Pattern Descriptor + void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - // Feature description methods - void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); - void Compute_Main_Orientation_SURF(cv::KeyPoint& kpt); + // M-SURF Pattern Descriptor + void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - // SURF Pattern Descriptor - void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc); - void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc); + // M-LDB Pattern Descriptor + void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + void Get_Upright_MLDB_Descriptor_Subset(const 
cv::KeyPoint& kpt, unsigned char* desc); + void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); - // M-SURF Pattern Descriptor - void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc); - void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc); + // Methods for saving some results and showing computation times + void Save_Scale_Space(); + void Save_Detector_Responses(); + void Show_Computation_Times() const; - // M-LDB Pattern Descriptor - void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc); - void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc); - void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc); - void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc); + /// Return the computation times + AKAZETiming Get_Computation_Times() const { + return timing_; + } }; -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ // Inline functions /** * @brief This function sets default parameters for the A-KAZE detector. @@ -157,13 +93,8 @@ void setDefaultAKAZEOptions(AKAZEOptions& options); // Inline functions void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, - int nbits, int pattern_size, int nchannels); + int nbits, int pattern_size, int nchannels); float get_angle(float x, float y); float gaussian(float x, float y, float sigma); -void check_descriptor_limits(int& x, int& y, const int width, const int height); +void check_descriptor_limits(int& x, int& y, int width, int height); int fRound(float flt); - -//************************************************************************************* -//************************************************************************************* - -#endif diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/akaze/AKAZEConfig.h index 444e07aac2..d82b0f4271 100644 --- a/modules/features2d/src/akaze/AKAZEConfig.h +++ b/modules/features2d/src/akaze/AKAZEConfig.h @@ -9,13 +9,7 @@ /* ************************************************************************* */ // OpenCV -#include -#include - -// OpenMP -#ifdef _OPENMP -# include -#endif +#include "precomp.hpp" // System Includes #include diff --git a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp index 0699e92ca0..5300223fc3 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/akaze/nldiffusion_functions.cpp @@ -24,9 +24,7 @@ using namespace std; using namespace cv; -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function smoothes an image with a Gaussian kernel * @param src Input image @@ -36,32 +34,30 @@ using namespace cv; * @param sigma Kernel standard deviation */ void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, - const size_t& ksize_y, const float& sigma) { + const size_t& ksize_y, const float& sigma) { - size_t ksize_x_ = 0, ksize_y_ = 0; + size_t ksize_x_ = 0, ksize_y_ = 0; - // Compute an appropriate kernel size according to 
the specified sigma - if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = ceil(2.0*(1.0 + (sigma-0.8)/(0.3))); - ksize_y_ = ksize_x_; - } + // Compute an appropriate kernel size according to the specified sigma + if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { + ksize_x_ = ceil(2.0*(1.0 + (sigma - 0.8) / (0.3))); + ksize_y_ = ksize_x_; + } - // The kernel size must be and odd number - if ((ksize_x_ % 2) == 0) { - ksize_x_ += 1; - } + // The kernel size must be and odd number + if ((ksize_x_ % 2) == 0) { + ksize_x_ += 1; + } - if ((ksize_y_ % 2) == 0) { - ksize_y_ += 1; - } + if ((ksize_y_ % 2) == 0) { + ksize_y_ += 1; + } - // Perform the Gaussian Smoothing with border replication - GaussianBlur(src,dst,Size(ksize_x_,ksize_y_),sigma,sigma,BORDER_REPLICATE); + // Perform the Gaussian Smoothing with border replication + GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes image derivatives with Scharr kernel * @param src Input image @@ -74,13 +70,11 @@ void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksi * Journal of Visual Communication and Image Representation 2002 */ void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, - const size_t& xorder, const size_t& yorder) { - Scharr(src,dst,CV_32F,xorder,yorder,1.0,0,BORDER_DEFAULT); + const size_t& xorder, const size_t& yorder) { + Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes the Perona and Malik conductivity coefficient g1 * g1 = exp(-|dL|^2/k^2) @@ -90,12 +84,10 @@ void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, * @param k Contrast factor parameter */ void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - exp(-(Lx.mul(Lx)+Ly.mul(Ly))/(k*k),dst); + exp(-(Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), dst); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes the Perona and Malik conductivity coefficient g2 * g2 = 1 / (1 + dL^2 / k^2) @@ -105,12 +97,10 @@ void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { * @param k Contrast factor parameter */ void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - dst = 1.0/(1.0+(Lx.mul(Lx)+Ly.mul(Ly))/(k*k)); + dst = 1.0 / (1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes Weickert conductivity coefficient gw * @param Lx First order image derivative in 
X-direction (horizontal) @@ -122,15 +112,13 @@ void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { * Proceedings of Algorithmy 2000 */ void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - Mat modg; - pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg); - cv::exp(-3.315/modg, dst); - dst = 1.0 - dst; + Mat modg; + pow((Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), 4, modg); + cv::exp(-3.315 / modg, dst); + dst = 1.0 - dst; } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes Charbonnier conductivity coefficient gc * gc = 1 / sqrt(1 + dL^2 / k^2) @@ -143,14 +131,12 @@ void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, co * Proceedings of Algorithmy 2000 */ void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - Mat den; - cv::sqrt(1.0+(Lx.mul(Lx)+Ly.mul(Ly))/(k*k),den); - dst = 1.0/ den; + Mat den; + cv::sqrt(1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), den); + dst = 1.0 / den; } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes a good empirical value for the k contrast factor * given an input image, the percentile (0-1), the gradient scale and the number of @@ -163,90 +149,87 @@ void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel * @return k contrast factor */ -float compute_k_percentile(const cv::Mat& img, const float& perc, const float& gscale, - const size_t& nbins, const size_t& ksize_x, const size_t& ksize_y) { +float compute_k_percentile(const cv::Mat& img, float perc, float gscale, + size_t nbins, size_t ksize_x, size_t ksize_y) { - size_t nbin = 0, nelements = 0, nthreshold = 0, k = 0; - float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; - float npoints = 0.0; - float hmax = 0.0; + size_t nbin = 0, nelements = 0, nthreshold = 0, k = 0; + float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; + float npoints = 0.0; + float hmax = 0.0; - // Create the array for the histogram - float *hist = new float[nbins]; + // Create the array for the histogram + float *hist = new float[nbins]; - // Create the matrices - Mat gaussian = Mat::zeros(img.rows,img.cols,CV_32F); - Mat Lx = Mat::zeros(img.rows,img.cols,CV_32F); - Mat Ly = Mat::zeros(img.rows,img.cols,CV_32F); + // Create the matrices + cv::Mat gaussian = cv::Mat::zeros(img.rows, img.cols, CV_32F); + cv::Mat Lx = cv::Mat::zeros(img.rows, img.cols, CV_32F); + cv::Mat Ly = cv::Mat::zeros(img.rows, img.cols, CV_32F); - // Set the histogram to zero, just in case - for (size_t i = 0; i < nbins; i++) { - hist[i] = 0.0; - } + // Set the histogram to zero + for (size_t i = 0; i < nbins; i++) + hist[i] = 0.0; - // Perform the Gaussian convolution - gaussian_2D_convolution(img,gaussian,ksize_x,ksize_y,gscale); + // Perform the Gaussian convolution + gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); - // Compute the Gaussian derivatives Lx and Ly - image_derivatives_scharr(gaussian,Lx,1,0); - 
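The percentile search above boils down to the following standalone sketch, written here over a flat list of gradient magnitudes instead of the smoothed image (the helper name and the use of std::vector are illustrative):

    #include <algorithm>
    #include <vector>

    float k_from_percentile(const std::vector<float>& modg, float perc, size_t nbins) {
      float hmax = 0.0f;
      for (size_t i = 0; i < modg.size(); i++)
        hmax = std::max(hmax, modg[i]);
      if (hmax == 0.0f)
        return 0.03f;                                   // same fallback value as above

      std::vector<size_t> hist(nbins, 0);
      size_t npoints = 0;
      for (size_t i = 0; i < modg.size(); i++) {
        if (modg[i] == 0.0f) continue;                  // zero gradients are not counted
        size_t nbin = std::min((size_t)(nbins * (modg[i] / hmax)), nbins - 1);
        hist[nbin]++;
        npoints++;
      }

      size_t nthreshold = (size_t)(npoints * perc), nelements = 0, k = 0;
      for (; nelements < nthreshold && k < nbins; k++)
        nelements += hist[k];

      return (nelements < nthreshold) ? 0.03f : hmax * ((float)k / (float)nbins);
    }
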
image_derivatives_scharr(gaussian,Ly,0,1); + // Compute the Gaussian derivatives Lx and Ly + image_derivatives_scharr(gaussian, Lx, 1, 0); + image_derivatives_scharr(gaussian, Ly, 0, 1); - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows-1; i++) { - for (int j = 1; j < gaussian.cols-1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + for (int j = 1; j < gaussian.cols - 1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); - // Get the maximum - if (modg > hmax) { - hmax = modg; - } - } - } - - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows-1; i++) { - for (int j = 1; j < gaussian.cols-1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); - - // Find the correspondent bin - if (modg != 0.0) { - nbin = floor(nbins*(modg/hmax)); - - if (nbin == nbins) { - nbin--; + // Get the maximum + if (modg > hmax) { + hmax = modg; + } } - - hist[nbin]++; - npoints++; - } } - } - // Now find the perc of the histogram percentile - nthreshold = (size_t)(npoints*perc); + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + for (int j = 1; j < gaussian.cols - 1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); - for (k = 0; nelements < nthreshold && k < nbins; k++) { - nelements = nelements + hist[k]; - } + // Find the correspondent bin + if (modg != 0.0) { + nbin = floor(nbins*(modg / hmax)); - if (nelements < nthreshold) { - kperc = 0.03; - } - else { - kperc = hmax*((float)(k)/(float)nbins); - } + if (nbin == nbins) { + nbin--; + } - delete [] hist; - return kperc; + hist[nbin]++; + npoints++; + } + } + } + + // Now find the perc of the histogram percentile + nthreshold = (size_t)(npoints*perc); + + for (k = 0; nelements < nthreshold && k < nbins; k++) { + nelements = nelements + hist[k]; + } + + if (nelements < nthreshold) { + kperc = 0.03; + } + else { + kperc = hmax*((float)(k) / (float)nbins); + } + + delete[] hist; + return kperc; } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function computes Scharr image derivatives * @param src Input image @@ -256,16 +239,14 @@ float compute_k_percentile(const cv::Mat& img, const float& perc, const float& g * @param scale Scale factor for the derivative size */ void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, const size_t& xorder, - const size_t& yorder, const size_t& scale) { + const size_t& yorder, const size_t& scale) { - Mat kx, ky; - compute_derivative_kernels(kx, ky, xorder,yorder,scale); - sepFilter2D(src,dst,CV_32F,kx,ky); + Mat kx, ky; + compute_derivative_kernels(kx, ky, xorder, yorder, scale); + sepFilter2D(src, dst, CV_32F, kx, ky); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function performs a scalar non-linear diffusion step * @param Ld2 Output image in the evolution @@ -281,64 +262,50 @@ void nld_step_scalar(cv::Mat& Ld, const 
cv::Mat& c, cv::Mat& Lstep, const float& #ifdef _OPENMP #pragma omp parallel for schedule(dynamic) #endif - for (int i = 1; i < Lstep.rows-1; i++) { - for (int j = 1; j < Lstep.cols-1; j++) { - float xpos = ((*(c.ptr(i)+j))+(*(c.ptr(i)+j+1)))*((*(Ld.ptr(i)+j+1))-(*(Ld.ptr(i)+j))); - float xneg = ((*(c.ptr(i)+j-1))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i)+j-1))); - - float ypos = ((*(c.ptr(i)+j))+(*(c.ptr(i+1)+j)))*((*(Ld.ptr(i+1)+j))-(*(Ld.ptr(i)+j))); - float yneg = ((*(c.ptr(i-1)+j))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i-1)+j))); - - *(Lstep.ptr(i)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + for (int i = 1; i < Lstep.rows - 1; i++) { + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); + float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); + float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); + float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); + *(Lstep.ptr(i)+j) = 0.5*stepsize*(xpos - xneg + ypos - yneg); + } } - } - for (int j = 1; j < Lstep.cols-1; j++) { - float xpos = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j+1)))*((*(Ld.ptr(0)+j+1))-(*(Ld.ptr(0)+j))); - float xneg = ((*(c.ptr(0)+j-1))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j-1))); + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); + float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j - 1))); + float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); + *(Lstep.ptr(0) + j) = 0.5*stepsize*(xpos - xneg + ypos); + } - float ypos = ((*(c.ptr(0)+j))+(*(c.ptr(1)+j)))*((*(Ld.ptr(1)+j))-(*(Ld.ptr(0)+j))); - float yneg = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j))); + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j + 1)))*((*(Ld.ptr(Lstep.rows - 1) + j + 1)) - (*(Ld.ptr(Lstep.rows - 1) + j))); + float xneg = ((*(c.ptr(Lstep.rows - 1) + j - 1)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j - 1))); + float ypos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j))); + float yneg = ((*(c.ptr(Lstep.rows - 2) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 2) + j))); + *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5*stepsize*(xpos - xneg + ypos - yneg); + } - *(Lstep.ptr(0)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); - } + for (int i = 1; i < Lstep.rows - 1; i++) { + float xpos = ((*(c.ptr(i))) + (*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1)) - (*(Ld.ptr(i)))); + float xneg = ((*(c.ptr(i))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i)))); + float ypos = ((*(c.ptr(i))) + (*(c.ptr(i + 1))))*((*(Ld.ptr(i + 1))) - (*(Ld.ptr(i)))); + float yneg = ((*(c.ptr(i - 1))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i - 1)))); + *(Lstep.ptr(i)) = 0.5*stepsize*(xpos - xneg + ypos - yneg); + } - for (int j = 1; j < Lstep.cols-1; j++) { - float xpos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j+1)))*((*(Ld.ptr(Lstep.rows-1)+j+1))-(*(Ld.ptr(Lstep.rows-1)+j))); - float xneg = 
((*(c.ptr(Lstep.rows-1)+j-1))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j-1))); + for (int i = 1; i < Lstep.rows - 1; i++) { + float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); + float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); + float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); + *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5*stepsize*(-xneg + ypos - yneg); + } - float ypos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j))); - float yneg = ((*(c.ptr(Lstep.rows-2)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-2)+j))); - - *(Lstep.ptr(Lstep.rows-1)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); - } - - for (int i = 1; i < Lstep.rows-1; i++) { - float xpos = ((*(c.ptr(i)))+(*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1))-(*(Ld.ptr(i)))); - float xneg = ((*(c.ptr(i)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i)))); - - float ypos = ((*(c.ptr(i)))+(*(c.ptr(i+1))))*((*(Ld.ptr(i+1)))-(*(Ld.ptr(i)))); - float yneg = ((*(c.ptr(i-1)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i-1)))); - - *(Lstep.ptr(i)) = 0.5*stepsize*(xpos-xneg + ypos-yneg); - } - - for (int i = 1; i < Lstep.rows-1; i++) { - float xpos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); - float xneg = ((*(c.ptr(i)+Lstep.cols-2))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-2))); - - float ypos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i+1)+Lstep.cols-1)))*((*(Ld.ptr(i+1)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); - float yneg = ((*(c.ptr(i-1)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i-1)+Lstep.cols-1))); - - *(Lstep.ptr(i)+Lstep.cols-1) = 0.5*stepsize*(xpos-xneg + ypos-yneg); - } - - Ld = Ld + Lstep; + Ld = Ld + Lstep; } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function downsamples the input image with the kernel [1/4,1/2,1/4] * @param img Input image to be downsampled @@ -346,22 +313,20 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& */ void downsample_image(const cv::Mat& src, cv::Mat& dst) { - int i1 = 0, j1 = 0, i2 = 0, j2 = 0; + int i1 = 0, j1 = 0, i2 = 0, j2 = 0; - for (i1 = 1; i1 < src.rows; i1+=2) { - j2 = 0; - for (j1 = 1; j1 < src.cols; j1+=2) { - *(dst.ptr(i2)+j2) = 0.5*(*(src.ptr(i1)+j1))+0.25*(*(src.ptr(i1)+j1-1) + *(src.ptr(i1)+j1+1)); - j2++; + for (i1 = 1; i1 < src.rows; i1 += 2) { + j2 = 0; + for (j1 = 1; j1 < src.cols; j1 += 2) { + *(dst.ptr(i2)+j2) = 0.5*(*(src.ptr(i1)+j1)) + 0.25*(*(src.ptr(i1)+j1 - 1) + *(src.ptr(i1)+j1 + 1)); + j2++; + } + + i2++; } - - i2++; - } } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief This function downsamples the input image using OpenCV resize * @param 
img Input image to be downsampled @@ -369,15 +334,13 @@ void downsample_image(const cv::Mat& src, cv::Mat& dst) { */ void halfsample_image(const cv::Mat& src, cv::Mat& dst) { - // Make sure the destination image is of the right size - CV_Assert(src.cols/2==dst.cols); - CV_Assert(src.rows / 2 == dst.rows); - resize(src,dst,dst.size(),0,0,cv::INTER_AREA); + // Make sure the destination image is of the right size + CV_Assert(src.cols / 2 == dst.cols); + CV_Assert(src.rows / 2 == dst.rows); + resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); } -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ /** * @brief Compute Scharr derivative kernels for sizes different than 3 * @param kx_ The derivative kernel in x-direction @@ -387,45 +350,45 @@ void halfsample_image(const cv::Mat& src, cv::Mat& dst) { * @param scale The kernel size */ void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, - const size_t& dx, const size_t& dy, const size_t& scale) { + const size_t& dx, const size_t& dy, const size_t& scale) { - const int ksize = 3 + 2*(scale-1); + const int ksize = 3 + 2 * (scale - 1); - // The usual Scharr kernel - if (scale == 1) { - getDerivKernels(kx_,ky_,dx,dy,0,true,CV_32F); - return; - } - - kx_.create(ksize,1,CV_32F,-1,true); - ky_.create(ksize,1,CV_32F,-1,true); - Mat kx = kx_.getMat(); - Mat ky = ky_.getMat(); - - float w = 10.0/3.0; - float norm = 1.0/(2.0*scale*(w+2.0)); - - for (int k = 0; k < 2; k++) { - Mat* kernel = k == 0 ? &kx : &ky; - int order = k == 0 ? dx : dy; - float kerI[1000]; - - for (int t = 0; trows, kernel->cols, CV_32F, &kerI[0]); - temp.copyTo(*kernel); - } + float w = 10.0 / 3.0; + float norm = 1.0 / (2.0*scale*(w + 2.0)); + + for (int k = 0; k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? dx : dy; + float kerI[1000]; + + for (int t = 0; t < ksize; t++) { + kerI[t] = 0; + } + + if (order == 0) { + kerI[0] = norm; + kerI[ksize / 2] = w*norm; + kerI[ksize - 1] = norm; + } + else if (order == 1) { + kerI[0] = -1; + kerI[ksize / 2] = 0; + kerI[ksize - 1] = 1; + } + + Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); + temp.copyTo(*kernel); + } } diff --git a/modules/features2d/src/akaze/nldiffusion_functions.h b/modules/features2d/src/akaze/nldiffusion_functions.h index 172fa25f3e..ba578758b0 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.h +++ b/modules/features2d/src/akaze/nldiffusion_functions.h @@ -1,20 +1,17 @@ -#ifndef _NLDIFFUSION_FUNCTIONS_H_ -#define _NLDIFFUSION_FUNCTIONS_H_ +/** + * @file nldiffusion_functions.h + * @brief Functions for nonlinear diffusion filtering applications + * @date Sep 15, 2013 + * @author Pablo F. 
Alcantarilla, Jesus Nuevo + */ -//****************************************************************************** -//****************************************************************************** +#pragma once +/* ************************************************************************* */ // Includes #include "precomp.hpp" -// OpenMP Includes -#ifdef _OPENMP -# include -#endif - -//************************************************************************************* -//************************************************************************************* - +/* ************************************************************************* */ // Declaration of functions void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, const size_t& ksize_y, const float& sigma); @@ -24,8 +21,8 @@ void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); -float compute_k_percentile(const cv::Mat& img, const float& perc, const float& gscale, - const size_t& nbins, const size_t& ksize_x, const size_t& ksize_y); +float compute_k_percentile(const cv::Mat& img, float perc, float gscale, + size_t nbins, size_t ksize_x, size_t ksize_y); void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, const size_t& xorder, const size_t& yorder, const size_t& scale); void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); @@ -33,9 +30,5 @@ void downsample_image(const cv::Mat& src, cv::Mat& dst); void halfsample_image(const cv::Mat& src, cv::Mat& dst); void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, const size_t& dx, const size_t& dy, const size_t& scale); - -//************************************************************************************* -//************************************************************************************* - - -#endif +bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, + int row, int col, bool same_img); diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index 41a7749058..d76a3c40f7 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -262,11 +262,7 @@ void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, for (int k = 0; k < 2; k++) { Mat* kernel = k == 0 ? &kx : &ky; int order = k == 0 ? 
dx : dy; - std::vector kerI(ksize); - - for (int t=0; t kerI(ksize, 0.0f); if (order == 0) { kerI[0] = norm, kerI[ksize/2] = w*norm, kerI[ksize-1] = norm; diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h old mode 100755 new mode 100644 From 36db97068c5c4eeb944dace83d6bab5cd9f51320 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 24 Apr 2014 22:00:37 +0100 Subject: [PATCH 08/52] Added missing operator() --- modules/features2d/include/opencv2/features2d.hpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index e45c17771f..b6f9e44490 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -906,10 +906,12 @@ public: AlgorithmInfo* info() const; - void operator()(InputArray image, InputArray mask, - std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints) const; + // Compute the KAZE features on an image + void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; + + // Compute the KAZE features and descriptors on an image + void operator()(InputArray image, InputArray mask, std::vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints = false) const; protected: void detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const; @@ -938,7 +940,7 @@ public: // Compute the AKAZE features on an image void operator()(InputArray image, InputArray mask, std::vector& keypoints) const; - // Compute the BRISK features and descriptors on an image + // Compute the AKAZE features and descriptors on an image void operator()(InputArray image, InputArray mask, std::vector& keypoints, OutputArray descriptors, bool useProvidedKeypoints = false) const; From 33b9e4b4a385739a8ad16d32c98ebd8abaf58df6 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 24 Apr 2014 22:00:51 +0100 Subject: [PATCH 09/52] Added missing operator() --- modules/features2d/src/kaze.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 1944f1e4e0..3bba8795a4 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -31,6 +31,10 @@ namespace cv return NORM_L2; } + void KAZE::operator()(InputArray image, InputArray mask, std::vector& keypoints) const + { + detectImpl(image, keypoints, mask); + } void KAZE::operator()(InputArray image, InputArray mask, std::vector& keypoints, From 3e24822fff47bb044c271f7f121ea1c70bc5dc0b Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 24 Apr 2014 22:01:29 +0100 Subject: [PATCH 10/52] Bugfix: clear input vector of key points for detection stage --- modules/features2d/src/kaze/KAZE.cpp | 3820 +++++++++++++------------- 1 file changed, 1911 insertions(+), 1909 deletions(-) diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp index f43d267e0d..aa4b6cb1f1 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -34,35 +34,35 @@ using namespace cv; * @brief KAZE constructor with input options * @param options KAZE configuration options * @note The constructor allocates memory for the nonlinear scale space -*/ + */ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { - soffset_ = options.soffset; - sderivatives_ = options.sderivatives; - omax_ = options.omax; - nsublevels_ 
= options.nsublevels; - save_scale_space_ = options.save_scale_space; - verbosity_ = options.verbosity; - img_width_ = options.img_width; - img_height_ = options.img_height; - dthreshold_ = options.dthreshold; - diffusivity_ = options.diffusivity; - descriptor_mode_ = options.descriptor; - use_fed_ = options.use_fed; - use_upright_ = options.upright; - use_extended_ = options.extended; - kcontrast_ = DEFAULT_KCONTRAST; - ncycles_ = 0; - reordering_ = true; - tkcontrast_ = 0.0; - tnlscale_ = 0.0; - tdetector_ = 0.0; - tmderivatives_ = 0.0; - tdresponse_ = 0.0; - tdescriptor_ = 0.0; + soffset_ = options.soffset; + sderivatives_ = options.sderivatives; + omax_ = options.omax; + nsublevels_ = options.nsublevels; + save_scale_space_ = options.save_scale_space; + verbosity_ = options.verbosity; + img_width_ = options.img_width; + img_height_ = options.img_height; + dthreshold_ = options.dthreshold; + diffusivity_ = options.diffusivity; + descriptor_mode_ = options.descriptor; + use_fed_ = options.use_fed; + use_upright_ = options.upright; + use_extended_ = options.extended; + kcontrast_ = DEFAULT_KCONTRAST; + ncycles_ = 0; + reordering_ = true; + tkcontrast_ = 0.0; + tnlscale_ = 0.0; + tdetector_ = 0.0; + tmderivatives_ = 0.0; + tdresponse_ = 0.0; + tdescriptor_ = 0.0; - // Now allocate memory for the evolution - Allocate_Memory_Evolution(); + // Now allocate memory for the evolution + Allocate_Memory_Evolution(); } //******************************************************************************* @@ -70,10 +70,10 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { /** * @brief KAZE destructor -*/ + */ KAZEFeatures::~KAZEFeatures(void) { - evolution_.clear(); + evolution_.clear(); } //******************************************************************************* @@ -81,59 +81,59 @@ KAZEFeatures::~KAZEFeatures(void) { /** * @brief This method allocates the memory for the nonlinear diffusion evolution -*/ + */ void KAZEFeatures::Allocate_Memory_Evolution(void) { - // Allocate the dimension of the matrices for the evolution - for (int i = 0; i <= omax_-1; i++) { - for (int j = 0; j <= nsublevels_-1; j++) { + // Allocate the dimension of the matrices for the evolution + for (int i = 0; i <= omax_ - 1; i++) { + for (int j = 0; j <= nsublevels_ - 1; j++) { - TEvolution aux; - aux.Lx = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Ly = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lxx = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lxy = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lyy = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lflow = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lt = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lsmooth = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Lstep = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.Ldet = cv::Mat::zeros(img_height_,img_width_,CV_32F); - aux.esigma = soffset_*pow((float)2.0,(float)(j)/(float)(nsublevels_) + i); - aux.etime = 0.5*(aux.esigma*aux.esigma); - aux.sigma_size = fRound(aux.esigma); - aux.octave = i; - aux.sublevel = j; - evolution_.push_back(aux); + TEvolution aux; + aux.Lx = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Ly = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Lxx = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Lxy = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Lyy = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Lflow = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Lt = cv::Mat::zeros(img_height_, 
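Each evolution level is parameterised by a scale esigma = soffset * 2^(j/nsublevels + i) and an evolution time etime = esigma^2 / 2. A small standalone sketch that prints this schedule (soffset = 1.6, omax = 2 and nsublevels = 4 are illustrative values, not necessarily the defaults used here):

    #include <cmath>
    #include <cstdio>

    int main() {
      const float soffset = 1.6f;
      const int omax = 2, nsublevels = 4;
      for (int i = 0; i < omax; i++) {
        for (int j = 0; j < nsublevels; j++) {
          float esigma = soffset * powf(2.0f, (float)j / (float)nsublevels + i);
          float etime  = 0.5f * esigma * esigma;     // diffusion time reached at this level
          printf("octave %d, sublevel %d: sigma = %.3f, t = %.3f\n", i, j, esigma, etime);
        }
      }
      return 0;
    }
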
img_width_, CV_32F); + aux.Lsmooth = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Lstep = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.Ldet = cv::Mat::zeros(img_height_, img_width_, CV_32F); + aux.esigma = soffset_*pow((float)2.0, (float)(j) / (float)(nsublevels_)+i); + aux.etime = 0.5*(aux.esigma*aux.esigma); + aux.sigma_size = fRound(aux.esigma); + aux.octave = i; + aux.sublevel = j; + evolution_.push_back(aux); + } } - } - // Allocate memory for the FED number of cycles and time steps - if (use_fed_) { - for (size_t i = 1; i < evolution_.size(); i++) { - int naux = 0; - vector tau; - float ttime = 0.0; - ttime = evolution_[i].etime-evolution_[i-1].etime; - naux = fed_tau_by_process_time(ttime,1,0.25,reordering_,tau); - nsteps_.push_back(naux); - tsteps_.push_back(tau); - ncycles_++; + // Allocate memory for the FED number of cycles and time steps + if (use_fed_) { + for (size_t i = 1; i < evolution_.size(); i++) { + int naux = 0; + vector tau; + float ttime = 0.0; + ttime = evolution_[i].etime - evolution_[i - 1].etime; + naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_, tau); + nsteps_.push_back(naux); + tsteps_.push_back(tau); + ncycles_++; + } + } + else { + // Allocate memory for the auxiliary variables that are used in the AOS scheme + Ltx_ = Mat::zeros(img_width_, img_height_, CV_32F); + Lty_ = Mat::zeros(img_height_, img_width_, CV_32F); + px_ = Mat::zeros(img_height_, img_width_, CV_32F); + py_ = Mat::zeros(img_height_, img_width_, CV_32F); + ax_ = Mat::zeros(img_height_, img_width_, CV_32F); + ay_ = Mat::zeros(img_height_, img_width_, CV_32F); + bx_ = Mat::zeros(img_height_ - 1, img_width_, CV_32F); + by_ = Mat::zeros(img_height_ - 1, img_width_, CV_32F); + qr_ = Mat::zeros(img_height_ - 1, img_width_, CV_32F); + qc_ = Mat::zeros(img_height_, img_width_ - 1, CV_32F); } - } - else { - // Allocate memory for the auxiliary variables that are used in the AOS scheme - Ltx_ = Mat::zeros(img_width_,img_height_,CV_32F); - Lty_ = Mat::zeros(img_height_,img_width_,CV_32F); - px_ = Mat::zeros(img_height_,img_width_,CV_32F); - py_ = Mat::zeros(img_height_,img_width_,CV_32F); - ax_ = Mat::zeros(img_height_,img_width_,CV_32F); - ay_ = Mat::zeros(img_height_,img_width_,CV_32F); - bx_ = Mat::zeros(img_height_-1,img_width_,CV_32F); - by_ = Mat::zeros(img_height_-1,img_width_,CV_32F); - qr_ = Mat::zeros(img_height_-1,img_width_,CV_32F); - qc_ = Mat::zeros(img_height_,img_width_-1,CV_32F); - } } @@ -144,78 +144,78 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { * @brief This method creates the nonlinear scale space for a given image * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully. -1 otherwise -*/ + */ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { - double t2 = 0.0, t1 = 0.0; + double t2 = 0.0, t1 = 0.0; - if (evolution_.size() == 0) { - cout << "Error generating the nonlinear scale space!!" 
<< endl; - cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl; - return -1; - } - - t1 = getTickCount(); - - // Copy the original image to the first level of the evolution - img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt,evolution_[0].Lt,0,0,soffset_); - gaussian_2D_convolution(evolution_[0].Lt,evolution_[0].Lsmooth,0,0,sderivatives_); - - // Firstly compute the kcontrast factor - Compute_KContrast(evolution_[0].Lt,KCONTRAST_PERCENTILE); - - t2 = getTickCount(); - tkcontrast_ = 1000.0*(t2-t1) / getTickFrequency(); - - if (verbosity_ == true) { - cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << - " Sigma: " << evolution_[0].esigma << endl; - } - - // Now generate the rest of evolution levels - for ( size_t i = 1; i < evolution_.size(); i++) { - - evolution_[i-1].Lt.copyTo(evolution_[i].Lt); - gaussian_2D_convolution(evolution_[i-1].Lt,evolution_[i].Lsmooth,0,0,sderivatives_); - - // Compute the Gaussian derivatives Lx and Ly - Scharr(evolution_[i].Lsmooth,evolution_[i].Lx,CV_32F,1,0,1,0,BORDER_DEFAULT); - Scharr(evolution_[i].Lsmooth,evolution_[i].Ly,CV_32F,0,1,1,0,BORDER_DEFAULT); - - // Compute the conductivity equation - if (diffusivity_ == 0) { - pm_g1(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); - } - else if (diffusivity_ == 1) { - pm_g2(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); - } - else if (diffusivity_ == 2) { - weickert_diffusivity(evolution_[i].Lx,evolution_[i].Ly,evolution_[i].Lflow,kcontrast_); + if (evolution_.size() == 0) { + cout << "Error generating the nonlinear scale space!!" << endl; + cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl; + return -1; } - // Perform FED n inner steps - if (use_fed_) { - for (int j = 0; j < nsteps_[i-1]; j++) { - nld_step_scalar(evolution_[i].Lt,evolution_[i].Lflow,evolution_[i].Lstep,tsteps_[i-1][j]); - } - } - else { - // Perform the evolution step with AOS - AOS_Step_Scalar(evolution_[i].Lt,evolution_[i-1].Lt,evolution_[i].Lflow, - evolution_[i].etime-evolution_[i-1].etime); - } + t1 = getTickCount(); + + // Copy the original image to the first level of the evolution + img.copyTo(evolution_[0].Lt); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, soffset_); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, sderivatives_); + + // Firstly compute the kcontrast factor + Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE); + + t2 = getTickCount(); + tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency(); if (verbosity_ == true) { - cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime << - " Sigma: " << evolution_[i].esigma << endl; + cout << "Computed image evolution step. 
Evolution time: " << evolution_[0].etime << + " Sigma: " << evolution_[0].esigma << endl; } - } - t2 = getTickCount(); - tnlscale_ = 1000.0*(t2-t1) / getTickFrequency(); + // Now generate the rest of evolution levels + for (size_t i = 1; i < evolution_.size(); i++) { - return 0; + evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); + gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, sderivatives_); + + // Compute the Gaussian derivatives Lx and Ly + Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT); + Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT); + + // Compute the conductivity equation + if (diffusivity_ == 0) { + pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, kcontrast_); + } + else if (diffusivity_ == 1) { + pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, kcontrast_); + } + else if (diffusivity_ == 2) { + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, kcontrast_); + } + + // Perform FED n inner steps + if (use_fed_) { + for (int j = 0; j < nsteps_[i - 1]; j++) { + nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); + } + } + else { + // Perform the evolution step with AOS + AOS_Step_Scalar(evolution_[i].Lt, evolution_[i - 1].Lt, evolution_[i].Lflow, + evolution_[i].etime - evolution_[i - 1].etime); + } + + if (verbosity_ == true) { + cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime << + " Sigma: " << evolution_[i].esigma << endl; + } + } + + t2 = getTickCount(); + tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency(); + + return 0; } //************************************************************************************* @@ -225,21 +225,21 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { * @brief This method computes the k contrast factor * @param img Input image * @param kpercentile Percentile of the gradient histogram -*/ + */ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { - if (verbosity_ == true) { - cout << "Computing Kcontrast factor." << endl; - } + if (verbosity_ == true) { + cout << "Computing Kcontrast factor." << endl; + } - if (COMPUTE_KCONTRAST == true) { - kcontrast_ = compute_k_percentile(img,kpercentile,sderivatives_,KCONTRAST_NBINS,0,0); - } + if (COMPUTE_KCONTRAST == true) { + kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); + } - if (verbosity_ == true) { - cout << "kcontrast = " << kcontrast_ << endl; - cout << endl << "Now computing the nonlinear scale space!!" << endl; - } + if (verbosity_ == true) { + cout << "kcontrast = " << kcontrast_ << endl; + cout << endl << "Now computing the nonlinear scale space!!" << endl; + } } //************************************************************************************* @@ -247,38 +247,38 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil /** * @brief This method computes the multiscale derivatives for the nonlinear scale space -*/ + */ void KAZEFeatures::Compute_Multiscale_Derivatives(void) { - double t2 = 0.0, t1 = 0.0; - t1 = getTickCount(); + double t2 = 0.0, t1 = 0.0; + t1 = getTickCount(); #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 0; i < evolution_.size(); i++) { + for (size_t i = 0; i < evolution_.size(); i++) { - if (verbosity_ == true) { - cout << "Computing multiscale derivatives. 
Evolution time: " << evolution_[i].etime - << " Step (pixels): " << evolution_[i].sigma_size << endl; + if (verbosity_ == true) { + cout << "Computing multiscale derivatives. Evolution time: " << evolution_[i].etime + << " Step (pixels): " << evolution_[i].sigma_size << endl; + } + + // Compute multiscale derivatives for the detector + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, evolution_[i].sigma_size); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, evolution_[i].sigma_size); + + evolution_[i].Lx = evolution_[i].Lx*((evolution_[i].sigma_size)); + evolution_[i].Ly = evolution_[i].Ly*((evolution_[i].sigma_size)); + evolution_[i].Lxx = evolution_[i].Lxx*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); + evolution_[i].Lxy = evolution_[i].Lxy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); + evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); } - // Compute multiscale derivatives for the detector - compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Lx,1,0,evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Lsmooth,evolution_[i].Ly,0,1,evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxx,1,0,evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Ly,evolution_[i].Lyy,0,1,evolution_[i].sigma_size); - compute_scharr_derivatives(evolution_[i].Lx,evolution_[i].Lxy,0,1,evolution_[i].sigma_size); - - evolution_[i].Lx = evolution_[i].Lx*((evolution_[i].sigma_size)); - evolution_[i].Ly = evolution_[i].Ly*((evolution_[i].sigma_size)); - evolution_[i].Lxx = evolution_[i].Lxx*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); - evolution_[i].Lxy = evolution_[i].Lxy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); - evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); - } - - t2 = getTickCount(); - tmderivatives_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = getTickCount(); + tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -287,36 +287,36 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) /** * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as feature detector -*/ + */ void KAZEFeatures::Compute_Detector_Response(void) { - double t2 = 0.0, t1 = 0.0; - float lxx = 0.0, lxy = 0.0, lyy = 0.0; + double t2 = 0.0, t1 = 0.0; + float lxx = 0.0, lxy = 0.0, lyy = 0.0; - t1 = getTickCount(); + t1 = getTickCount(); - // Firstly compute the multiscale derivatives - Compute_Multiscale_Derivatives(); + // Firstly compute the multiscale derivatives + Compute_Multiscale_Derivatives(); - for (size_t i = 0; i < evolution_.size(); i++) { + for (size_t i = 0; i < evolution_.size(); i++) { - // Determinant of the Hessian - if (verbosity_ == true) { - cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; + // Determinant of the Hessian + if (verbosity_ == true) { + cout << "Computing detector response. Determinant of Hessian. 
Evolution time: " << evolution_[i].etime << endl; + } + + for (int ix = 0; ix < img_height_; ix++) { + for (int jx = 0; jx < img_width_; jx++) { + lxx = *(evolution_[i].Lxx.ptr(ix)+jx); + lxy = *(evolution_[i].Lxy.ptr(ix)+jx); + lyy = *(evolution_[i].Lyy.ptr(ix)+jx); + *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy - lxy*lxy); + } + } } - for (int ix = 0; ix < img_height_; ix++) { - for (int jx = 0; jx < img_width_; jx++) { - lxx = *(evolution_[i].Lxx.ptr(ix)+jx); - lxy = *(evolution_[i].Lxy.ptr(ix)+jx); - lyy = *(evolution_[i].Lyy.ptr(ix)+jx); - *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy-lxy*lxy); - } - } - } - - t2 = getTickCount(); - tdresponse_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = getTickCount(); + tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -325,23 +325,25 @@ void KAZEFeatures::Compute_Detector_Response(void) { /** * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of keypoints -*/ + */ void KAZEFeatures::Feature_Detection(std::vector& kpts) { - double t2 = 0.0, t1 = 0.0; - t1 = getTickCount(); + double t2 = 0.0, t1 = 0.0; + t1 = getTickCount(); - // Firstly compute the detector response for each pixel and scale level - Compute_Detector_Response(); + kpts.clear(); - // Find scale space extrema - Determinant_Hessian_Parallel(kpts); + // Firstly compute the detector response for each pixel and scale level + Compute_Detector_Response(); - // Perform some subpixel refinement - Do_Subpixel_Refinement(kpts); + // Find scale space extrema + Determinant_Hessian_Parallel(kpts); - t2 = getTickCount(); - tdetector_ = 1000.0*(t2-t1) / getTickFrequency(); + // Perform some subpixel refinement + Do_Subpixel_Refinement(kpts); + + t2 = getTickCount(); + tdetector_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -352,88 +354,88 @@ void KAZEFeatures::Feature_Detection(std::vector& kpts) { * score of the Hessian determinant through the nonlinear scale space * @param kpts Vector of keypoints * @note We compute features for each of the nonlinear scale space level in a different processing thread -*/ + */ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) { - int level = 0; - float dist = 0.0, smax = 3.0; - int npoints = 0, id_repeated = 0; - int left_x = 0, right_x = 0, up_y = 0, down_y = 0; - bool is_extremum = false, is_repeated = false, is_out = false; + int level = 0; + float dist = 0.0, smax = 3.0; + int npoints = 0, id_repeated = 0; + int left_x = 0, right_x = 0, up_y = 0, down_y = 0; + bool is_extremum = false, is_repeated = false, is_out = false; - // Delete the memory of the vector of keypoints vectors - // In case we use the same kaze object for multiple images - for (size_t i = 0; i < kpts_par_.size(); i++) { - vector().swap(kpts_par_[i]); - } - kpts_par_.clear(); - vector aux; + // Delete the memory of the vector of keypoints vectors + // In case we use the same kaze object for multiple images + for (size_t i = 0; i < kpts_par_.size(); i++) { + vector().swap(kpts_par_[i]); + } + kpts_par_.clear(); + vector aux; - // Allocate memory for the vector of vectors - for (size_t i = 1; i < evolution_.size()-1; i++) { - kpts_par_.push_back(aux); - } + // Allocate memory for the vector of vectors + for (size_t i = 1; i < evolution_.size() - 1; i++) { + kpts_par_.push_back(aux); + } #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t 
i = 1; i < evolution_.size()-1; i++) { - Find_Extremum_Threading(i); - } - - // Now fill the vector of keypoints!!! - for (size_t i = 0; i < kpts_par_.size(); i++) { - for (size_t j = 0; j < kpts_par_[i].size(); j++) { - level = i+1; - is_extremum = true; - is_repeated = false; - is_out = false; - - // Check in case we have the same point as maxima in previous evolution levels - for (size_t ik = 0; ik < kpts.size(); ik++) { - if (kpts[ik].class_id == level || kpts[ik].class_id == level+1 || kpts[ik].class_id == level-1) { - dist = pow(kpts_par_[i][j].pt.x-kpts[ik].pt.x,2)+pow(kpts_par_[i][j].pt.y-kpts[ik].pt.y,2); - - if (dist < evolution_[level].sigma_size*evolution_[level].sigma_size) { - if (kpts_par_[i][j].response > kpts[ik].response) { - id_repeated = ik; - is_repeated = true; - } - else { - is_extremum = false; - } - - break; - } - } - } - - if (is_extremum == true) { - // Check that the point is under the image limits for the descriptor computation - left_x = fRound(kpts_par_[i][j].pt.x-smax*kpts_par_[i][j].size); - right_x = fRound(kpts_par_[i][j].pt.x+smax*kpts_par_[i][j].size); - up_y = fRound(kpts_par_[i][j].pt.y-smax*kpts_par_[i][j].size); - down_y = fRound(kpts_par_[i][j].pt.y+smax*kpts_par_[i][j].size); - - if (left_x < 0 || right_x >= evolution_[level].Ldet.cols || - up_y < 0 || down_y >= evolution_[level].Ldet.rows) { - is_out = true; - } - - is_out = false; - - if (is_out == false) { - if (is_repeated == false) { - kpts.push_back(kpts_par_[i][j]); - npoints++; - } - else { - kpts[id_repeated] = kpts_par_[i][j]; - } - } - } + for (size_t i = 1; i < evolution_.size() - 1; i++) { + Find_Extremum_Threading(i); + } + + // Now fill the vector of keypoints!!! + for (size_t i = 0; i < kpts_par_.size(); i++) { + for (size_t j = 0; j < kpts_par_[i].size(); j++) { + level = i + 1; + is_extremum = true; + is_repeated = false; + is_out = false; + + // Check in case we have the same point as maxima in previous evolution levels + for (size_t ik = 0; ik < kpts.size(); ik++) { + if (kpts[ik].class_id == level || kpts[ik].class_id == level + 1 || kpts[ik].class_id == level - 1) { + dist = pow(kpts_par_[i][j].pt.x - kpts[ik].pt.x, 2) + pow(kpts_par_[i][j].pt.y - kpts[ik].pt.y, 2); + + if (dist < evolution_[level].sigma_size*evolution_[level].sigma_size) { + if (kpts_par_[i][j].response > kpts[ik].response) { + id_repeated = ik; + is_repeated = true; + } + else { + is_extremum = false; + } + + break; + } + } + } + + if (is_extremum == true) { + // Check that the point is under the image limits for the descriptor computation + left_x = fRound(kpts_par_[i][j].pt.x - smax*kpts_par_[i][j].size); + right_x = fRound(kpts_par_[i][j].pt.x + smax*kpts_par_[i][j].size); + up_y = fRound(kpts_par_[i][j].pt.y - smax*kpts_par_[i][j].size); + down_y = fRound(kpts_par_[i][j].pt.y + smax*kpts_par_[i][j].size); + + if (left_x < 0 || right_x >= evolution_[level].Ldet.cols || + up_y < 0 || down_y >= evolution_[level].Ldet.rows) { + is_out = true; + } + + is_out = false; + + if (is_out == false) { + if (is_repeated == false) { + kpts.push_back(kpts_par_[i][j]); + npoints++; + } + else { + kpts[id_repeated] = kpts_par_[i][j]; + } + } + } + } } - } } //************************************************************************************* @@ -443,51 +445,51 @@ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) * @brief This method is called by the thread which is responsible of finding extrema * at a given nonlinear scale level * @param level Index in the nonlinear scale space evolution -*/ + */ void 
KAZEFeatures::Find_Extremum_Threading(const int& level) { - float value = 0.0; - bool is_extremum = false; + float value = 0.0; + bool is_extremum = false; - for (int ix = 1; ix < img_height_-1; ix++) { - for (int jx = 1; jx < img_width_-1; jx++) { + for (int ix = 1; ix < img_height_ - 1; ix++) { + for (int jx = 1; jx < img_width_ - 1; jx++) { - is_extremum = false; - value = *(evolution_[level].Ldet.ptr(ix)+jx); + is_extremum = false; + value = *(evolution_[level].Ldet.ptr(ix)+jx); - // Filter the points with the detector threshold - if (value > dthreshold_ && value >= DEFAULT_MIN_DETECTOR_THRESHOLD) { - if (value >= *(evolution_[level].Ldet.ptr(ix)+jx-1)) { - // First check on the same scale - if (check_maximum_neighbourhood(evolution_[level].Ldet,1,value,ix,jx,1)) { - // Now check on the lower scale - if (check_maximum_neighbourhood(evolution_[level-1].Ldet,1,value,ix,jx,0)) { - // Now check on the upper scale - if (check_maximum_neighbourhood(evolution_[level+1].Ldet,1,value,ix,jx,0)) { - is_extremum = true; - } + // Filter the points with the detector threshold + if (value > dthreshold_ && value >= DEFAULT_MIN_DETECTOR_THRESHOLD) { + if (value >= *(evolution_[level].Ldet.ptr(ix)+jx - 1)) { + // First check on the same scale + if (check_maximum_neighbourhood(evolution_[level].Ldet, 1, value, ix, jx, 1)) { + // Now check on the lower scale + if (check_maximum_neighbourhood(evolution_[level - 1].Ldet, 1, value, ix, jx, 0)) { + // Now check on the upper scale + if (check_maximum_neighbourhood(evolution_[level + 1].Ldet, 1, value, ix, jx, 0)) { + is_extremum = true; + } + } + } + } + } + + // Add the point of interest!! + if (is_extremum == true) { + KeyPoint point; + point.pt.x = jx; + point.pt.y = ix; + point.response = fabs(value); + point.size = evolution_[level].esigma; + point.octave = evolution_[level].octave; + point.class_id = level; + + // We use the angle field for the sublevel value + // Then, we will replace this angle field with the main orientation + point.angle = evolution_[level].sublevel; + kpts_par_[level - 1].push_back(point); } - } } - } - - // Add the point of interest!! 
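// The scale-space extremum test above relies on check_maximum_neighbourhood().
// As a reading aid, the following hypothetical helper sketches the 3x3 maximum
// test that call is assumed to perform on a CV_32F determinant-of-Hessian map
// (the helper name and the exact centre-pixel handling are assumptions for
// illustration only, not code added by this patch):
static bool is_3x3_maximum(const cv::Mat& Ldet, float value, int row, int col,
                           bool skip_centre) {
    for (int r = row - 1; r <= row + 1; r++) {
        for (int c = col - 1; c <= col + 1; c++) {
            if (skip_centre && r == row && c == col)
                continue;                          // ignore the candidate pixel itself
            if (*(Ldet.ptr<float>(r) + c) > value)
                return false;                      // a stronger neighbour exists
        }
    }
    return true;                                   // value is a local maximum in the window
}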
- if (is_extremum == true) { - KeyPoint point; - point.pt.x = jx; - point.pt.y = ix; - point.response = fabs(value); - point.size = evolution_[level].esigma; - point.octave = evolution_[level].octave; - point.class_id = level; - - // We use the angle field for the sublevel value - // Then, we will replace this angle field with the main orientation - point.angle = evolution_[level].sublevel; - kpts_par_[level-1].push_back(point); - } } - } } //************************************************************************************* @@ -496,103 +498,103 @@ void KAZEFeatures::Find_Extremum_Threading(const int& level) { /** * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints -*/ + */ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { - int step = 1; - int x = 0, y = 0; - float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0; - float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0; - Mat A = Mat::zeros(3,3,CV_32F); - Mat b = Mat::zeros(3,1,CV_32F); - Mat dst = Mat::zeros(3,1,CV_32F); - double t2 = 0.0, t1 = 0.0; + int step = 1; + int x = 0, y = 0; + float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0; + float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0; + Mat A = Mat::zeros(3, 3, CV_32F); + Mat b = Mat::zeros(3, 1, CV_32F); + Mat dst = Mat::zeros(3, 1, CV_32F); + double t2 = 0.0, t1 = 0.0; - t1 = cv::getTickCount(); - vector kpts_(kpts); + t1 = cv::getTickCount(); + vector kpts_(kpts); - for (size_t i = 0; i < kpts_.size(); i++) { + for (size_t i = 0; i < kpts_.size(); i++) { - x = kpts_[i].pt.x; - y = kpts_[i].pt.y; + x = kpts_[i].pt.x; + y = kpts_[i].pt.y; - // Compute the gradient - Dx = (1.0/(2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x+step) - -*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x-step)); - Dy = (1.0/(2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x) - -*(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x)); - Ds = 0.5*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x) - -*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x)); + // Compute the gradient + Dx = (1.0 / (2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x + step) + - *(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x - step)); + Dy = (1.0 / (2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x) + - *(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x)); + Ds = 0.5*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x) + - *(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x)); - // Compute the Hessian - Dxx = (1.0/(step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x+step) - + *(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x-step) - -2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); + // Compute the Hessian + Dxx = (1.0 / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x + step) + + *(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x - step) + - 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); - Dyy = (1.0/(step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x) - + *(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x) - -2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); + Dyy = (1.0 / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x) + + *(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x) + - 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); - Dss = *(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x) - + *(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x) - -2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x)); + Dss = 
*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x) + + *(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x) + - 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x)); - Dxy = (1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x+step) - +(*(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x-step))) - -(1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y-step)+x+step) - +(*(evolution_[kpts_[i].class_id].Ldet.ptr(y+step)+x-step))); + Dxy = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x + step) + + (*(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x - step))) + - (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x + step) + + (*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x - step))); - Dxs = (1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x+step) - +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x-step))) - -(1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y)+x-step) - +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y)+x+step))); + Dxs = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x + step) + + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x - step))) + - (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x - step) + + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x + step))); - Dys = (1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y+step)+x) - +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y-step)+x))) - -(1.0/(4.0*step))*(*(evolution_[kpts_[i].class_id+1].Ldet.ptr(y-step)+x) - +(*(evolution_[kpts_[i].class_id-1].Ldet.ptr(y+step)+x))); + Dys = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y + step) + x) + + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y - step) + x))) + - (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y - step) + x) + + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y + step) + x))); - // Solve the linear system - *(A.ptr(0)) = Dxx; - *(A.ptr(1)+1) = Dyy; - *(A.ptr(2)+2) = Dss; + // Solve the linear system + *(A.ptr(0)) = Dxx; + *(A.ptr(1) + 1) = Dyy; + *(A.ptr(2) + 2) = Dss; - *(A.ptr(0)+1) = *(A.ptr(1)) = Dxy; - *(A.ptr(0)+2) = *(A.ptr(2)) = Dxs; - *(A.ptr(1)+2) = *(A.ptr(2)+1) = Dys; + *(A.ptr(0) + 1) = *(A.ptr(1)) = Dxy; + *(A.ptr(0) + 2) = *(A.ptr(2)) = Dxs; + *(A.ptr(1) + 2) = *(A.ptr(2) + 1) = Dys; - *(b.ptr(0)) = -Dx; - *(b.ptr(1)) = -Dy; - *(b.ptr(2)) = -Ds; + *(b.ptr(0)) = -Dx; + *(b.ptr(1)) = -Dy; + *(b.ptr(2)) = -Ds; - solve(A,b,dst,DECOMP_LU); + solve(A, b, dst, DECOMP_LU); - if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0 && fabs(*(dst.ptr(2))) <= 1.0) { - kpts_[i].pt.x += *(dst.ptr(0)); - kpts_[i].pt.y += *(dst.ptr(1)); - dsc = kpts_[i].octave + (kpts_[i].angle+*(dst.ptr(2)))/((float)(nsublevels_)); + if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0 && fabs(*(dst.ptr(2))) <= 1.0) { + kpts_[i].pt.x += *(dst.ptr(0)); + kpts_[i].pt.y += *(dst.ptr(1)); + dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr(2))) / ((float)(nsublevels_)); - // In OpenCV the size of a keypoint is the diameter!! - kpts_[i].size = 2.0*soffset_*pow((float)2.0,dsc); - kpts_[i].angle = 0.0; + // In OpenCV the size of a keypoint is the diameter!! 
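// Worked example of the size update below (illustrative numbers only; the
// values soffset_ = 1.6 and nsublevels_ = 4 are assumed defaults here, they
// are actually set from the KAZE options): a keypoint at octave 1, sublevel 2
// whose refined scale offset is *(dst.ptr(2)) = 0.25 gives
//     dsc  = 1 + (2 + 0.25) / 4  = 1.5625
//     size = 2 * 1.6 * 2^1.5625  ~ 9.45 pixels
// i.e. the stored size is the diameter of the detection scale, as the comment
// above notes.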
+ kpts_[i].size = 2.0*soffset_*pow((float)2.0, dsc); + kpts_[i].angle = 0.0; + } + // Set the points to be deleted after the for loop + else { + kpts_[i].response = -1; + } } - // Set the points to be deleted after the for loop - else { - kpts_[i].response = -1; + + // Clear the vector of keypoints + kpts.clear(); + + for (size_t i = 0; i < kpts_.size(); i++) { + if (kpts_[i].response != -1) { + kpts.push_back(kpts_[i]); + } } - } - // Clear the vector of keypoints - kpts.clear(); - - for (size_t i = 0; i < kpts_.size(); i++) { - if (kpts_[i].response != -1) { - kpts.push_back(kpts_[i]); - } - } - - t2 = getTickCount(); - tsubpixel_ = 1000.0*(t2-t1) / getTickFrequency(); + t2 = getTickCount(); + tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -602,53 +604,53 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { * @brief This method performs feature suppression based on 2D distance * @param kpts Vector of keypoints * @param mdist Maximum distance in pixels -*/ + */ void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, const float& mdist) { - vector aux; - vector to_delete; - float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; - bool found = false; + vector aux; + vector to_delete; + float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; + bool found = false; - for (size_t i = 0; i < kpts.size(); i++) { - x1 = kpts[i].pt.x; - y1 = kpts[i].pt.y; + for (size_t i = 0; i < kpts.size(); i++) { + x1 = kpts[i].pt.x; + y1 = kpts[i].pt.y; - for (size_t j = i+1; j < kpts.size(); j++) { - x2 = kpts[j].pt.x; - y2 = kpts[j].pt.y; - dist = sqrt(pow(x1-x2,2)+pow(y1-y2,2)); + for (size_t j = i + 1; j < kpts.size(); j++) { + x2 = kpts[j].pt.x; + y2 = kpts[j].pt.y; + dist = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)); - if (dist < mdist) { - if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { - to_delete.push_back(j); + if (dist < mdist) { + if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { + to_delete.push_back(j); + } + else { + to_delete.push_back(i); + break; + } + } } - else { - to_delete.push_back(i); - break; + } + + for (size_t i = 0; i < kpts.size(); i++) { + found = false; + + for (size_t j = 0; j < to_delete.size(); j++) { + if (i == (size_t)(to_delete[j])) { + found = true; + break; + } } - } - } - } - for (size_t i = 0; i < kpts.size(); i++) { - found = false; - - for (size_t j = 0; j < to_delete.size(); j++) { - if(i == (size_t)(to_delete[j])) { - found = true; - break; - } + if (found == false) { + aux.push_back(kpts[i]); + } } - if (found == false) { - aux.push_back(kpts[i]); - } - } - - kpts.clear(); - kpts = aux; - aux.clear(); + kpts.clear(); + kpts = aux; + aux.clear(); } //************************************************************************************* @@ -658,144 +660,144 @@ void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of keypoints * @param desc Matrix with the feature descriptors -*/ + */ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) { - double t2 = 0.0, t1 = 0.0; - t1 = getTickCount(); + double t2 = 0.0, t1 = 0.0; + t1 = getTickCount(); - // Allocate memory for the matrix of descriptors - if (use_extended_ == true) { - desc = Mat::zeros(kpts.size(),128,CV_32FC1); - } - else { - desc = Mat::zeros(kpts.size(),64,CV_32FC1); - } - - if (use_upright_ == true) { - if (use_extended_ == false) { - if 
(descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_SURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_MSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_GSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); - } - } - } - else - { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++ ) { - kpts[i].angle = 0.0; - Get_SURF_Upright_Descriptor_128(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_MSURF_Upright_Descriptor_128(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_GSURF_Upright_Descriptor_128(kpts[i],desc.ptr(i)); - } - } - } - } - else { - if (use_extended_ == false) { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_SURF_Descriptor_64(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_MSURF_Descriptor_64(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_GSURF_Descriptor_64(kpts[i],desc.ptr(i)); - } - } + // Allocate memory for the matrix of descriptors + if (use_extended_ == true) { + desc = Mat::zeros(kpts.size(), 128, CV_32FC1); } else { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_SURF_Descriptor_128(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for(size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_MSURF_Descriptor_128(kpts[i],desc.ptr(i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for( size_t i = 0; i < kpts.size(); i++ ) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_GSURF_Descriptor_128(kpts[i],desc.ptr(i)); - } - } + desc = Mat::zeros(kpts.size(), 64, CV_32FC1); } - } - t2 = getTickCount(); - tdescriptor_ = 1000.0*(t2-t1) / getTickFrequency(); + if (use_upright_ == true) { + if (use_extended_ == false) { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_SURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp 
parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_GSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + } + } + } + else + { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_SURF_Upright_Descriptor_128(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_MSURF_Upright_Descriptor_128(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + kpts[i].angle = 0.0; + Get_GSURF_Upright_Descriptor_128(kpts[i], desc.ptr(i)); + } + } + } + } + else { + if (use_extended_ == false) { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_SURF_Descriptor_64(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_MSURF_Descriptor_64(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_GSURF_Descriptor_64(kpts[i], desc.ptr(i)); + } + } + } + else { + if (descriptor_mode_ == 0) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_SURF_Descriptor_128(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 1) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_MSURF_Descriptor_128(kpts[i], desc.ptr(i)); + } + } + else if (descriptor_mode_ == 2) { +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t i = 0; i < kpts.size(); i++) { + Compute_Main_Orientation_SURF(kpts[i]); + Get_GSURF_Descriptor_128(kpts[i], desc.ptr(i)); + } + } + } + } + + t2 = getTickCount(); + tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -806,74 +808,74 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat * @param kpt Input keypoint * @note The orientation is computed using a similar approach as described in the * original SURF method. 
See Bay et al., Speeded Up Robust Features, ECCV 2006 -*/ + */ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) { - int ix = 0, iy = 0, idx = 0, s = 0, level = 0; - float xf = 0.0, yf = 0.0, gweight = 0.0; - vector resX(109), resY(109), Ang(109); + int ix = 0, iy = 0, idx = 0, s = 0, level = 0; + float xf = 0.0, yf = 0.0, gweight = 0.0; + vector resX(109), resY(109), Ang(109); - // Variables for computing the dominant direction - float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; + // Variables for computing the dominant direction + float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; - // Get the information from the keypoint - xf = kpt.pt.x; - yf = kpt.pt.y; - level = kpt.class_id; - s = fRound(kpt.size/2.0); + // Get the information from the keypoint + xf = kpt.pt.x; + yf = kpt.pt.y; + level = kpt.class_id; + s = fRound(kpt.size / 2.0); - // Calculate derivatives responses for points within radius of 6*scale - for (int i = -6; i <= 6; ++i) { - for (int j = -6; j <= 6; ++j) { - if (i*i + j*j < 36) { - iy = fRound(yf + j*s); - ix = fRound(xf + i*s); + // Calculate derivatives responses for points within radius of 6*scale + for (int i = -6; i <= 6; ++i) { + for (int j = -6; j <= 6; ++j) { + if (i*i + j*j < 36) { + iy = fRound(yf + j*s); + ix = fRound(xf + i*s); - if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_ ) { - gweight = gaussian(iy-yf,ix-xf,2.5*s); - resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); - resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); + if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_) { + gweight = gaussian(iy - yf, ix - xf, 2.5*s); + resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); + resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); + } + else { + resX[idx] = 0.0; + resY[idx] = 0.0; + } + + Ang[idx] = getAngle(resX[idx], resY[idx]); + ++idx; + } } - else { - resX[idx] = 0.0; - resY[idx] = 0.0; + } + + // Loop slides pi/3 window around feature point + for (ang1 = 0; ang1 < 2.0*CV_PI; ang1 += 0.15f) { + ang2 = (ang1 + CV_PI / 3.0f > 2.0*CV_PI ? ang1 - 5.0f*CV_PI / 3.0f : ang1 + CV_PI / 3.0f); + sumX = sumY = 0.f; + + for (size_t k = 0; k < Ang.size(); ++k) { + // Get angle from the x-axis of the sample point + const float & ang = Ang[k]; + + // Determine whether the point is within the window + if (ang1 < ang2 && ang1 < ang && ang < ang2) { + sumX += resX[k]; + sumY += resY[k]; + } + else if (ang2 < ang1 && + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { + sumX += resX[k]; + sumY += resY[k]; + } } - Ang[idx] = getAngle(resX[idx],resY[idx]); - ++idx; - } + // if the vector produced from this window is longer than all + // previous vectors then this forms the new dominant direction + if (sumX*sumX + sumY*sumY > max) { + // store largest orientation + max = sumX*sumX + sumY*sumY; + kpt.angle = getAngle(sumX, sumY); + } } - } - - // Loop slides pi/3 window around feature point - for (ang1 = 0; ang1 < 2.0*CV_PI; ang1+=0.15f) { - ang2 =(ang1+CV_PI/3.0f > 2.0*CV_PI ? 
ang1-5.0f*CV_PI/3.0f : ang1+CV_PI/3.0f); - sumX = sumY = 0.f; - - for (size_t k = 0; k < Ang.size(); ++k) { - // Get angle from the x-axis of the sample point - const float & ang = Ang[k]; - - // Determine whether the point is within the window - if (ang1 < ang2 && ang1 < ang && ang < ang2) { - sumX+=resX[k]; - sumY+=resY[k]; - } - else if (ang2 < ang1 && - ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { - sumX+=resX[k]; - sumY+=resY[k]; - } - } - - // if the vector produced from this window is longer than all - // previous vectors then this forms the new dominant direction - if (sumX*sumX + sumY*sumY > max) { - // store largest orientation - max = sumX*sumX + sumY*sumY; - kpt.angle = getAngle(sumX, sumY); - } - } } //************************************************************************************* @@ -887,92 +889,92 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 -*/ + */ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - level = kpt.class_id; - scale = fRound(kpt.size/2.0); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + level = kpt.class_id; + scale = fRound(kpt.size / 2.0); - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dx=dy=mdx=mdy=0.0; + dx = dy = mdx = mdy = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - sample_y = k*scale + yf; - sample_x = l*scale + xf; + sample_y = k*scale + yf; + sample_x = l*scale + xf; - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, 
img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -986,100 +988,100 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 -*/ + */ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { - dx=dy=mdx=mdy=0.0; + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + dx = dy = mdx = mdy = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 
- fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -1093,125 +1095,125 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 -*/ + */ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int dsize = 0, scale = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int dsize = 0, scale = 0, level = 0; - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 12; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - level = kpt.class_id; + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + level = kpt.class_id; - i = -8; + i = -8; - // Calculate descriptor for this interest point - // Area of size 24 s x 24 s - while (i < pattern_size) { - j = -8; - i = i-4; + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { + j = -8; + i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0; + cy = -0.5; - while (j < pattern_size) { + while (j < pattern_size) { - dx=dy=mdx=mdy=0.0; - cy += 1.0; - j = j-4; + dx = dy = mdx = mdy = 0.0; + cy += 1.0; + j = j - 4; - ky = i + sample_step; - kx = j + sample_step; + ky = i + sample_step; + kx = j + sample_step; - ys = yf + (ky*scale); - xs = xf + (kx*scale); + ys = yf + (ky*scale); + xs = xf + (kx*scale); - for (int k = i; k < i+9; k++) { - for (int l = j; l < j+9; l++) { + for (int k = i; k < i + 9; k++) { + for (int l = j; l < j + 9; l++) { - sample_y = k*scale + yf; - sample_x = l*scale + xf; + sample_y = k*scale + yf; + sample_x = l*scale + xf; - //Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); + //Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); 
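// The res1..res4 reads around this point bilinearly interpolate the Lx and Ly
// responses at the sub-pixel sample position. A hypothetical helper showing the
// same interpolation in one place (illustration only, not used by this patch;
// the bounds clamping done by checkDescriptorLimits() is omitted for brevity):
static float bilinear_sample(const cv::Mat& L, float sample_x, float sample_y) {
    int x1 = (int)(sample_x - 0.5f), y1 = (int)(sample_y - 0.5f);
    int x2 = (int)(sample_x + 0.5f), y2 = (int)(sample_y + 0.5f);
    float fx = sample_x - x1, fy = sample_y - y1;
    return (1.0f - fx)*(1.0f - fy)*L.at<float>(y1, x1)
         + fx*(1.0f - fy)*L.at<float>(y1, x2)
         + (1.0f - fx)*fy*L.at<float>(y2, x1)
         + fx*fy*L.at<float>(y2, x2);
}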
- res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - rx = gauss_s1*rx; - ry = gauss_s1*ry; + rx = gauss_s1*rx; + ry = gauss_s1*ry; - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; } - } - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); - - desc[dcount++] = dx*gauss_s2; - desc[dcount++] = dy*gauss_s2; - desc[dcount++] = mdx*gauss_s2; - desc[dcount++] = mdy*gauss_s2; - - len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; - - j += 9; + i += 9; } - i += 9; - } + // convert to unit vector + len = sqrt(len); - // convert to unit vector - len = sqrt(len); + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -1225,126 +1227,126 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 -*/ + */ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; - int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 12; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); - i = -8; + i = -8; - // Calculate descriptor for this interest point - // Area of size 24 s x 24 s - while (i < pattern_size) { + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { - j = -8; - i = i-4; + j = -8; + i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0; + cy = -0.5; - while (j < pattern_size) { + while (j < pattern_size) { - dx=dy=mdx=mdy=0.0; - cy += 1.0; - j = j - 4; + dx = dy = mdx = mdy = 0.0; + cy += 1.0; + j = j - 4; - ky = i + sample_step; - kx = j + sample_step; + ky = i + sample_step; + kx = j + sample_step; - xs = xf + (-kx*scale*si + ky*scale*co); - ys = yf + (kx*scale*co + ky*scale*si); + xs = xf + (-kx*scale*si + ky*scale*co); + ys = yf + (kx*scale*co + ky*scale*si); - for (int k = i; k < i + 9; ++k) { - for (int l = j; l < j + 9; ++l) { + for (int k = i; k < i + 9; ++k) { + for (int l = j; l < j + 9; ++l) { - // Get coords of sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get coords of sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - // Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); - y1 = fRound(sample_y-.5); - x1 = fRound(sample_x-.5); + // Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); + y1 = fRound(sample_y - .5); + x1 = fRound(sample_x - .5); - 
checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Get the x and y derivatives on the rotated axis - rry = gauss_s1*(rx*co + ry*si); - rrx = gauss_s1*(-rx*si + ry*co); + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + j += 9; } - } - - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); - desc[dcount++] = dx*gauss_s2; - desc[dcount++] = dy*gauss_s2; - desc[dcount++] = mdx*gauss_s2; - desc[dcount++] = mdy*gauss_s2; - len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; - j += 9; + i += 9; } - i += 9; - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -1358,128 +1360,128 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 -*/ + */ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float lvv = 0.0, lww = 0.0, modg = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float lvv = 0.0, lww = 0.0, modg = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - level = kpt.class_id; + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + level = kpt.class_id; - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dx=dy=mdx=mdy=0.0; + dx = dy = mdx = mdy = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + l*scale; - sample_x = xf + k*scale; + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + l*scale; + sample_x = xf + k*scale; - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = 
*(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - modg = pow(rx,2) + pow(ry,2); + modg = pow(rx, 2) + pow(ry, 2); - if (modg != 0.0) { + if (modg != 0.0) { - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); - } - else { - lww = 0.0; - lvv = 0.0; - } + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + } + else { + lww = 0.0; + lvv = 0.0; + } - // Sum the derivatives to the cumulative descriptor - dx += lww; - dy += lvv; - mdx += fabs(lww); - mdy += fabs(lvv); + // Sum the derivatives to the cumulative descriptor + dx += lww; + dy += lvv; + mdx += fabs(lww); + mdy += fabs(lvv); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - // convert to unit vector - len = sqrt(len); + 
// convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -1493,131 +1495,131 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 -*/ + */ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float lvv = 0.0, lww = 0.0, modg = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float lvv = 0.0, lww = 0.0, modg = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dx=dy=mdx=mdy=0.0; + dx = dy = mdx = mdy = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - 
checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - modg = pow(rx,2) + pow(ry,2); + modg = pow(rx, 2) + pow(ry, 2); - if (modg != 0.0) { + if (modg != 0.0) { - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = 
(-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); - } - else { - lww = 0.0; - lvv = 0.0; - } + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + } + else { + lww = 0.0; + lvv = 0.0; + } - // Sum the derivatives to the cumulative descriptor - dx += lww; - dy += lvv; - mdx += fabs(lww); - mdy += fabs(lvv); + // Sum the derivatives to the cumulative descriptor + dx += lww; + dy += lvv; + mdx += fabs(lww); + mdy += fabs(lvv); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } @@ -1632,112 +1634,112 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 -*/ + */ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - level = kpt.class_id; + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + level = kpt.class_id; - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j 
+= sample_step) { - dxp=dxn=mdxp=mdxn=0.0; - dyp=dyn=mdyp=mdyn=0.0; + dxp = dxn = mdxp = mdxn = 0.0; + dyp = dyn = mdyp = mdyn = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - sample_y = k*scale + yf; - sample_x = l*scale + xf; + sample_y = k*scale + yf; + sample_x = l*scale + xf; - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Sum the derivatives to the cumulative descriptor - if (ry >= 0.0) { - dxp += rx; - mdxp += fabs(rx); - } - else { - dxn += rx; - mdxn += fabs(rx); - } + // Sum the derivatives to the cumulative descriptor + if (ry >= 0.0) { + dxp += rx; + mdxp += fabs(rx); + } + else { + dxn += rx; + mdxn += fabs(rx); + } - if (rx >= 0.0) { - dyp += ry; - mdyp += fabs(ry); - } - else { - dyn += ry; - mdyn += fabs(ry); - } + if (rx >= 0.0) { + dyp += ry; + mdyp += fabs(ry); + } + else { + dyn += ry; + mdyn += fabs(ry); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if 
(USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -1751,121 +1753,121 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 -*/ + */ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dxp=dxn=mdxp=mdxn=0.0; - dyp=dyn=mdyp=mdyn=0.0; + dxp = dxn = mdxp = mdxn = 0.0; + dyp = dyn = mdyp = mdyn = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = 
(int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; - // Sum the derivatives to the cumulative descriptor - if (rry >= 0.0) { - dxp += rrx; - mdxp += fabs(rrx); - } - else { - dxn += rrx; - mdxn += fabs(rrx); - } + // Sum the derivatives to the cumulative descriptor + if (rry >= 0.0) { + dxp += rrx; + mdxp += fabs(rrx); + } + else { + dxn += rrx; + mdxn += fabs(rrx); + } - if (rrx >= 0.0) { - dyp += rry; - mdyp += fabs(rry); - } - else { - dyn += rry; - mdyn += fabs(rry); - } + if (rrx >= 0.0) { + dyp += rry; + mdyp += fabs(rry); + } + else { + dyn += rry; + mdyn += fabs(rry); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -1879,149 +1881,149 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, 
float *desc) * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 -*/ + */ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - float gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - int dsize = 0, scale = 0, level = 0; + float gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int dsize = 0, scale = 0, level = 0; - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 12; + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 12; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - level = kpt.class_id; + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + level = kpt.class_id; - i = -8; + i = -8; - // Calculate descriptor for this interest point - // Area of size 24 s x 24 s - while (i < pattern_size) { + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { - j = -8; - i = i-4; + j = -8; + i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0; + cy = -0.5; - while (j < pattern_size) { + while (j < pattern_size) { - dxp=dxn=mdxp=mdxn=0.0; - dyp=dyn=mdyp=mdyn=0.0; + dxp = dxn = mdxp = mdxn = 0.0; + dyp = dyn = mdyp = mdyn = 0.0; - cy += 1.0; - j = j-4; + cy += 1.0; + j = j - 4; - ky = i + sample_step; - kx = j + sample_step; + ky = i + sample_step; + kx = j + sample_step; - ys = yf + (ky*scale); - xs = xf + (kx*scale); + ys = yf + (ky*scale); + xs = xf + (kx*scale); - for (int k = i; k < i+9; k++) { - for (int l = j; l < j+9; l++) { + for (int k = i; k < i + 9; k++) { + for (int l = j; l < j + 9; l++) { - sample_y = k*scale + yf; - sample_x = l*scale + xf; + sample_y = k*scale + yf; + sample_x = l*scale + xf; - //Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.50*scale); + //Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50*scale); - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - 
checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - rx = gauss_s1*rx; - ry = gauss_s1*ry; + rx = gauss_s1*rx; + ry = gauss_s1*ry; - // Sum the derivatives to the cumulative descriptor - if (ry >= 0.0) { - dxp += rx; - mdxp += fabs(rx); - } - else { - dxn += rx; - mdxn += fabs(rx); - } + // Sum the derivatives to the cumulative descriptor + if (ry >= 0.0) { + dxp += rx; + mdxp += fabs(rx); + } + else { + dxn += rx; + mdxn += fabs(rx); + } - if (rx >= 0.0) { - dyp += ry; - mdyp += fabs(ry); - } - else { - dyn += ry; - mdyn += fabs(ry); - } + if (rx >= 0.0) { + dyp += ry; + mdyp += fabs(ry); + } + else { + dyn += ry; + mdyn += fabs(ry); + } + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + + desc[dcount++] = dxp*gauss_s2; + desc[dcount++] = dxn*gauss_s2; + desc[dcount++] = mdxp*gauss_s2; + desc[dcount++] = mdxn*gauss_s2; + desc[dcount++] = dyp*gauss_s2; + desc[dcount++] = dyn*gauss_s2; + desc[dcount++] = mdyp*gauss_s2; + desc[dcount++] = mdyn*gauss_s2; + + // Store the current length^2 of the vector + len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; + + j += 9; } - } - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); - - desc[dcount++] = dxp*gauss_s2; - desc[dcount++] = dxn*gauss_s2; - desc[dcount++] = mdxp*gauss_s2; - desc[dcount++] = mdxn*gauss_s2; - desc[dcount++] = dyp*gauss_s2; - desc[dcount++] = dyn*gauss_s2; - desc[dcount++] = mdyp*gauss_s2; - desc[dcount++] = mdyn*gauss_s2; - - // Store the current length^2 of the vector - len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; - - j += 9; + i += 9; } - i += 9; - } + // convert to unit vector + len = sqrt(len); - // convert to unit vector - len = sqrt(len); + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } 
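The descriptor routines reformatted above all share one sub-pixel sampling step: the first-order responses Lx and Ly of the keypoint's evolution level are read at the four integer neighbours of the sample point, clamped to the image borders, and blended bilinearly. The snippet below is a minimal standalone sketch of that step, not code from this patch; it assumes a single-channel CV_32F response map, and the helper name sampleResponse is illustrative only.

#include <algorithm>
#include <opencv2/core/core.hpp>

// Bilinear read of a CV_32F response map (e.g. Lx or Ly of one evolution
// level) at a floating-point sample position. The four neighbour coordinates
// are clamped to the image borders, as checkDescriptorLimits() does.
static float sampleResponse(const cv::Mat& L, float sample_x, float sample_y) {
    int y1 = (int)(sample_y - 0.5f), x1 = (int)(sample_x - 0.5f);
    int y2 = (int)(sample_y + 0.5f), x2 = (int)(sample_x + 0.5f);

    x1 = std::min(std::max(x1, 0), L.cols - 1);
    y1 = std::min(std::max(y1, 0), L.rows - 1);
    x2 = std::min(std::max(x2, 0), L.cols - 1);
    y2 = std::min(std::max(y2, 0), L.rows - 1);

    float fx = sample_x - x1, fy = sample_y - y1;
    float res1 = *(L.ptr<float>(y1) + x1);
    float res2 = *(L.ptr<float>(y1) + x2);
    float res3 = *(L.ptr<float>(y2) + x1);
    float res4 = *(L.ptr<float>(y2) + x2);

    // Weighted average of the four neighbouring responses
    return (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2
         + (1.0f - fx)*fy*res3 + fx*fy*res4;
}

In the M-SURF variants this bilinear response is further weighted by gaussian(xs - sample_x, ys - sample_y, 2.5*scale) per sample and by gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f) per 4x4 subregion before the final unit-length normalization.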
//************************************************************************************* @@ -2035,154 +2037,154 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 -*/ + */ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - float gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; - int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 12; + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 12; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); - i = -8; + i = -8; - // Calculate descriptor for this interest point - // Area of size 24 s x 24 s - while (i < pattern_size) { + // Calculate descriptor for this interest point + // Area of size 24 s x 24 s + while (i < pattern_size) { - j = -8; - i = i-4; + j = -8; + i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0; + cy = -0.5; - while (j < pattern_size) { + while (j < pattern_size) { - dxp=dxn=mdxp=mdxn=0.0; - dyp=dyn=mdyp=mdyn=0.0; + dxp = dxn = mdxp = mdxn = 0.0; + dyp = dyn = mdyp = mdyn = 0.0; - cy += 1.0f; - j = j - 4; + cy += 1.0f; + j = j - 4; - ky = i + sample_step; - kx = j + sample_step; + ky = i + sample_step; + kx = j + sample_step; - xs = xf + (-kx*scale*si + ky*scale*co); - ys = yf + (kx*scale*co + ky*scale*si); + xs = xf + (-kx*scale*si + ky*scale*co); + ys = yf + (kx*scale*co + ky*scale*si); - for (int k = i; k < i + 9; ++k) { - for (int l = j; l < j + 9; ++l) { + for (int k = i; k < i + 9; ++k) { + for (int l = j; l < j + 9; ++l) { - // Get coords of sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get coords of sample point on the rotated 
axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - // Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); + // Get the gaussian weighted x and y responses + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); - y1 = fRound(sample_y-.5); - x1 = fRound(sample_x-.5); + y1 = fRound(sample_y - .5); + x1 = fRound(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Get the x and y derivatives on the rotated axis - rry = gauss_s1*(rx*co + ry*si); - rrx = gauss_s1*(-rx*si + ry*co); + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); - // Sum the derivatives to the cumulative descriptor - if (rry >= 0.0) { - dxp += rrx; - mdxp += fabs(rrx); - } - else { - dxn += rrx; - mdxn += fabs(rrx); - } + // Sum the derivatives to the cumulative descriptor + if (rry >= 0.0) { + dxp += rrx; + mdxp += fabs(rrx); + } + else { + dxn += rrx; + mdxn += fabs(rrx); + } - if (rrx >= 0.0) { - dyp += rry; - mdyp += fabs(rry); - } - else { - dyn += rry; - mdyn += fabs(rry); - } + if (rrx >= 0.0) { + dyp += rry; + mdyp += fabs(rry); + } + else { + dyn += rry; + mdyn += fabs(rry); + } + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + + desc[dcount++] = dxp*gauss_s2; + desc[dcount++] = dxn*gauss_s2; + desc[dcount++] = mdxp*gauss_s2; + desc[dcount++] = mdxn*gauss_s2; + desc[dcount++] = dyp*gauss_s2; + desc[dcount++] = dyn*gauss_s2; + desc[dcount++] = mdyp*gauss_s2; + desc[dcount++] = mdyn*gauss_s2; + + // Store the current length^2 of the vector + len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; + + j += 9; } - } - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); - - desc[dcount++] = dxp*gauss_s2; - desc[dcount++] = dxn*gauss_s2; - desc[dcount++] = mdxp*gauss_s2; - desc[dcount++] = mdxn*gauss_s2; - desc[dcount++] = dyp*gauss_s2; - desc[dcount++] = dyn*gauss_s2; - 
desc[dcount++] = mdyp*gauss_s2; - desc[dcount++] = mdyn*gauss_s2; - - // Store the current length^2 of the vector - len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; - - j += 9; + i += 9; } - i += 9; - } + // convert to unit vector + len = sqrt(len); - // convert to unit vector - len = sqrt(len); + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -2196,146 +2198,146 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 -*/ + */ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - float len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0, lvv = 0.0, lww = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0, lvv = 0.0, lww = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - level = kpt.class_id; + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + level = kpt.class_id; - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for(int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dxp=dxn=mdxp=mdxn=0.0; - dyp=dyn=mdyp=mdyn=0.0; + dxp = dxn = mdxp = mdxn = 0.0; + dyp = dyn = mdyp = mdyn = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - sample_y = k*scale + yf; - sample_x = l*scale + xf; + sample_y = k*scale + yf; + sample_x = l*scale + xf; - y1 = 
(int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - modg = pow(rx,2) + pow(ry,2); + modg = pow(rx, 2) + pow(ry, 2); - if (modg != 0.0) { + if (modg != 0.0) { - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx, 2)*rxx + 
2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); - } - else { - lww = 0.0; - lvv = 0.0; - } + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + } + else { + lww = 0.0; + lvv = 0.0; + } - // Sum the derivatives to the cumulative descriptor - if (lww >= 0.0) { - dxp += lvv; - mdxp += fabs(lvv); - } - else { - dxn += lvv; - mdxn += fabs(lvv); - } + // Sum the derivatives to the cumulative descriptor + if (lww >= 0.0) { + dxp += lvv; + mdxp += fabs(lvv); + } + else { + dxn += lvv; + mdxn += fabs(lvv); + } - if (lvv >= 0.0) { - dyp += lww; - mdyp += fabs(lww); - } - else { - dyn += lww; - mdyn += fabs(lww); - } + if (lvv >= 0.0) { + dyp += lww; + mdyp += fabs(lww); + } + else { + dyn += lww; + mdyn += fabs(lww); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -2349,151 +2351,151 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 -*/ + */ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - float len = 0.0, xf = 0.0, yf = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - float lvv = 0.0, lww = 0.0, modg = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; + float len = 0.0, xf = 0.0, yf = 0.0; + float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; + float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; + float lvv = 0.0, lww = 0.0, modg = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int dsize = 0, scale = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 128; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size/2.0); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); + // Get the information from the keypoint + yf = kpt.pt.y; + xf = kpt.pt.x; + scale = fRound(kpt.size / 2.0); + angle = kpt.angle; + level = kpt.class_id; + co = cos(angle); + si = sin(angle); - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { + // Calculate descriptor for this interest point + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dxp=dxn=mdxp=mdxn=0.0; - dyp=dyn=mdyp=mdyn=0.0; + dxp = dxn = mdxp = mdxn = 0.0; + dyp = dyn = mdyp = mdyn = 0.0; - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get the coordinates of the sample point on the rotated axis + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y-.5); - x1 = (int)(sample_x-.5); + y1 = (int)(sample_y - .5); + x1 = (int)(sample_x - .5); - checkDescriptorLimits(x1,y1,img_width_,img_height_); + checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y+.5); - x2 = (int)(sample_x+.5); + y2 = (int)(sample_y + .5); + x2 = (int)(sample_x + .5); - checkDescriptorLimits(x2,y2,img_width_,img_height_); + checkDescriptorLimits(x2, y2, img_width_, img_height_); - fx = sample_x-x1; - fy = sample_y-y1; + fx = sample_x - x1; + fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 
+ fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - modg = pow(rx,2) + pow(ry,2); + modg = pow(rx, 2) + pow(ry, 2); - if (modg != 0.0) { - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + if (modg != 0.0) { + res1 = *(evolution_[level].Lxx.ptr(y1)+x1); + res2 = *(evolution_[level].Lxx.ptr(y1)+x2); + res3 = *(evolution_[level].Lxx.ptr(y2)+x1); + res4 = *(evolution_[level].Lxx.ptr(y2)+x2); + rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lxy.ptr(y1)+x1); + res2 = *(evolution_[level].Lxy.ptr(y1)+x2); + res3 = *(evolution_[level].Lxy.ptr(y2)+x1); + res4 = *(evolution_[level].Lxy.ptr(y2)+x2); + rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lyy.ptr(y1)+x1); + res2 = *(evolution_[level].Lyy.ptr(y1)+x2); + res3 = *(evolution_[level].Lyy.ptr(y2)+x1); + res4 = *(evolution_[level].Lyy.ptr(y2)+x2); + ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); + // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) + lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); - } - else { - lww = 0.0; - lvv = 0.0; - } + // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) + lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + } + else { + lww = 0.0; + lvv = 0.0; + } - // Sum the derivatives to the cumulative descriptor - if (lww >= 0.0) { - dxp += lvv; - mdxp += fabs(lvv); - } - else { - dxn += lvv; - mdxn += fabs(lvv); - } + // Sum the derivatives to the cumulative descriptor + if (lww >= 0.0) { + dxp += lvv; + mdxp += fabs(lvv); + } + else { + dxn += lvv; + mdxn += fabs(lvv); + } - if (lvv >= 0.0) { - dyp += 
lww; - mdyp += fabs(lww); - } - else { - dyn += lww; - mdyn += fabs(lww); - } + if (lvv >= 0.0) { + dyp += lww; + mdyp += fabs(lww); + } + else { + dyn += lww; + mdyn += fabs(lww); + } + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dxp; + desc[dcount++] = dxn; + desc[dcount++] = mdxp; + desc[dcount++] = mdxn; + desc[dcount++] = dyp; + desc[dcount++] = dyn; + desc[dcount++] = mdyp; + desc[dcount++] = mdyn; + + // Store the current length^2 of the vector + len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + + dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } - if (USE_CLIPPING_NORMALIZATION == true) { - clippingDescriptor(desc,dsize,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); - } + if (USE_CLIPPING_NORMALIZATION == true) { + clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + } } //************************************************************************************* @@ -2508,27 +2510,27 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc * @note If c is constant, the diffusion will be linear * If c is a matrix of the same size as Ld, the diffusion will be nonlinear * The stepsize can be arbitrarilly large -*/ + */ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { #ifdef _OPENMP #pragma omp sections - { -#pragma omp section { - AOS_Rows(Ldprev,c,stepsize); - } #pragma omp section - { - AOS_Columns(Ldprev,c,stepsize); + { + AOS_Rows(Ldprev,c,stepsize); + } +#pragma omp section + { + AOS_Columns(Ldprev,c,stepsize); + } } - } #else - AOS_Rows(Ldprev,c,stepsize); - AOS_Columns(Ldprev,c,stepsize); + AOS_Rows(Ldprev, c, stepsize); + AOS_Columns(Ldprev, c, stepsize); #endif - Ld = 0.5*(Lty_+Ltx_.t()); + Ld = 0.5*(Lty_ + Ltx_.t()); } //************************************************************************************* @@ -2539,37 +2541,37 @@ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv: * @param Ldprev Image at a previous evolution step * @param c Conductivity image * @param stepsize Stepsize for the nonlinear diffusion evolution -*/ + */ void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { - // Operate on rows - for (int i = 0; i < qr_.rows; i++) { - for (int j = 0; j < qr_.cols; j++) { - *(qr_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i+1)+j); + // Operate on rows + for (int i = 0; i < qr_.rows; i++) { + for (int j = 0; j < qr_.cols; j++) { + *(qr_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i + 1) + j); + } } - } - for (int j = 0; j < py_.cols; j++) { - *(py_.ptr(0)+j) = *(qr_.ptr(0)+j); - } - - for (int j = 0; j < py_.cols; j++) { - *(py_.ptr(py_.rows-1)+j) = *(qr_.ptr(qr_.rows-1)+j); - } - - for (int i = 1; i < py_.rows-1; i++) { for (int j = 0; j < py_.cols; j++) { - *(py_.ptr(i)+j) = *(qr_.ptr(i-1)+j) + *(qr_.ptr(i)+j); + *(py_.ptr(0) + j) = *(qr_.ptr(0) + j); 
} - } - // a = 1 + t.*p; (p is -1*p) - // b = -t.*q; - ay_ = 1.0 + stepsize*py_; // p is -1*p - by_ = -stepsize*qr_; + for (int j = 0; j < py_.cols; j++) { + *(py_.ptr(py_.rows - 1) + j) = *(qr_.ptr(qr_.rows - 1) + j); + } - // Do Thomas algorithm to solve the linear system of equations - Thomas(ay_,by_,Ldprev,Lty_); + for (int i = 1; i < py_.rows - 1; i++) { + for (int j = 0; j < py_.cols; j++) { + *(py_.ptr(i)+j) = *(qr_.ptr(i - 1) + j) + *(qr_.ptr(i)+j); + } + } + + // a = 1 + t.*p; (p is -1*p) + // b = -t.*q; + ay_ = 1.0 + stepsize*py_; // p is -1*p + by_ = -stepsize*qr_; + + // Do Thomas algorithm to solve the linear system of equations + Thomas(ay_, by_, Ldprev, Lty_); } //************************************************************************************* @@ -2580,41 +2582,41 @@ void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float * @param Ldprev Image at a previous evolution step * @param c Conductivity image * @param stepsize Stepsize for the nonlinear diffusion evolution -*/ + */ void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { - // Operate on columns - for (int j = 0; j < qc_.cols; j++) { - for (int i = 0; i < qc_.rows; i++) { - *(qc_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i)+j+1); + // Operate on columns + for (int j = 0; j < qc_.cols; j++) { + for (int i = 0; i < qc_.rows; i++) { + *(qc_.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i)+j + 1); + } } - } - for (int i = 0; i < px_.rows; i++) { - *(px_.ptr(i)) = *(qc_.ptr(i)); - } - - for (int i = 0; i < px_.rows; i++) { - *(px_.ptr(i)+px_.cols-1) = *(qc_.ptr(i)+qc_.cols-1); - } - - for (int j = 1; j < px_.cols-1; j++) { for (int i = 0; i < px_.rows; i++) { - *(px_.ptr(i)+j) = *(qc_.ptr(i)+j-1) + *(qc_.ptr(i)+j); + *(px_.ptr(i)) = *(qc_.ptr(i)); } - } - // a = 1 + t.*p'; - ax_ = 1.0 + stepsize*px_.t(); + for (int i = 0; i < px_.rows; i++) { + *(px_.ptr(i)+px_.cols - 1) = *(qc_.ptr(i)+qc_.cols - 1); + } - // b = -t.*q'; - bx_ = -stepsize*qc_.t(); + for (int j = 1; j < px_.cols - 1; j++) { + for (int i = 0; i < px_.rows; i++) { + *(px_.ptr(i)+j) = *(qc_.ptr(i)+j - 1) + *(qc_.ptr(i)+j); + } + } - // But take care since we need to transpose the solution!! - Mat Ldprevt = Ldprev.t(); + // a = 1 + t.*p'; + ax_ = 1.0 + stepsize*px_.t(); - // Do Thomas algorithm to solve the linear system of equations - Thomas(ax_,bx_,Ldprevt,Ltx_); + // b = -t.*q'; + bx_ = -stepsize*qc_.t(); + + // But take care since we need to transpose the solution!! + Mat Ldprevt = Ldprev.t(); + + // Do Thomas algorithm to solve the linear system of equations + Thomas(ax_, bx_, Ldprevt, Ltx_); } //************************************************************************************* @@ -2623,62 +2625,62 @@ void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const fl /** * @brief This method does the Thomas algorithm for solving a tridiagonal linear system * @note The matrix A must be strictly diagonally dominant for a stable solution -*/ + */ void KAZEFeatures::Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x) { - // Auxiliary variables - int n = a.rows; - Mat m = cv::Mat::zeros(a.rows,a.cols,CV_32F); - Mat l = cv::Mat::zeros(b.rows,b.cols,CV_32F); - Mat y = cv::Mat::zeros(Ld.rows,Ld.cols,CV_32F); + // Auxiliary variables + int n = a.rows; + Mat m = cv::Mat::zeros(a.rows, a.cols, CV_32F); + Mat l = cv::Mat::zeros(b.rows, b.cols, CV_32F); + Mat y = cv::Mat::zeros(Ld.rows, Ld.cols, CV_32F); - /** A*x = d; */ - /** / a1 b1 0 0 0 ... 
0 \ / x1 \ = / d1 \ */ - /** | c1 a2 b2 0 0 ... 0 | | x2 | = | d2 | */ - /** | 0 c2 a3 b3 0 ... 0 | | x3 | = | d3 | */ - /** | : : : : 0 ... 0 | | : | = | : | */ - /** | : : : : 0 cn-1 an | | xn | = | dn | */ + /** A*x = d; */ + /** / a1 b1 0 0 0 ... 0 \ / x1 \ = / d1 \ */ + /** | c1 a2 b2 0 0 ... 0 | | x2 | = | d2 | */ + /** | 0 c2 a3 b3 0 ... 0 | | x3 | = | d3 | */ + /** | : : : : 0 ... 0 | | : | = | : | */ + /** | : : : : 0 cn-1 an | | xn | = | dn | */ - /** 1. LU decomposition - / L = / 1 \ U = / m1 r1 \ - / | l1 1 | | m2 r2 | - / | l2 1 | | m3 r3 | - / | : : : | | : : : | - / \ ln-1 1 / \ mn / */ + /** 1. LU decomposition + / L = / 1 \ U = / m1 r1 \ + / | l1 1 | | m2 r2 | + / | l2 1 | | m3 r3 | + / | : : : | | : : : | + / \ ln-1 1 / \ mn / */ - for (int j = 0; j < m.cols; j++) { - *(m.ptr(0)+j) = *(a.ptr(0)+j); - } - - for (int j = 0; j < y.cols; j++) { - *(y.ptr(0)+j) = *(Ld.ptr(0)+j); - } - - // 1. Forward substitution L*y = d for y - for (int k = 1; k < n; k++) { - for (int j=0; j < l.cols; j++) { - *(l.ptr(k-1)+j) = *(b.ptr(k-1)+j) / *(m.ptr(k-1)+j); + for (int j = 0; j < m.cols; j++) { + *(m.ptr(0) + j) = *(a.ptr(0) + j); } - for (int j=0; j < m.cols; j++) { - *(m.ptr(k)+j) = *(a.ptr(k)+j) - *(l.ptr(k-1)+j)*(*(b.ptr(k-1)+j)); + for (int j = 0; j < y.cols; j++) { + *(y.ptr(0) + j) = *(Ld.ptr(0) + j); } - for (int j=0; j < y.cols; j++) { - *(y.ptr(k)+j) = *(Ld.ptr(k)+j) - *(l.ptr(k-1)+j)*(*(y.ptr(k-1)+j)); - } - } + // 1. Forward substitution L*y = d for y + for (int k = 1; k < n; k++) { + for (int j = 0; j < l.cols; j++) { + *(l.ptr(k - 1) + j) = *(b.ptr(k - 1) + j) / *(m.ptr(k - 1) + j); + } - // 2. Backward substitution U*x = y - for (int j=0; j < y.cols; j++) { - *(x.ptr(n-1)+j) = (*(y.ptr(n-1)+j))/(*(m.ptr(n-1)+j)); - } + for (int j = 0; j < m.cols; j++) { + *(m.ptr(k)+j) = *(a.ptr(k)+j) - *(l.ptr(k - 1) + j)*(*(b.ptr(k - 1) + j)); + } - for (int i = n-2; i >= 0; i--) { - for(int j = 0; j < x.cols; j++) { - *(x.ptr(i)+j) = (*(y.ptr(i)+j) - (*(b.ptr(i)+j))*(*(x.ptr(i+1)+j)))/(*(m.ptr(i)+j)); + for (int j = 0; j < y.cols; j++) { + *(y.ptr(k)+j) = *(Ld.ptr(k)+j) - *(l.ptr(k - 1) + j)*(*(y.ptr(k - 1) + j)); + } + } + + // 2. Backward substitution U*x = y + for (int j = 0; j < y.cols; j++) { + *(x.ptr(n - 1) + j) = (*(y.ptr(n - 1) + j)) / (*(m.ptr(n - 1) + j)); + } + + for (int i = n - 2; i >= 0; i--) { + for (int j = 0; j < x.cols; j++) { + *(x.ptr(i)+j) = (*(y.ptr(i)+j) - (*(b.ptr(i)+j))*(*(x.ptr(i + 1) + j))) / (*(m.ptr(i)+j)); + } } - } } //************************************************************************************* @@ -2686,27 +2688,27 @@ void KAZEFeatures::Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, /** * @brief This function computes the angle from the vector given by (X Y). 
From 0 to 2*Pi -*/ + */ inline float getAngle(const float& x, const float& y) { - if (x >= 0 && y >= 0) - { - return atan(y/x); - } + if (x >= 0 && y >= 0) + { + return atan(y / x); + } - if (x < 0 && y >= 0) { - return CV_PI - atan(-y/x); - } + if (x < 0 && y >= 0) { + return CV_PI - atan(-y / x); + } - if(x < 0 && y < 0) { - return CV_PI + atan(y/x); - } + if (x < 0 && y < 0) { + return CV_PI + atan(y / x); + } - if(x >= 0 && y < 0) { - return 2.0*CV_PI - atan(-y/x); - } + if (x >= 0 && y < 0) { + return 2.0*CV_PI - atan(-y / x); + } - return 0; + return 0; } //************************************************************************************* @@ -2718,31 +2720,31 @@ inline float getAngle(const float& x, const float& y) { * @param dsize Size of the descriptor vector * @param iter Number of iterations * @param ratio Clipping ratio -*/ + */ inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio) { - float cratio = ratio / sqrt(dsize); - float len = 0.0; + float cratio = ratio / sqrt(dsize); + float len = 0.0; - for (int i = 0; i < niter; i++) { - len = 0.0; - for (int j = 0; j < dsize; j++) { - if (desc[j] > cratio) { - desc[j] = cratio; - } - else if (desc[j] < -cratio) { - desc[j] = -cratio; - } - len += desc[j]*desc[j]; + for (int i = 0; i < niter; i++) { + len = 0.0; + for (int j = 0; j < dsize; j++) { + if (desc[j] > cratio) { + desc[j] = cratio; + } + else if (desc[j] < -cratio) { + desc[j] = -cratio; + } + len += desc[j] * desc[j]; + } + + // Normalize again + len = sqrt(len); + + for (int j = 0; j < dsize; j++) { + desc[j] = desc[j] / len; + } } - - // Normalize again - len = sqrt(len); - - for (int j = 0; j < dsize; j++) { - desc[j] = desc[j] / len; - } - } } //************************************************************************************** @@ -2753,9 +2755,9 @@ inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, * @param x X Position * @param y Y Position * @param sig Standard Deviation -*/ + */ inline float gaussian(const float& x, const float& y, const float& sig) { - return exp(-(x*x+y*y)/(2.0f*sig*sig)); + return exp(-(x*x + y*y) / (2.0f*sig*sig)); } //************************************************************************************** @@ -2767,24 +2769,24 @@ inline float gaussian(const float& x, const float& y, const float& sig) { * @param y Y Position * @param width Image width * @param height Image height -*/ + */ inline void checkDescriptorLimits(int &x, int &y, const int& width, const int& height) { - if (x < 0) { - x = 0; - } + if (x < 0) { + x = 0; + } - if (y < 0) { - y = 0; - } + if (y < 0) { + y = 0; + } - if (x > width-1) { - x = width-1; - } + if (x > width - 1) { + x = width - 1; + } - if (y > height-1) { - y = height-1; - } + if (y > height - 1) { + y = height - 1; + } } //************************************************************************************* @@ -2797,5 +2799,5 @@ inline void checkDescriptorLimits(int &x, int &y, const int& width, const int& h */ inline int fRound(const float& flt) { - return (int)(flt+0.5f); + return (int)(flt + 0.5f); } From 86888bdf7797315731603fd336905793e81120ef Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 24 Apr 2014 22:01:45 +0100 Subject: [PATCH 11/52] Replace swap with clear (more efficient) --- modules/features2d/src/akaze/AKAZE.cpp | 2903 ++++++++++++------------ 1 file changed, 1452 insertions(+), 1451 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 
661a1cad8b..617a16d4ee 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -18,81 +18,81 @@ using namespace cv; * @brief AKAZEFeatures constructor with input options * @param options AKAZEFeatures configuration options * @note This constructor allocates memory for the nonlinear scale space -*/ + */ AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { - ncycles_ = 0; - reordering_ = true; + ncycles_ = 0; + reordering_ = true; - if (options_.descriptor_size > 0 && options_.descriptor >= MLDB_UPRIGHT) { - generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, - options_.descriptor_pattern_size, options_.descriptor_channels); - } + if (options_.descriptor_size > 0 && options_.descriptor >= MLDB_UPRIGHT) { + generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, + options_.descriptor_pattern_size, options_.descriptor_channels); + } - Allocate_Memory_Evolution(); + Allocate_Memory_Evolution(); } /* ************************************************************************* */ /** * @brief AKAZEFeatures destructor -*/ + */ AKAZEFeatures::~AKAZEFeatures(void) { - evolution_.clear(); + evolution_.clear(); } /* ************************************************************************* */ /** * @brief This method allocates the memory for the nonlinear diffusion evolution -*/ + */ void AKAZEFeatures::Allocate_Memory_Evolution(void) { - float rfactor = 0.0; - int level_height = 0, level_width = 0; + float rfactor = 0.0; + int level_height = 0, level_width = 0; - // Allocate the dimension of the matrices for the evolution - for (int i = 0; i <= options_.omax-1; i++) { - rfactor = 1.0/pow(2.f, i); - level_height = (int)(options_.img_height*rfactor); - level_width = (int)(options_.img_width*rfactor); + // Allocate the dimension of the matrices for the evolution + for (int i = 0; i <= options_.omax - 1; i++) { + rfactor = 1.0 / pow(2.f, i); + level_height = (int)(options_.img_height*rfactor); + level_width = (int)(options_.img_width*rfactor); - // Smallest possible octave and allow one scale if the image is small - if ((level_width < 80 || level_height < 40) && i != 0) { - options_.omax = i; - break; + // Smallest possible octave and allow one scale if the image is small + if ((level_width < 80 || level_height < 40) && i != 0) { + options_.omax = i; + break; + } + + for (int j = 0; j < options_.nsublevels; j++) { + TEvolution step; + step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lxx = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lyy = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lflow = cv::Mat::zeros(level_height, level_width, CV_32F); + step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F); + step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i); + step.sigma_size = fRound(step.esigma); + step.etime = 0.5*(step.esigma*step.esigma); + step.octave = i; + step.sublevel = j; + evolution_.push_back(step); + } } - for (int j = 0; j < options_.nsublevels; j++) { - TEvolution step; - step.Lx = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Ly = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lxx = 
cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lxy = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lyy = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lt = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Ldet = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lflow = cv::Mat::zeros(level_height, level_width, CV_32F); - step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F); - step.esigma = options_.soffset*pow(2.f, (float)(j)/(float)(options_.nsublevels) + i); - step.sigma_size = fRound(step.esigma); - step.etime = 0.5*(step.esigma*step.esigma); - step.octave = i; - step.sublevel = j; - evolution_.push_back(step); + // Allocate memory for the number of cycles and time steps + for (size_t i = 1; i < evolution_.size(); i++) { + int naux = 0; + vector tau; + float ttime = 0.0; + ttime = evolution_[i].etime - evolution_[i - 1].etime; + naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_, tau); + nsteps_.push_back(naux); + tsteps_.push_back(tau); + ncycles_++; } - } - - // Allocate memory for the number of cycles and time steps - for (size_t i = 1; i < evolution_.size(); i++) { - int naux = 0; - vector tau; - float ttime = 0.0; - ttime = evolution_[i].etime-evolution_[i-1].etime; - naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_,tau); - nsteps_.push_back(naux); - tsteps_.push_back(tau); - ncycles_++; - } } /* ************************************************************************* */ @@ -100,364 +100,365 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { * @brief This method creates the nonlinear scale space for a given image * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully, -1 otherwise -*/ + */ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { - double t1 = 0.0, t2 = 0.0; + double t1 = 0.0, t2 = 0.0; - if (evolution_.size() == 0) { - cerr << "Error generating the nonlinear scale space!!" << endl; - cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl; - return -1; - } - - t1 = cv::getTickCount(); - - // Copy the original image to the first level of the evolution - img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); - evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); - - // First compute the kcontrast factor - options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, - 1.0, options_.kcontrast_nbins, 0, 0); - - t2 = cv::getTickCount(); - timing_.kcontrast = 1000.0*(t2-t1) / cv::getTickFrequency(); - - // Now generate the rest of evolution levels - for (size_t i = 1; i < evolution_.size(); i++) { - - if (evolution_[i].octave > evolution_[i-1].octave) { - halfsample_image(evolution_[i-1].Lt, evolution_[i].Lt); - options_.kcontrast = options_.kcontrast*0.75; - } - else { - evolution_[i-1].Lt.copyTo(evolution_[i].Lt); + if (evolution_.size() == 0) { + cerr << "Error generating the nonlinear scale space!!" 
<< endl; + cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl; + return -1; } - gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0); + t1 = cv::getTickCount(); - // Compute the Gaussian derivatives Lx and Ly - image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); - image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1); + // Copy the original image to the first level of the evolution + img.copyTo(evolution_[0].Lt); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset); + evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); - // Compute the conductivity equation - switch (options_.diffusivity) { - case PM_G1: - pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - case PM_G2: - pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - case WEICKERT: - weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - case CHARBONNIER: - charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); - break; - default: - cerr << "Diffusivity: " << options_.diffusivity << " is not supported" << endl; + // First compute the kcontrast factor + options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, + 1.0, options_.kcontrast_nbins, 0, 0); + + t2 = cv::getTickCount(); + timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency(); + + // Now generate the rest of evolution levels + for (size_t i = 1; i < evolution_.size(); i++) { + + if (evolution_[i].octave > evolution_[i - 1].octave) { + halfsample_image(evolution_[i - 1].Lt, evolution_[i].Lt); + options_.kcontrast = options_.kcontrast*0.75; + } + else { + evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); + } + + gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0); + + // Compute the Gaussian derivatives Lx and Ly + image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); + image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1); + + // Compute the conductivity equation + switch (options_.diffusivity) { + case PM_G1: + pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); + break; + case PM_G2: + pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); + break; + case WEICKERT: + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); + break; + case CHARBONNIER: + charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); + break; + default: + cerr << "Diffusivity: " << options_.diffusivity << " is not supported" << endl; + } + + // Perform FED n inner steps + for (int j = 0; j < nsteps_[i - 1]; j++) { + nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); + } } - // Perform FED n inner steps - for (int j = 0; j < nsteps_[i-1]; j++) { - nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i-1][j]); - } - } + t2 = cv::getTickCount(); + timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency(); - t2 = cv::getTickCount(); - timing_.scale = 1000.0*(t2-t1) / cv::getTickFrequency(); - - return 0; + return 0; } /* ************************************************************************* */ /** * @brief This method selects interesting keypoints through the nonlinear scale space * @param 
kpts Vector of detected keypoints -*/ + */ void AKAZEFeatures::Feature_Detection(std::vector& kpts) { - double t1 = 0.0, t2 = 0.0; + double t1 = 0.0, t2 = 0.0; - t1 = cv::getTickCount(); + t1 = cv::getTickCount(); - vector().swap(kpts); - Compute_Determinant_Hessian_Response(); - Find_Scale_Space_Extrema(kpts); - Do_Subpixel_Refinement(kpts); + kpts.clear(); - t2 = cv::getTickCount(); - timing_.detector = 1000.0*(t2-t1) / cv::getTickFrequency(); + Compute_Determinant_Hessian_Response(); + Find_Scale_Space_Extrema(kpts); + Do_Subpixel_Refinement(kpts); + + t2 = cv::getTickCount(); + timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ /** * @brief This method computes the multiscale derivatives for the nonlinear scale space -*/ + */ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { - double t1 = 0.0, t2 = 0.0; + double t1 = 0.0, t2 = 0.0; - t1 = cv::getTickCount(); + t1 = cv::getTickCount(); #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(evolution_.size()); i++) { + for (int i = 0; i < (int)(evolution_.size()); i++) { - float ratio = pow(2.f,(float)evolution_[i].octave); - int sigma_size_ = fRound(evolution_[i].esigma*options_.derivative_factor/ratio); + float ratio = pow(2.f, (float)evolution_[i].octave); + int sigma_size_ = fRound(evolution_[i].esigma*options_.derivative_factor / ratio); - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); - evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); - evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); - evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); - evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); - evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); - } + evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); + evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); + evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); + evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); + evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); + } - t2 = cv::getTickCount(); - timing_.derivatives = 1000.0*(t2-t1) / cv::getTickFrequency(); + t2 = cv::getTickCount(); + timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ /** * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as the feature detector response -*/ + */ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { - // Firstly compute the multiscale derivatives - 
Compute_Multiscale_Derivatives(); + // Firstly compute the multiscale derivatives + Compute_Multiscale_Derivatives(); - for (size_t i = 0; i < evolution_.size(); i++) { - if (options_.verbosity == true) { - cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; - } + for (size_t i = 0; i < evolution_.size(); i++) { + if (options_.verbosity == true) { + cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; + } - for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) { - for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) { - float lxx = *(evolution_[i].Lxx.ptr(ix)+jx); - float lxy = *(evolution_[i].Lxy.ptr(ix)+jx); - float lyy = *(evolution_[i].Lyy.ptr(ix)+jx); - *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy-lxy*lxy); - } + for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) { + for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) { + float lxx = *(evolution_[i].Lxx.ptr(ix)+jx); + float lxy = *(evolution_[i].Lxy.ptr(ix)+jx); + float lyy = *(evolution_[i].Lyy.ptr(ix)+jx); + *(evolution_[i].Ldet.ptr(ix)+jx) = (lxx*lyy - lxy*lxy); + } + } } - } } /* ************************************************************************* */ /** * @brief This method finds extrema in the nonlinear scale space * @param kpts Vector of detected keypoints -*/ + */ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { - double t1 = 0.0, t2 = 0.0; - float value = 0.0; - float dist = 0.0, ratio = 0.0, smax = 0.0; - int npoints = 0, id_repeated = 0; - int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; - bool is_extremum = false, is_repeated = false, is_out = false; - cv::KeyPoint point; - vector kpts_aux; + double t1 = 0.0, t2 = 0.0; + float value = 0.0; + float dist = 0.0, ratio = 0.0, smax = 0.0; + int npoints = 0, id_repeated = 0; + int sigma_size_ = 0, left_x = 0, right_x = 0, up_y = 0, down_y = 0; + bool is_extremum = false, is_repeated = false, is_out = false; + cv::KeyPoint point; + vector kpts_aux; - // Set maximum size - if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF || - options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) { - smax = 10.0*sqrtf(2.0); - } - else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) { - smax = 12.0*sqrtf(2.0); - } - - t1 = cv::getTickCount(); - - for (size_t i = 0; i < evolution_.size(); i++) { - for (int ix = 1; ix < evolution_[i].Ldet.rows-1; ix++) { - for (int jx = 1; jx < evolution_[i].Ldet.cols-1; jx++) { - is_extremum = false; - is_repeated = false; - is_out = false; - value = *(evolution_[i].Ldet.ptr(ix)+jx); - - // Filter the points with the detector threshold - if (value > options_.dthreshold && value >= options_.min_dthreshold && - value > *(evolution_[i].Ldet.ptr(ix)+jx-1) && - value > *(evolution_[i].Ldet.ptr(ix)+jx+1) && - value > *(evolution_[i].Ldet.ptr(ix-1)+jx-1) && - value > *(evolution_[i].Ldet.ptr(ix-1)+jx) && - value > *(evolution_[i].Ldet.ptr(ix-1)+jx+1) && - value > *(evolution_[i].Ldet.ptr(ix+1)+jx-1) && - value > *(evolution_[i].Ldet.ptr(ix+1)+jx) && - value > *(evolution_[i].Ldet.ptr(ix+1)+jx+1)) { - - is_extremum = true; - point.response = fabs(value); - point.size = evolution_[i].esigma*options_.derivative_factor; - point.octave = evolution_[i].octave; - point.class_id = i; - ratio = pow(2.f,point.octave); - sigma_size_ = fRound(point.size/ratio); - point.pt.x = jx; - point.pt.y = ix; - - // Compare response with the same and lower scale - for (size_t 
ik = 0; ik < kpts_aux.size(); ik++) { - - if ((point.class_id-1) == kpts_aux[ik].class_id || - point.class_id == kpts_aux[ik].class_id) { - dist = sqrt(pow(point.pt.x*ratio-kpts_aux[ik].pt.x,2)+pow(point.pt.y*ratio-kpts_aux[ik].pt.y,2)); - if (dist <= point.size) { - if (point.response > kpts_aux[ik].response) { - id_repeated = ik; - is_repeated = true; - } - else { - is_extremum = false; - } - break; - } - } - } - - // Check out of bounds - if (is_extremum == true) { - - // Check that the point is under the image limits for the descriptor computation - left_x = fRound(point.pt.x-smax*sigma_size_)-1; - right_x = fRound(point.pt.x+smax*sigma_size_) +1; - up_y = fRound(point.pt.y-smax*sigma_size_)-1; - down_y = fRound(point.pt.y+smax*sigma_size_)+1; - - if (left_x < 0 || right_x >= evolution_[i].Ldet.cols || - up_y < 0 || down_y >= evolution_[i].Ldet.rows) { - is_out = true; - } - - if (is_out == false) { - if (is_repeated == false) { - point.pt.x *= ratio; - point.pt.y *= ratio; - kpts_aux.push_back(point); - npoints++; - } - else { - point.pt.x *= ratio; - point.pt.y *= ratio; - kpts_aux[id_repeated] = point; - } - } // if is_out - } //if is_extremum - } - } // for jx - } // for ix - } // for i - - // Now filter points with the upper scale level - for (size_t i = 0; i < kpts_aux.size(); i++) { - - is_repeated = false; - const cv::KeyPoint& point = kpts_aux[i]; - for (size_t j = i+1; j < kpts_aux.size(); j++) { - - // Compare response with the upper scale - if ((point.class_id+1) == kpts_aux[j].class_id) { - dist = sqrt(pow(point.pt.x-kpts_aux[j].pt.x,2)+pow(point.pt.y-kpts_aux[j].pt.y,2)); - if (dist <= point.size) { - if (point.response < kpts_aux[j].response) { - is_repeated = true; - break; - } - } - } + // Set maximum size + if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF || + options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) { + smax = 10.0*sqrtf(2.0); + } + else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) { + smax = 12.0*sqrtf(2.0); } - if (is_repeated == false) - kpts.push_back(point); - } + t1 = cv::getTickCount(); - t2 = cv::getTickCount(); - timing_.extrema = 1000.0*(t2-t1) / cv::getTickFrequency(); + for (size_t i = 0; i < evolution_.size(); i++) { + for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) { + for (int jx = 1; jx < evolution_[i].Ldet.cols - 1; jx++) { + is_extremum = false; + is_repeated = false; + is_out = false; + value = *(evolution_[i].Ldet.ptr(ix)+jx); + + // Filter the points with the detector threshold + if (value > options_.dthreshold && value >= options_.min_dthreshold && + value > *(evolution_[i].Ldet.ptr(ix)+jx - 1) && + value > *(evolution_[i].Ldet.ptr(ix)+jx + 1) && + value > *(evolution_[i].Ldet.ptr(ix - 1) + jx - 1) && + value > *(evolution_[i].Ldet.ptr(ix - 1) + jx) && + value > *(evolution_[i].Ldet.ptr(ix - 1) + jx + 1) && + value > *(evolution_[i].Ldet.ptr(ix + 1) + jx - 1) && + value > *(evolution_[i].Ldet.ptr(ix + 1) + jx) && + value > *(evolution_[i].Ldet.ptr(ix + 1) + jx + 1)) { + + is_extremum = true; + point.response = fabs(value); + point.size = evolution_[i].esigma*options_.derivative_factor; + point.octave = evolution_[i].octave; + point.class_id = i; + ratio = pow(2.f, point.octave); + sigma_size_ = fRound(point.size / ratio); + point.pt.x = jx; + point.pt.y = ix; + + // Compare response with the same and lower scale + for (size_t ik = 0; ik < kpts_aux.size(); ik++) { + + if ((point.class_id - 1) == kpts_aux[ik].class_id || + point.class_id == 
kpts_aux[ik].class_id) { + dist = sqrt(pow(point.pt.x*ratio - kpts_aux[ik].pt.x, 2) + pow(point.pt.y*ratio - kpts_aux[ik].pt.y, 2)); + if (dist <= point.size) { + if (point.response > kpts_aux[ik].response) { + id_repeated = ik; + is_repeated = true; + } + else { + is_extremum = false; + } + break; + } + } + } + + // Check out of bounds + if (is_extremum == true) { + + // Check that the point is under the image limits for the descriptor computation + left_x = fRound(point.pt.x - smax*sigma_size_) - 1; + right_x = fRound(point.pt.x + smax*sigma_size_) + 1; + up_y = fRound(point.pt.y - smax*sigma_size_) - 1; + down_y = fRound(point.pt.y + smax*sigma_size_) + 1; + + if (left_x < 0 || right_x >= evolution_[i].Ldet.cols || + up_y < 0 || down_y >= evolution_[i].Ldet.rows) { + is_out = true; + } + + if (is_out == false) { + if (is_repeated == false) { + point.pt.x *= ratio; + point.pt.y *= ratio; + kpts_aux.push_back(point); + npoints++; + } + else { + point.pt.x *= ratio; + point.pt.y *= ratio; + kpts_aux[id_repeated] = point; + } + } // if is_out + } //if is_extremum + } + } // for jx + } // for ix + } // for i + + // Now filter points with the upper scale level + for (size_t i = 0; i < kpts_aux.size(); i++) { + + is_repeated = false; + const cv::KeyPoint& point = kpts_aux[i]; + for (size_t j = i + 1; j < kpts_aux.size(); j++) { + + // Compare response with the upper scale + if ((point.class_id + 1) == kpts_aux[j].class_id) { + dist = sqrt(pow(point.pt.x - kpts_aux[j].pt.x, 2) + pow(point.pt.y - kpts_aux[j].pt.y, 2)); + if (dist <= point.size) { + if (point.response < kpts_aux[j].response) { + is_repeated = true; + break; + } + } + } + } + + if (is_repeated == false) + kpts.push_back(point); + } + + t2 = cv::getTickCount(); + timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ /** * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints -*/ + */ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { - double t1 = 0.0, t2 = 0.0; - float Dx = 0.0, Dy = 0.0, ratio = 0.0; - float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; - int x = 0, y = 0; - cv::Mat A = cv::Mat::zeros(2, 2, CV_32F); - cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); - cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); + double t1 = 0.0, t2 = 0.0; + float Dx = 0.0, Dy = 0.0, ratio = 0.0; + float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; + int x = 0, y = 0; + cv::Mat A = cv::Mat::zeros(2, 2, CV_32F); + cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); + cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); - t1 = cv::getTickCount(); + t1 = cv::getTickCount(); - for (size_t i = 0; i < kpts.size(); i++) { - ratio = pow(2.f,kpts[i].octave); - x = fRound(kpts[i].pt.x/ratio); - y = fRound(kpts[i].pt.y/ratio); + for (size_t i = 0; i < kpts.size(); i++) { + ratio = pow(2.f, kpts[i].octave); + x = fRound(kpts[i].pt.x / ratio); + y = fRound(kpts[i].pt.y / ratio); - // Compute the gradient - Dx = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x+1) - -*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x-1)); - Dy = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x) - -*(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x)); + // Compute the gradient + Dx = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) + - *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1)); + Dy = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) + - *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x)); - // Compute the Hessian - Dxx = 
(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x+1) - + *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x-1) - -2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + // Compute the Hessian + Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) + + *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1) + - 2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); - Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x) - + *(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x) - -2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) + + *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x) + - 2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); - Dxy = (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x+1) - +(*(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x-1))) - -(0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y-1)+x+1) - +(*(evolution_[kpts[i].class_id].Ldet.ptr(y+1)+x-1))); + Dxy = (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x + 1) + + (*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x - 1))) + - (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x + 1) + + (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x - 1))); - // Solve the linear system - *(A.ptr(0)) = Dxx; - *(A.ptr(1)+1) = Dyy; - *(A.ptr(0)+1) = *(A.ptr(1)) = Dxy; - *(b.ptr(0)) = -Dx; - *(b.ptr(1)) = -Dy; + // Solve the linear system + *(A.ptr(0)) = Dxx; + *(A.ptr(1) + 1) = Dyy; + *(A.ptr(0) + 1) = *(A.ptr(1)) = Dxy; + *(b.ptr(0)) = -Dx; + *(b.ptr(1)) = -Dy; - cv::solve(A, b, dst, DECOMP_LU); + cv::solve(A, b, dst, DECOMP_LU); - if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0) { - kpts[i].pt.x = x + (*(dst.ptr(0))); - kpts[i].pt.y = y + (*(dst.ptr(1))); - kpts[i].pt.x *= powf(2.f,evolution_[kpts[i].class_id].octave); - kpts[i].pt.y *= powf(2.f,evolution_[kpts[i].class_id].octave); - kpts[i].angle = 0.0; + if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0) { + kpts[i].pt.x = x + (*(dst.ptr(0))); + kpts[i].pt.y = y + (*(dst.ptr(1))); + kpts[i].pt.x *= powf(2.f, evolution_[kpts[i].class_id].octave); + kpts[i].pt.y *= powf(2.f, evolution_[kpts[i].class_id].octave); + kpts[i].angle = 0.0; - // In OpenCV the size of a keypoint its the diameter - kpts[i].size *= 2.0; + // In OpenCV the size of a keypoint its the diameter + kpts[i].size *= 2.0; + } + // Delete the point since its not stable + else { + kpts.erase(kpts.begin() + i); + i--; + } } - // Delete the point since its not stable - else { - kpts.erase(kpts.begin()+i); - i--; - } - } - t2 = cv::getTickCount(); - timing_.subpixel = 1000.0*(t2-t1) / cv::getTickFrequency(); + t2 = cv::getTickCount(); + timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -465,49 +466,49 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { * @brief This method performs feature suppression based on 2D distance * @param kpts Vector of keypoints * @param mdist Maximum distance in pixels -*/ + */ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, float mdist) const { - vector aux; - vector to_delete; - float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; - bool found = false; + vector aux; + vector to_delete; + float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; + bool found = false; - for (size_t i = 0; i < kpts.size(); i++) { - x1 = kpts[i].pt.x; - y1 = kpts[i].pt.y; - for (size_t j = i+1; j < kpts.size(); j++) { - x2 = kpts[j].pt.x; - y2 = kpts[j].pt.y; - dist = 
sqrt(pow(x1-x2,2)+pow(y1-y2,2)); - if (dist < mdist) { - if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { - to_delete.push_back(j); + for (size_t i = 0; i < kpts.size(); i++) { + x1 = kpts[i].pt.x; + y1 = kpts[i].pt.y; + for (size_t j = i + 1; j < kpts.size(); j++) { + x2 = kpts[j].pt.x; + y2 = kpts[j].pt.y; + dist = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)); + if (dist < mdist) { + if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { + to_delete.push_back(j); + } + else { + to_delete.push_back(i); + break; + } + } } - else { - to_delete.push_back(i); - break; + } + + for (size_t i = 0; i < kpts.size(); i++) { + found = false; + for (size_t j = 0; j < to_delete.size(); j++) { + if (i == (size_t)(to_delete[j])) { + found = true; + break; + } + } + if (found == false) { + aux.push_back(kpts[i]); } - } } - } - for (size_t i = 0; i < kpts.size(); i++) { - found = false; - for (size_t j = 0; j < to_delete.size(); j++) { - if (i == (size_t)(to_delete[j])) { - found = true; - break; - } - } - if (found == false) { - aux.push_back(kpts[i]); - } - } - - kpts.clear(); - kpts = aux; - aux.clear(); + kpts.clear(); + kpts = aux; + aux.clear(); } /* ************************************************************************* */ @@ -515,104 +516,104 @@ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of detected keypoints * @param desc Matrix to store the descriptors -*/ + */ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { - double t1 = 0.0, t2 = 0.0; + double t1 = 0.0, t2 = 0.0; - t1 = cv::getTickCount(); + t1 = cv::getTickCount(); - // Allocate memory for the matrix with the descriptors - if (options_.descriptor < MLDB_UPRIGHT) { - desc = cv::Mat::zeros(kpts.size(), 64, CV_32FC1); - } - else { - // We use the full length binary descriptor -> 486 bits - if (options_.descriptor_size == 0) { - int t = (6+36+120)*options_.descriptor_channels; - desc = cv::Mat::zeros(kpts.size(), ceil(t/8.), CV_8UC1); + // Allocate memory for the matrix with the descriptors + if (options_.descriptor < MLDB_UPRIGHT) { + desc = cv::Mat::zeros(kpts.size(), 64, CV_32FC1); } else { - // We use the random bit selection length binary descriptor - desc = cv::Mat::zeros(kpts.size(), ceil(options_.descriptor_size/8.), CV_8UC1); + // We use the full length binary descriptor -> 486 bits + if (options_.descriptor_size == 0) { + int t = (6 + 36 + 120)*options_.descriptor_channels; + desc = cv::Mat::zeros(kpts.size(), ceil(t / 8.), CV_8UC1); + } + else { + // We use the random bit selection length binary descriptor + desc = cv::Mat::zeros(kpts.size(), ceil(options_.descriptor_size / 8.), CV_8UC1); + } } - } - switch (options_.descriptor) { + switch (options_.descriptor) { - case SURF_UPRIGHT : // Upright descriptors, not invariant to rotation + case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation { #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Get_SURF_Descriptor_Upright_64(kpts[i],desc.ptr(i)); - } + for (int i = 0; i < (int)(kpts.size()); i++) { + Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr(i)); + } } - break; - case SURF : + break; + case SURF: { #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Compute_Main_Orientation(kpts[i]); - Get_SURF_Descriptor_64(kpts[i],desc.ptr(i)); - } + for (int i = 0; i < (int)(kpts.size()); i++) { + 
Compute_Main_Orientation(kpts[i]); + Get_SURF_Descriptor_64(kpts[i], desc.ptr(i)); + } } - break; - case MSURF_UPRIGHT : // Upright descriptors, not invariant to rotation + break; + case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation { #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Get_MSURF_Upright_Descriptor_64(kpts[i],desc.ptr(i)); - } + for (int i = 0; i < (int)(kpts.size()); i++) { + Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + } } - break; - case MSURF : + break; + case MSURF: { #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Compute_Main_Orientation(kpts[i]); - Get_MSURF_Descriptor_64(kpts[i],desc.ptr(i)); - } + for (int i = 0; i < (int)(kpts.size()); i++) { + Compute_Main_Orientation(kpts[i]); + Get_MSURF_Descriptor_64(kpts[i], desc.ptr(i)); + } } - break; - case MLDB_UPRIGHT : // Upright descriptors, not invariant to rotation + break; + case MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(kpts.size()); i++) { - if (options_.descriptor_size == 0) - Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); - else - Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); - } + for (int i = 0; i < (int)(kpts.size()); i++) { + if (options_.descriptor_size == 0) + Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); + else + Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); + } } - break; - case MLDB : + break; + case MLDB: { #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Compute_Main_Orientation(kpts[i]); - if (options_.descriptor_size == 0) - Get_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); - else - Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); - } + for (int i = 0; i < (int)(kpts.size()); i++) { + Compute_Main_Orientation(kpts[i]); + if (options_.descriptor_size == 0) + Get_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); + else + Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); + } + } + break; } - break; - } - t2 = cv::getTickCount(); - timing_.descriptor = 1000.0*(t2-t1) / cv::getTickFrequency(); + t2 = cv::getTickCount(); + timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -621,70 +622,70 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat * @param kpt Input keypoint * @note The orientation is computed using a similar approach as described in the * original SURF method. 
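A quick size check for the descriptor buffers allocated above, in case the constants look opaque: the full M-LDB descriptor compares every pair of cells inside a 2x2, a 3x3 and a 4x4 grid, on each descriptor channel. The numbers below are derived from the code itself; the variable names are illustrative only.

// Pairwise comparisons per grid: C(4,2) = 6, C(9,2) = 36, C(16,2) = 120.
// With the default 3 channels (intensity, dx, dy) that gives 486 bits.
int channels = 3;                          // options_.descriptor_channels
int bits     = (6 + 36 + 120) * channels;  // 486, the "full length" case
int bytes    = (bits + 7) / 8;             // 61 bytes, i.e. ceil(t / 8.) above

So with descriptor_size == 0 each row of desc is 61 bytes, while the SURF and M-SURF paths use 64 floats per keypoint.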
See Bay et al., Speeded Up Robust Features, ECCV 2006 -*/ + */ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt) const { - int ix = 0, iy = 0, idx = 0, s = 0, level = 0; - float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; - std::vector resX(109), resY(109), Ang(109); - const int id[] = {6,5,4,3,2,1,0,1,2,3,4,5,6}; + int ix = 0, iy = 0, idx = 0, s = 0, level = 0; + float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; + std::vector resX(109), resY(109), Ang(109); + const int id[] = { 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6 }; - // Variables for computing the dominant direction - float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; + // Variables for computing the dominant direction + float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; - // Get the information from the keypoint - level = kpt.class_id; - ratio = (float)(1<(iy)+ix)); - resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); + gweight = gauss25[id[i + 6]][id[j + 6]]; + resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); + resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); - Ang[idx] = get_angle(resX[idx],resY[idx]); - ++idx; - } - } - } - - // Loop slides pi/3 window around feature point - for (ang1 = 0; ang1 < 2.0*CV_PI; ang1+=0.15f) { - ang2 =(ang1+CV_PI/3.0f > 2.0*CV_PI ? ang1-5.0f*CV_PI/3.0f : ang1+CV_PI/3.0f); - sumX = sumY = 0.f; - - for (size_t k = 0; k < Ang.size(); ++k) { - // Get angle from the x-axis of the sample point - const float & ang = Ang[k]; - - // Determine whether the point is within the window - if (ang1 < ang2 && ang1 < ang && ang < ang2) { - sumX+=resX[k]; - sumY+=resY[k]; - } - else if (ang2 < ang1 && - ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI) )) { - sumX+=resX[k]; - sumY+=resY[k]; - } + Ang[idx] = get_angle(resX[idx], resY[idx]); + ++idx; + } + } } - // if the vector produced from this window is longer than all - // previous vectors then this forms the new dominant direction - if (sumX*sumX + sumY*sumY > max) { - // store largest orientation - max = sumX*sumX + sumY*sumY; - kpt.angle = get_angle(sumX, sumY); + // Loop slides pi/3 window around feature point + for (ang1 = 0; ang1 < 2.0*CV_PI; ang1 += 0.15f) { + ang2 = (ang1 + CV_PI / 3.0f > 2.0*CV_PI ? ang1 - 5.0f*CV_PI / 3.0f : ang1 + CV_PI / 3.0f); + sumX = sumY = 0.f; + + for (size_t k = 0; k < Ang.size(); ++k) { + // Get angle from the x-axis of the sample point + const float & ang = Ang[k]; + + // Determine whether the point is within the window + if (ang1 < ang2 && ang1 < ang && ang < ang2) { + sumX += resX[k]; + sumY += resY[k]; + } + else if (ang2 < ang1 && + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { + sumX += resX[k]; + sumY += resY[k]; + } + } + + // if the vector produced from this window is longer than all + // previous vectors then this forms the new dominant direction + if (sumX*sumX + sumY*sumY > max) { + // store largest orientation + max = sumX*sumX + sumY*sumY; + kpt.angle = get_angle(sumX, sumY); + } } - } } /* ************************************************************************* */ @@ -694,85 +695,85 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt) const { * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional * Gaussian weighting is performed. 
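The search above can be read as a vote over gradient directions: every sample in a circular neighbourhood of radius 6s around the keypoint contributes a Gaussian-weighted gradient vector, and the pi/3 sector whose summed vector is longest defines the keypoint angle. A compact stand-alone sketch, assuming the resX/resY/Ang arrays have already been filled as in the loop above (the helper name is illustrative, and std::atan2 stands in for get_angle, so the returned range differs):

#include <cmath>
#include <vector>

static float dominant_angle(const std::vector<float>& resX,
                            const std::vector<float>& resY,
                            const std::vector<float>& Ang) {
  const float two_pi = 6.283185307f;
  float best = 0.f, angle = 0.f;

  // Slide a pi/3 window around the circle in 0.15 rad increments
  for (float ang1 = 0.f; ang1 < two_pi; ang1 += 0.15f) {
    float ang2 = ang1 + two_pi / 6.f;
    if (ang2 > two_pi) ang2 -= two_pi;          // window wraps past 2*pi

    float sumX = 0.f, sumY = 0.f;
    for (size_t k = 0; k < Ang.size(); ++k) {
      bool inside = (ang1 < ang2) ? (Ang[k] > ang1 && Ang[k] < ang2)
                                  : (Ang[k] > ang1 || Ang[k] < ang2);
      if (inside) { sumX += resX[k]; sumY += resY[k]; }
    }

    // Keep the window with the longest summed gradient vector
    if (sumX * sumX + sumY * sumY > best) {
      best = sumX * sumX + sumY * sumY;
      angle = std::atan2(sumY, sumX);
    }
  }
  return angle;
}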
The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 -*/ + */ void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) const { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int scale = 0, dsize = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - ratio = (float)(1<(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } } /* ************************************************************************* */ @@ -784,92 +785,92 @@ void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, floa * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional * Gaussian weighting is performed. 
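Each rx/ry sample above is a bilinear read of Lx/Ly at a fractional image position; the blend is the standard four-neighbour formula. A small sketch, assuming a single-channel CV_32F image (the helper name is illustrative, and the patch computes x1/x2/y1/y2 with its own rounding, so treat this as an illustration rather than a drop-in replacement):

#include <opencv2/core/core.hpp>

static float bilinear_at(const cv::Mat& L, float sample_x, float sample_y) {
  // Integer corners around the sample and fractional offsets inside the cell;
  // the caller must keep the sample inside the image, as the detector's
  // border checks guarantee for the keypoints used here.
  int x1 = (int)sample_x,   y1 = (int)sample_y;
  int x2 = x1 + 1,          y2 = y1 + 1;
  float fx = sample_x - x1, fy = sample_y - y1;

  float res1 = L.at<float>(y1, x1), res2 = L.at<float>(y1, x2);
  float res3 = L.at<float>(y2, x1), res4 = L.at<float>(y2, x2);

  // Same weighting as the rx/ry expressions above
  return (1.0f - fx) * (1.0f - fy) * res1 + fx * (1.0f - fy) * res2
       + (1.0f - fx) * fy * res3          + fx * fy * res4;
}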
The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 -*/ + */ void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int scale = 0, dsize = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 10; - // Get the information from the keypoint - ratio = (float)(1<(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + desc[dcount++] = dx; + desc[dcount++] = dy; + desc[dcount++] = mdx; + desc[dcount++] = mdy; + + // Store the current length^2 of the vector + len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; } - } - // convert to unit vector - len = sqrt(len); + // convert to unit vector + len = sqrt(len); - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } } /* 
************************************************************************* */ @@ -881,116 +882,116 @@ void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 -*/ + */ void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int scale = 0, dsize = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int scale = 0, dsize = 0, level = 0; - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 12; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; - // Get the information from the keypoint - ratio = (float)(1<(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - rx = gauss_s1*rx; - ry = gauss_s1*ry; + rx = gauss_s1*rx; + ry = gauss_s1*ry; - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); + // Sum the derivatives to the cumulative descriptor + dx += rx; + dy += ry; + mdx += fabs(rx); + mdy += fabs(ry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; } - } - // Add the values to the descriptor 
vector - gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); - - desc[dcount++] = dx*gauss_s2; - desc[dcount++] = dy*gauss_s2; - desc[dcount++] = mdx*gauss_s2; - desc[dcount++] = mdy*gauss_s2; - - len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; - - j += 9; + i += 9; } - i += 9; - } + // convert to unit vector + len = sqrt(len); - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } } /* ************************************************************************* */ @@ -1002,120 +1003,120 @@ void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, flo * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 -*/ + */ void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; - int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; - int scale = 0, dsize = 0, level = 0; + float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; + float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; + int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; + int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; + int scale = 0, dsize = 0, level = 0; - // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + // Subregion centers for the 4x4 gaussian weighting + float cx = -0.5, cy = 0.5; - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 12; + // Set the descriptor size and the sample and pattern sizes + dsize = 64; + sample_step = 5; + pattern_size = 12; - // Get the information from the keypoint - ratio = (float)(1<(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Lx.ptr(y1)+x1); + res2 = *(evolution_[level].Lx.ptr(y1)+x2); + res3 = *(evolution_[level].Lx.ptr(y2)+x1); + res4 = *(evolution_[level].Lx.ptr(y2)+x2); + rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution_[level].Ly.ptr(y1)+x1); + res2 = *(evolution_[level].Ly.ptr(y1)+x2); + res3 = *(evolution_[level].Ly.ptr(y2)+x1); + res4 = *(evolution_[level].Ly.ptr(y2)+x2); + ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; - // Get the x and y derivatives on the rotated axis - rry = 
gauss_s1*(rx*co + ry*si); - rrx = gauss_s1*(-rx*si + ry*co); + // Get the x and y derivatives on the rotated axis + rry = gauss_s1*(rx*co + ry*si); + rrx = gauss_s1*(-rx*si + ry*co); - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); + // Sum the derivatives to the cumulative descriptor + dx += rrx; + dy += rry; + mdx += fabs(rrx); + mdy += fabs(rry); + } + } + + // Add the values to the descriptor vector + gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f); + desc[dcount++] = dx*gauss_s2; + desc[dcount++] = dy*gauss_s2; + desc[dcount++] = mdx*gauss_s2; + desc[dcount++] = mdy*gauss_s2; + + len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; + + j += 9; } - } - // Add the values to the descriptor vector - gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); - desc[dcount++] = dx*gauss_s2; - desc[dcount++] = dy*gauss_s2; - desc[dcount++] = mdx*gauss_s2; - desc[dcount++] = mdy*gauss_s2; - - len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; - - j += 9; + i += 9; } - i += 9; - } + // convert to unit vector + len = sqrt(len); - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } + for (int i = 0; i < dsize; i++) { + desc[i] /= len; + } } /* ************************************************************************* */ @@ -1124,212 +1125,212 @@ void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc * the provided keypoint * @param kpt Input keypoint * @param desc Descriptor vector -*/ + */ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { - float di = 0.0, dx = 0.0, dy = 0.0; - float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, ratio = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int level = 0, nsamples = 0, scale = 0; - int dcount1 = 0, dcount2 = 0; + float di = 0.0, dx = 0.0, dy = 0.0; + float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, ratio = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int level = 0, nsamples = 0, scale = 0; + int dcount1 = 0, dcount2 = 0; - // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); + // Matrices for the M-LDB descriptor + cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); - // Get the information from the keypoint - ratio = (float)(1<(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); - di += ri; - dx += rx; - dy += ry; - nsamples++; + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_1.ptr(dcount2)) = di; + *(values_1.ptr(dcount2)+1) = dx; + *(values_1.ptr(dcount2)+2) = dy; + dcount2++; } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_1.ptr(dcount2)) = di; - *(values_1.ptr(dcount2)+1) = dx; - *(values_1.ptr(dcount2)+2) = dy; - dcount2++; } - } - // Do 
binary comparison first level - for(int i = 0; i < 4; i++) { - for (int j = i+1; j < 4; j++) { - if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + // Do binary comparison first level + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; - if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; - if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; - } - } - - // Second 3x3 grid - sample_step = ceil(pattern_size*2./3.); - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { - di=dx=dy=0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); - - di += ri; - dx += rx; - dy += ry; - nsamples++; + if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_2.ptr(dcount2)) = di; - *(values_2.ptr(dcount2)+1) = dx; - *(values_2.ptr(dcount2)+2) = dy; - dcount2++; } - } - //Do binary comparison second level - dcount2 = 0; - for (int i = 0; i < 9; i++) { - for (int j = i+1; j < 9; j++) { - if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + // Second 3x3 grid + sample_step = ceil(pattern_size*2. 
/ 3.); + dcount2 = 0; - if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; - if(*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; - } - } + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { - // Third 4x4 grid - sample_step = pattern_size/2; - dcount2 = 0; + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { - di=dx=dy=0.0; - nsamples = 0; + y1 = fRound(sample_y); + x1 = fRound(sample_x); - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } - y1 = fRound(sample_y); - x1 = fRound(sample_x); + di /= nsamples; + dx /= nsamples; + dy /= nsamples; - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); - - di += ri; - dx += rx; - dy += ry; - nsamples++; + *(values_2.ptr(dcount2)) = di; + *(values_2.ptr(dcount2)+1) = dx; + *(values_2.ptr(dcount2)+2) = dy; + dcount2++; } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_3.ptr(dcount2)) = di; - *(values_3.ptr(dcount2)+1) = dx; - *(values_3.ptr(dcount2)+2) = dy; - dcount2++; } - } - //Do binary comparison third level - dcount2 = 0; - for (int i = 0; i < 16; i++) { - for (int j = i+1; j < 16; j++) { - if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + //Do binary comparison second level + dcount2 = 0; + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; - if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; - if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + + // Third 4x4 grid + sample_step = pattern_size / 2; + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + + di += ri; + dx += rx; + dy += ry; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_3.ptr(dcount2)) = di; + 
*(values_3.ptr(dcount2)+1) = dx; + *(values_3.ptr(dcount2)+2) = dy; + dcount2++; + } + } + + //Do binary comparison third level + dcount2 = 0; + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + + if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } } - } } /* ************************************************************************* */ @@ -1338,296 +1339,296 @@ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, un * main orientation of the keypoint * @param kpt Input keypoint * @param desc Descriptor vector -*/ + */ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { - float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; - float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; - int level = 0, nsamples = 0, scale = 0; - int dcount1 = 0, dcount2 = 0; + float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; + float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; + float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; + int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; + int level = 0, nsamples = 0, scale = 0; + int dcount1 = 0, dcount2 = 0; - // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); + // Matrices for the M-LDB descriptor + cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); - // Get the information from the keypoint - ratio = (float)(1<(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); - di += ri; + di += ri; - if (options_.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options_.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - dx += rrx; - dy += rry; - } + if (options_.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options_.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } - nsamples++; + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_1.ptr(dcount2)) = di; + if (options_.descriptor_channels > 1) { + *(values_1.ptr(dcount2)+1) = dx; + } + + if (options_.descriptor_channels > 2) { + *(values_1.ptr(dcount2)+2) = dy; + } + + dcount2++; } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_1.ptr(dcount2)) = di; - if (options_.descriptor_channels > 1 ) { - *(values_1.ptr(dcount2)+1) = dx; - } - - if (options_.descriptor_channels > 2 ) { - 
*(values_1.ptr(dcount2)+2) = dy; - } - - dcount2++; } - } - // Do binary comparison first level - for (int i = 0; i < 4; i++) { - for (int j = i+1; j < 4; j++) { - if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; - } - } - - if (options_.descriptor_channels > 1) { + // Do binary comparison first level for (int i = 0; i < 4; i++) { - for (int j = i+1; j < 4; j++) { - if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { - desc[dcount1/8] |= (1<<(dcount1%8)); + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)) > *(values_1.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; } - - dcount1++; - } } - } - if (options_.descriptor_channels > 2) { - for (int i = 0; i < 4; i++) { - for ( int j = i+1; j < 4; j++) { - if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { - desc[dcount1/8] |= (1<<(dcount1%8)); + if (options_.descriptor_channels > 1) { + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + + dcount1++; + } } - dcount1++; - } } - } - // Second 3x3 grid - sample_step = ceil(pattern_size*2./3.); - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { - - di=dx=dy=0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); - di += ri; - - if (options_.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options_.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - dx += rrx; - dy += rry; - } - - nsamples++; + if (options_.descriptor_channels > 2) { + for (int i = 0; i < 4; i++) { + for (int j = i + 1; j < 4; j++) { + if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_2.ptr(dcount2)) = di; - if (options_.descriptor_channels > 1) { - *(values_2.ptr(dcount2)+1) = dx; - } - - if (options_.descriptor_channels > 2) { - *(values_2.ptr(dcount2)+2) = dy; - } - - dcount2++; } - } - // Do binary comparison second level - for (int i = 0; i < 9; i++) { - for (int j = i+1; j < 9; j++) { - if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + // Second 3x3 grid + sample_step = ceil(pattern_size*2. 
/ 3.); + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + di += ri; + + if (options_.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options_.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_2.ptr(dcount2)) = di; + if (options_.descriptor_channels > 1) { + *(values_2.ptr(dcount2)+1) = dx; + } + + if (options_.descriptor_channels > 2) { + *(values_2.ptr(dcount2)+2) = dy; + } + + dcount2++; + } } - } - if (options_.descriptor_channels > 1) { + // Do binary comparison second level for (int i = 0; i < 9; i++) { - for (int j = i+1; j < 9; j++) { - if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { - desc[dcount1/8] |= (1<<(dcount1%8)); + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)) > *(values_2.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; } - dcount1++; - } } - } - if (options_.descriptor_channels > 2) { - for (int i = 0; i < 9; i++) { - for (int j = i+1; j < 9; j++) { - if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { - desc[dcount1/8] |= (1<<(dcount1%8)); + if (options_.descriptor_channels > 1) { + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } } - dcount1++; - } } - } - // Third 4x4 grid - sample_step = pattern_size/2; - dcount2 = 0; - - for (int i = -pattern_size; i < pattern_size; i+=sample_step) { - for (int j = -pattern_size; j < pattern_size; j+=sample_step) { - di=dx=dy=0.0; - nsamples = 0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = fRound(sample_y); - x1 = fRound(sample_x); - - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); - di += ri; - - if (options_.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options_.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - dx += rrx; - dy += rry; - } - - nsamples++; + if (options_.descriptor_channels > 2) { + for (int i = 0; i < 9; i++) { + for (int j = i + 1; j < 9; j++) { + if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } } - } - - di /= nsamples; - dx /= nsamples; - dy /= nsamples; - - *(values_3.ptr(dcount2)) = di; - if (options_.descriptor_channels > 1) - *(values_3.ptr(dcount2)+1) = dx; - - if (options_.descriptor_channels > 2) - *(values_3.ptr(dcount2)+2) = dy; - - dcount2++; } - } - // Do binary comparison third level - for(int i = 0; i < 16; i++) { - 
for(int j = i+1; j < 16; j++) { - if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { - desc[dcount1/8] |= (1<<(dcount1%8)); - } - dcount1++; + // Third 4x4 grid + sample_step = pattern_size / 2; + dcount2 = 0; + + for (int i = -pattern_size; i < pattern_size; i += sample_step) { + for (int j = -pattern_size; j < pattern_size; j += sample_step) { + di = dx = dy = 0.0; + nsamples = 0; + + for (int k = i; k < i + sample_step; k++) { + for (int l = j; l < j + sample_step; l++) { + + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); + + y1 = fRound(sample_y); + x1 = fRound(sample_x); + + ri = *(evolution_[level].Lt.ptr(y1)+x1); + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); + di += ri; + + if (options_.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options_.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + rry = rx*co + ry*si; + rrx = -rx*si + ry*co; + dx += rrx; + dy += rry; + } + + nsamples++; + } + } + + di /= nsamples; + dx /= nsamples; + dy /= nsamples; + + *(values_3.ptr(dcount2)) = di; + if (options_.descriptor_channels > 1) + *(values_3.ptr(dcount2)+1) = dx; + + if (options_.descriptor_channels > 2) + *(values_3.ptr(dcount2)+2) = dy; + + dcount2++; + } } - } - if (options_.descriptor_channels > 1) { + // Do binary comparison third level for (int i = 0; i < 16; i++) { - for (int j = i+1; j < 16; j++) { - if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { - desc[dcount1/8] |= (1<<(dcount1%8)); + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)) > *(values_3.ptr(j))) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; } - dcount1++; - } } - } - if (options_.descriptor_channels > 2) { - for (int i = 0; i < 16; i++) { - for (int j = i+1; j < 16; j++) { - if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { - desc[dcount1/8] |= (1<<(dcount1%8)); + if (options_.descriptor_channels > 1) { + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } + } + } + + if (options_.descriptor_channels > 2) { + for (int i = 0; i < 16; i++) { + for (int j = i + 1; j < 16; j++) { + if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { + desc[dcount1 / 8] |= (1 << (dcount1 % 8)); + } + dcount1++; + } } - dcount1++; - } } - } } /* ************************************************************************* */ @@ -1637,88 +1638,88 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c * the bits of the whole descriptor * @param kpt Input keypoint * @param desc Descriptor vector -*/ + */ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { - float di = 0.f, dx = 0.f, dy = 0.f; - float rx = 0.f, ry = 0.f; - float sample_x = 0.f, sample_y = 0.f; - int x1 = 0, y1 = 0; + float di = 0.f, dx = 0.f, dy = 0.f; + float rx = 0.f, ry = 0.f; + float sample_x = 0.f, sample_y = 0.f; + int x1 = 0, y1 = 0; - // Get the information from the keypoint - float ratio = (float)(1<::zeros((4+9+16)*options_.descriptor_channels, 1); + // Allocate memory for the matrix of values + cv::Mat values = cv::Mat_::zeros((4 + 9 + 16)*options_.descriptor_channels, 1); - // Sample everything, but only do the comparisons - vector steps(3); - steps.at(0) = options_.descriptor_pattern_size; - steps.at(1) = ceil(2.f*options_.descriptor_pattern_size/3.f); - 
steps.at(2) = options_.descriptor_pattern_size/2; + // Sample everything, but only do the comparisons + vector steps(3); + steps.at(0) = options_.descriptor_pattern_size; + steps.at(1) = ceil(2.f*options_.descriptor_pattern_size / 3.f); + steps.at(2) = options_.descriptor_pattern_size / 2; - for (int i=0; i < descriptorSamples_.rows; i++) { - int *coords = descriptorSamples_.ptr(i); - int sample_step = steps.at(coords[0]); - di=0.0f; - dx=0.0f; - dy=0.0f; + for (int i = 0; i < descriptorSamples_.rows; i++) { + int *coords = descriptorSamples_.ptr(i); + int sample_step = steps.at(coords[0]); + di = 0.0f; + dx = 0.0f; + dy = 0.0f; - for (int k = coords[1]; k < coords[1] + sample_step; k++) { - for (int l = coords[2]; l < coords[2] + sample_step; l++) { + for (int k = coords[1]; k < coords[1] + sample_step; k++) { + for (int l = coords[2]; l < coords[2] + sample_step; l++) { - // Get the coordinates of the sample point - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); + // Get the coordinates of the sample point + sample_y = yf + (l*scale*co + k*scale*si); + sample_x = xf + (-l*scale*si + k*scale*co); - y1 = fRound(sample_y); - x1 = fRound(sample_x); + y1 = fRound(sample_y); + x1 = fRound(sample_x); - di += *(evolution_[level].Lt.ptr(y1)+x1); + di += *(evolution_[level].Lt.ptr(y1)+x1); - if (options_.descriptor_channels > 1) { - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + if (options_.descriptor_channels > 1) { + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); - if (options_.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options_.descriptor_channels == 3) { - // Get the x and y derivatives on the rotated axis - dx += rx*co + ry*si; - dy += -rx*si + ry*co; - } + if (options_.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options_.descriptor_channels == 3) { + // Get the x and y derivatives on the rotated axis + dx += rx*co + ry*si; + dy += -rx*si + ry*co; + } + } + } + } + + *(values.ptr(options_.descriptor_channels*i)) = di; + + if (options_.descriptor_channels == 2) { + *(values.ptr(options_.descriptor_channels*i + 1)) = dx; + } + else if (options_.descriptor_channels == 3) { + *(values.ptr(options_.descriptor_channels*i + 1)) = dx; + *(values.ptr(options_.descriptor_channels*i + 2)) = dy; } - } } - *(values.ptr(options_.descriptor_channels*i)) = di; + // Do the comparisons + const float *vals = values.ptr(0); + const int *comps = descriptorBits_.ptr(0); - if (options_.descriptor_channels == 2) { - *(values.ptr(options_.descriptor_channels*i+1)) = dx; + for (int i = 0; i vals[comps[2 * i + 1]]) { + desc[i / 8] |= (1 << (i % 8)); + } } - else if (options_.descriptor_channels == 3) { - *(values.ptr(options_.descriptor_channels*i+1)) = dx; - *(values.ptr(options_.descriptor_channels*i+2)) = dy; - } - } - - // Do the comparisons - const float *vals = values.ptr(0); - const int *comps = descriptorBits_.ptr(0); - - for (int i=0; i vals[comps[2*i +1]]) { - desc[i/8] |= (1<<(i%8)); - } - } } /* ************************************************************************* */ @@ -1728,80 +1729,80 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned * The descriptor is computed based on a subset of the bits of the whole descriptor * @param kpt Input keypoint * @param desc Descriptor vector -*/ + */ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { - float 
di = 0.0f, dx = 0.0f, dy = 0.0f; - float rx = 0.0f, ry = 0.0f; - float sample_x = 0.0f, sample_y = 0.0f; - int x1 = 0, y1 = 0; + float di = 0.0f, dx = 0.0f, dy = 0.0f; + float rx = 0.0f, ry = 0.0f; + float sample_x = 0.0f, sample_y = 0.0f; + int x1 = 0, y1 = 0; - // Get the information from the keypoint - float ratio = (float)(1<::zeros((4+9+16)*options_.descriptor_channels, 1); + // Allocate memory for the matrix of values + Mat values = cv::Mat_::zeros((4 + 9 + 16)*options_.descriptor_channels, 1); - vector steps(3); - steps.at(0) = options_.descriptor_pattern_size; - steps.at(1) = ceil(2.f*options_.descriptor_pattern_size/3.f); - steps.at(2) = options_.descriptor_pattern_size/2; + vector steps(3); + steps.at(0) = options_.descriptor_pattern_size; + steps.at(1) = ceil(2.f*options_.descriptor_pattern_size / 3.f); + steps.at(2) = options_.descriptor_pattern_size / 2; - for (int i=0; i < descriptorSamples_.rows; i++) { - int *coords = descriptorSamples_.ptr(i); - int sample_step = steps.at(coords[0]); - di=0.0f, dx=0.0f, dy=0.0f; + for (int i = 0; i < descriptorSamples_.rows; i++) { + int *coords = descriptorSamples_.ptr(i); + int sample_step = steps.at(coords[0]); + di = 0.0f, dx = 0.0f, dy = 0.0f; - for (int k = coords[1]; k < coords[1] + sample_step; k++) { - for (int l = coords[2]; l < coords[2] + sample_step; l++) { + for (int k = coords[1]; k < coords[1] + sample_step; k++) { + for (int l = coords[2]; l < coords[2] + sample_step; l++) { - // Get the coordinates of the sample point - sample_y = yf + l*scale; - sample_x = xf + k*scale; + // Get the coordinates of the sample point + sample_y = yf + l*scale; + sample_x = xf + k*scale; - y1 = fRound(sample_y); - x1 = fRound(sample_x); - di += *(evolution_[level].Lt.ptr(y1)+x1); + y1 = fRound(sample_y); + x1 = fRound(sample_x); + di += *(evolution_[level].Lt.ptr(y1)+x1); - if (options_.descriptor_channels > 1) { - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + if (options_.descriptor_channels > 1) { + rx = *(evolution_[level].Lx.ptr(y1)+x1); + ry = *(evolution_[level].Ly.ptr(y1)+x1); - if (options_.descriptor_channels == 2) { - dx += sqrtf(rx*rx + ry*ry); - } - else if (options_.descriptor_channels == 3) { - dx += rx; - dy += ry; - } + if (options_.descriptor_channels == 2) { + dx += sqrtf(rx*rx + ry*ry); + } + else if (options_.descriptor_channels == 3) { + dx += rx; + dy += ry; + } + } + } + } + + *(values.ptr(options_.descriptor_channels*i)) = di; + + if (options_.descriptor_channels == 2) { + *(values.ptr(options_.descriptor_channels*i + 1)) = dx; + } + else if (options_.descriptor_channels == 3) { + *(values.ptr(options_.descriptor_channels*i + 1)) = dx; + *(values.ptr(options_.descriptor_channels*i + 2)) = dy; } - } } - *(values.ptr(options_.descriptor_channels*i)) = di; + // Do the comparisons + const float *vals = values.ptr(0); + const int *comps = descriptorBits_.ptr(0); - if (options_.descriptor_channels == 2) { - *(values.ptr(options_.descriptor_channels*i+1)) = dx; + for (int i = 0; i vals[comps[2 * i + 1]]) { + desc[i / 8] |= (1 << (i % 8)); + } } - else if (options_.descriptor_channels == 3) { - *(values.ptr(options_.descriptor_channels*i+1)) = dx; - *(values.ptr(options_.descriptor_channels*i+2)) = dy; - } - } - - // Do the comparisons - const float *vals = values.ptr(0); - const int *comps = descriptorBits_.ptr(0); - - for (int i=0; i vals[comps[2*i +1]]) { - desc[i/8] |= (1<<(i%8)); - } - } } @@ -1809,15 +1810,15 @@ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const 
cv::KeyPoint& kpt, /* ************************************************************************* */ /** * @brief This method displays the computation times -*/ + */ void AKAZEFeatures::Show_Computation_Times() const { - cout << "(*) Time Scale Space: " << timing_.scale << endl; - cout << "(*) Time Detector: " << timing_.detector << endl; - cout << " - Time Derivatives: " << timing_.derivatives << endl; - cout << " - Time Extrema: " << timing_.extrema << endl; - cout << " - Time Subpixel: " << timing_.subpixel << endl; - cout << "(*) Time Descriptor: " << timing_.descriptor << endl; - cout << endl; + cout << "(*) Time Scale Space: " << timing_.scale << endl; + cout << "(*) Time Detector: " << timing_.detector << endl; + cout << " - Time Derivatives: " << timing_.derivatives << endl; + cout << " - Time Extrema: " << timing_.extrema << endl; + cout << " - Time Subpixel: " << timing_.subpixel << endl; + cout << "(*) Time Descriptor: " << timing_.descriptor << endl; + cout << endl; } /* ************************************************************************* */ @@ -1835,131 +1836,131 @@ void AKAZEFeatures::Show_Computation_Times() const { * coarser grid, since it provides the most robust estimations */ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int nbits, - int pattern_size, int nchannels) { + int pattern_size, int nchannels) { - int ssz = 0; - for (int i=0; i<3; i++) { - int gz = (i+2)*(i+2); - ssz += gz*(gz-1)/2; - } - ssz *= nchannels; - - CV_Assert(nbits<=ssz && "descriptor size can't be bigger than full descriptor"); - - // Since the full descriptor is usually under 10k elements, we pick - // the selection from the full matrix. We take as many samples per - // pick as the number of channels. For every pick, we - // take the two samples involved and put them in the sampling list - - Mat_ fullM(ssz/nchannels,5); - for (size_t i=0, c=0; i<3; i++) { - int gdiv = i+2; //grid divisions, per row - int gsz = gdiv*gdiv; - int psz = ceil(2.*pattern_size/(float)gdiv); - - for (int j=0; j comps = Mat_(nchannels*ceil(nbits/(float)nchannels),2); - comps = 1000; + CV_Assert(nbits <= ssz && "descriptor size can't be bigger than full descriptor"); - // Select some samples. A sample includes all channels - int count =0; - size_t npicks = ceil(nbits/(float)nchannels); - Mat_ samples(29,3); - Mat_ fullcopy = fullM.clone(); - samples = -1; + // Since the full descriptor is usually under 10k elements, we pick + // the selection from the full matrix. We take as many samples per + // pick as the number of channels. 
For every pick, we + // take the two samples involved and put them in the sampling list - for (size_t i=0; i fullM(ssz / nchannels, 5); + for (size_t i = 0, c = 0; i < 3; i++) { + int gdiv = i + 2; //grid divisions, per row + int gsz = gdiv*gdiv; + int psz = ceil(2.*pattern_size / (float)gdiv); + + for (int j = 0; j < gsz; j++) { + for (int k = j + 1; k < gsz; k++, c++) { + fullM(c, 0) = i; + fullM(c, 1) = psz*(j % gdiv) - pattern_size; + fullM(c, 2) = psz*(j / gdiv) - pattern_size; + fullM(c, 3) = psz*(k % gdiv) - pattern_size; + fullM(c, 4) = psz*(k / gdiv) - pattern_size; + } + } } - bool n = true; + srand(1024); + Mat_ comps = Mat_(nchannels*ceil(nbits / (float)nchannels), 2); + comps = 1000; - for (int j=0; j samples(29, 3); + Mat_ fullcopy = fullM.clone(); + samples = -1; + + for (size_t i = 0; i < npicks; i++) { + size_t k = rand() % (fullM.rows - i); + if (i < 6) { + // Force use of the coarser grid values and comparisons + k = i; + } + + bool n = true; + + for (int j = 0; j < count; j++) { + if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 1) && samples(j, 2) == fullcopy(k, 2)) { + n = false; + comps(i*nchannels, 0) = nchannels*j; + comps(i*nchannels + 1, 0) = nchannels*j + 1; + comps(i*nchannels + 2, 0) = nchannels*j + 2; + break; + } + } + + if (n) { + samples(count, 0) = fullcopy(k, 0); + samples(count, 1) = fullcopy(k, 1); + samples(count, 2) = fullcopy(k, 2); + comps(i*nchannels, 0) = nchannels*count; + comps(i*nchannels + 1, 0) = nchannels*count + 1; + comps(i*nchannels + 2, 0) = nchannels*count + 2; + count++; + } + + n = true; + for (int j = 0; j < count; j++) { + if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 3) && samples(j, 2) == fullcopy(k, 4)) { + n = false; + comps(i*nchannels, 1) = nchannels*j; + comps(i*nchannels + 1, 1) = nchannels*j + 1; + comps(i*nchannels + 2, 1) = nchannels*j + 2; + break; + } + } + + if (n) { + samples(count, 0) = fullcopy(k, 0); + samples(count, 1) = fullcopy(k, 3); + samples(count, 2) = fullcopy(k, 4); + comps(i*nchannels, 1) = nchannels*count; + comps(i*nchannels + 1, 1) = nchannels*count + 1; + comps(i*nchannels + 2, 1) = nchannels*count + 2; + count++; + } + + Mat tmp = fullcopy.row(k); + fullcopy.row(fullcopy.rows - i - 1).copyTo(tmp); } - if (n) { - samples(count,0) = fullcopy(k,0); - samples(count,1) = fullcopy(k,1); - samples(count,2) = fullcopy(k,2); - comps(i*nchannels,0) = nchannels*count; - comps(i*nchannels+1,0) = nchannels*count+1; - comps(i*nchannels+2,0) = nchannels*count+2; - count++; - } - - n = true; - for (int j=0; j= 0 && y >= 0) { - return atanf(y/x); - } + if (x >= 0 && y >= 0) { + return atanf(y / x); + } - if (x < 0 && y >= 0) { - return CV_PI - atanf(-y/x); - } + if (x < 0 && y >= 0) { + return CV_PI - atanf(-y / x); + } - if (x < 0 && y < 0) { - return CV_PI + atanf(y/x); - } + if (x < 0 && y < 0) { + return CV_PI + atanf(y / x); + } - if(x >= 0 && y < 0) { - return 2.0*CV_PI - atanf(-y/x); - } + if (x >= 0 && y < 0) { + return 2.0*CV_PI - atanf(-y / x); + } - return 0; + return 0; } /* ************************************************************************* */ @@ -1968,9 +1969,9 @@ inline float get_angle(float x, float y) { * @param x X Position * @param y Y Position * @param sig Standard Deviation -*/ + */ inline float gaussian(float x, float y, float sigma) { - return expf(-(x*x+y*y)/(2.0f*sigma*sigma)); + return expf(-(x*x + y*y) / (2.0f*sigma*sigma)); } /* ************************************************************************* */ @@ -1980,24 +1981,24 @@ inline float 
gaussian(float x, float y, float sigma) { * @param y Y Position * @param width Image width * @param height Image height -*/ + */ inline void check_descriptor_limits(int &x, int &y, int width, int height) { - if (x < 0) { - x = 0; - } + if (x < 0) { + x = 0; + } - if (y < 0) { - y = 0; - } + if (y < 0) { + y = 0; + } - if (x > width-1) { - x = width-1; - } + if (x > width - 1) { + x = width - 1; + } - if (y > height-1) { - y = height-1; - } + if (y > height - 1) { + y = height - 1; + } } /* ************************************************************************* */ @@ -2007,6 +2008,6 @@ inline void check_descriptor_limits(int &x, int &y, int width, int height) { * @return dst Nearest integer */ inline int fRound(float flt) { - return (int)(flt+0.5f); + return (int)(flt + 0.5f); } From d8c9bb777ef4a4a9c4eed522d7dde7aba21cbaa8 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 26 Apr 2014 23:34:07 +0300 Subject: [PATCH 12/52] Fix return value of descriptorType() --- modules/features2d/src/akaze.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 8cba3b6d20..2083ce8d7f 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -45,11 +45,11 @@ namespace cv { if (descriptor < MLDB_UPRIGHT) { - return CV_32FC1; + return CV_32F; } else { - return CV_8UC1; + return CV_8U; } } From d37220e8fff44fa3bb6eea839620b18f0cb46b3a Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 26 Apr 2014 23:34:38 +0300 Subject: [PATCH 13/52] Clean-up of the iostream manipulator in AKAZEConfig. --- modules/features2d/src/akaze/AKAZEConfig.h | 221 +++++++++------------ 1 file changed, 93 insertions(+), 128 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/akaze/AKAZEConfig.h index d82b0f4271..7fed80e2ce 100644 --- a/modules/features2d/src/akaze/AKAZEConfig.h +++ b/modules/features2d/src/akaze/AKAZEConfig.h @@ -11,173 +11,138 @@ // OpenCV #include "precomp.hpp" -// System Includes -#include -#include -#include -#include -#include - /* ************************************************************************* */ /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right const float gauss25[7][7] = { - {0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f}, - {0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f}, - {0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f}, - {0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f}, - {0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f}, - {0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f}, - {0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f} + { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f }, + { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f }, + { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f }, + { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f }, + { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f }, + { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 
0.00046640f, 0.00019346f }, + { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f } }; /* ************************************************************************* */ /// AKAZE Descriptor Type enum DESCRIPTOR_TYPE { - SURF_UPRIGHT = 0, ///< Upright descriptors, not invariant to rotation - SURF = 1, - MSURF_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation - MSURF = 3, - MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation - MLDB = 5 + SURF_UPRIGHT = 0, ///< Upright descriptors, not invariant to rotation + SURF = 1, + MSURF_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + MSURF = 3, + MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + MLDB = 5 }; /* ************************************************************************* */ /// AKAZE Diffusivities enum DIFFUSIVITY_TYPE { - PM_G1 = 0, - PM_G2 = 1, - WEICKERT = 2, - CHARBONNIER = 3 + PM_G1 = 0, + PM_G2 = 1, + WEICKERT = 2, + CHARBONNIER = 3 }; /* ************************************************************************* */ /// AKAZE Timing structure struct AKAZETiming { - AKAZETiming() { - kcontrast = 0.0; - scale = 0.0; - derivatives = 0.0; - detector = 0.0; - extrema = 0.0; - subpixel = 0.0; - descriptor = 0.0; - } + AKAZETiming() { + kcontrast = 0.0; + scale = 0.0; + derivatives = 0.0; + detector = 0.0; + extrema = 0.0; + subpixel = 0.0; + descriptor = 0.0; + } - double kcontrast; ///< Contrast factor computation time in ms - double scale; ///< Nonlinear scale space computation time in ms - double derivatives; ///< Multiscale derivatives computation time in ms - double detector; ///< Feature detector computation time in ms - double extrema; ///< Scale space extrema computation time in ms - double subpixel; ///< Subpixel refinement computation time in ms - double descriptor; ///< Descriptors computation time in ms + double kcontrast; ///< Contrast factor computation time in ms + double scale; ///< Nonlinear scale space computation time in ms + double derivatives; ///< Multiscale derivatives computation time in ms + double detector; ///< Feature detector computation time in ms + double extrema; ///< Scale space extrema computation time in ms + double subpixel; ///< Subpixel refinement computation time in ms + double descriptor; ///< Descriptors computation time in ms }; /* ************************************************************************* */ /// AKAZE configuration options structure struct AKAZEOptions { - AKAZEOptions() { - soffset = 1.6f; - derivative_factor = 1.5f; - omax = 4; - nsublevels = 4; - dthreshold = 0.001f; - min_dthreshold = 0.00001f; + AKAZEOptions() { + soffset = 1.6f; + derivative_factor = 1.5f; + omax = 4; + nsublevels = 4; + dthreshold = 0.001f; + min_dthreshold = 0.00001f; - diffusivity = PM_G2; - descriptor = MLDB; - descriptor_size = 0; - descriptor_channels = 3; - descriptor_pattern_size = 10; - sderivatives = 1.0; + diffusivity = PM_G2; + descriptor = MLDB; + descriptor_size = 0; + descriptor_channels = 3; + descriptor_pattern_size = 10; + sderivatives = 1.0; - kcontrast = 0.001f; - kcontrast_percentile = 0.7f; - kcontrast_nbins = 300; + kcontrast = 0.001f; + kcontrast_percentile = 0.7f; + kcontrast_nbins = 300; - save_scale_space = false; - save_keypoints = false; - verbosity = false; - } + save_scale_space = false; + save_keypoints = false; + verbosity = false; + } - int omin; ///< Initial octave level (-1 means that the size of the input image is duplicated) - int omax; ///< Maximum octave evolution 
of the image 2^sigma (coarsest scale sigma units) - int nsublevels; ///< Default number of sublevels per scale level - int img_width; ///< Width of the input image - int img_height; ///< Height of the input image - float soffset; ///< Base scale offset (sigma units) - float derivative_factor; ///< Factor for the multiscale derivatives - float sderivatives; ///< Smoothing factor for the derivatives - DIFFUSIVITY_TYPE diffusivity; ///< Diffusivity type + int omin; ///< Initial octave level (-1 means that the size of the input image is duplicated) + int omax; ///< Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) + int nsublevels; ///< Default number of sublevels per scale level + int img_width; ///< Width of the input image + int img_height; ///< Height of the input image + float soffset; ///< Base scale offset (sigma units) + float derivative_factor; ///< Factor for the multiscale derivatives + float sderivatives; ///< Smoothing factor for the derivatives + DIFFUSIVITY_TYPE diffusivity; ///< Diffusivity type - float dthreshold; ///< Detector response threshold to accept point - float min_dthreshold; ///< Minimum detector threshold to accept a point + float dthreshold; ///< Detector response threshold to accept point + float min_dthreshold; ///< Minimum detector threshold to accept a point - DESCRIPTOR_TYPE descriptor; ///< Type of descriptor - int descriptor_size; ///< Size of the descriptor in bits. 0->Full size - int descriptor_channels; ///< Number of channels in the descriptor (1, 2, 3) - int descriptor_pattern_size; ///< Actual patch size is 2*pattern_size*point.scale + DESCRIPTOR_TYPE descriptor; ///< Type of descriptor + int descriptor_size; ///< Size of the descriptor in bits. 0->Full size + int descriptor_channels; ///< Number of channels in the descriptor (1, 2, 3) + int descriptor_pattern_size; ///< Actual patch size is 2*pattern_size*point.scale - float kcontrast; ///< The contrast factor parameter - float kcontrast_percentile; ///< Percentile level for the contrast factor - size_t kcontrast_nbins; ///< Number of bins for the contrast factor histogram + float kcontrast; ///< The contrast factor parameter + float kcontrast_percentile; ///< Percentile level for the contrast factor + size_t kcontrast_nbins; ///< Number of bins for the contrast factor histogram - bool save_scale_space; ///< Set to true for saving the scale space images - bool save_keypoints; ///< Set to true for saving the detected keypoints and descriptors - bool verbosity; ///< Set to true for displaying verbosity information - - friend std::ostream& operator<<(std::ostream& os, - const AKAZEOptions& akaze_options) { - - os << std::left; -#define CHECK_AKAZE_OPTION(option) \ - os << std::setw(33) << #option << " = " << option << std::endl - - // Scale-space parameters. - CHECK_AKAZE_OPTION(akaze_options.omax); - CHECK_AKAZE_OPTION(akaze_options.nsublevels); - CHECK_AKAZE_OPTION(akaze_options.soffset); - CHECK_AKAZE_OPTION(akaze_options.sderivatives); - CHECK_AKAZE_OPTION(akaze_options.diffusivity); - // Detection parameters. - CHECK_AKAZE_OPTION(akaze_options.dthreshold); - // Descriptor parameters. - CHECK_AKAZE_OPTION(akaze_options.descriptor); - CHECK_AKAZE_OPTION(akaze_options.descriptor_channels); - CHECK_AKAZE_OPTION(akaze_options.descriptor_size); - // Save scale-space - CHECK_AKAZE_OPTION(akaze_options.save_scale_space); - // Verbose option for debug. 
- CHECK_AKAZE_OPTION(akaze_options.verbosity); -#undef CHECK_AKAZE_OPTIONS - - return os; - } + bool save_scale_space; ///< Set to true for saving the scale space images + bool save_keypoints; ///< Set to true for saving the detected keypoints and descriptors + bool verbosity; ///< Set to true for displaying verbosity information }; /* ************************************************************************* */ /// AKAZE nonlinear diffusion filtering evolution struct TEvolution { - TEvolution() { - etime = 0.0f; - esigma = 0.0f; - octave = 0; - sublevel = 0; - sigma_size = 0; - } + TEvolution() { + etime = 0.0f; + esigma = 0.0f; + octave = 0; + sublevel = 0; + sigma_size = 0; + } - cv::Mat Lx, Ly; // First order spatial derivatives - cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives - cv::Mat Lflow; // Diffusivity image - cv::Mat Lt; // Evolution image - cv::Mat Lsmooth; // Smoothed image - cv::Mat Lstep; // Evolution step update - cv::Mat Ldet; // Detector response - float etime; // Evolution time - float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 - size_t octave; // Image octave - size_t sublevel; // Image sublevel in each octave - size_t sigma_size; // Integer sigma. For computing the feature detector responses + cv::Mat Lx, Ly; // First order spatial derivatives + cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives + cv::Mat Lflow; // Diffusivity image + cv::Mat Lt; // Evolution image + cv::Mat Lsmooth; // Smoothed image + cv::Mat Lstep; // Evolution step update + cv::Mat Ldet; // Detector response + float etime; // Evolution time + float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 + size_t octave; // Image octave + size_t sublevel; // Image sublevel in each octave + size_t sigma_size; // Integer sigma. 
For computing the feature detector responses }; \ No newline at end of file From 503bd3896384c74c78ab17877b0a72c7dc4228da Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sun, 27 Apr 2014 22:28:18 +0300 Subject: [PATCH 14/52] Enable multithreaded extraction of features using cv::parallel_for_ --- modules/features2d/src/akaze/AKAZE.cpp | 409 +++++++++++++++++++++---- modules/features2d/src/akaze/AKAZE.h | 19 +- 2 files changed, 359 insertions(+), 69 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 617a16d4ee..a61971a910 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -194,6 +194,43 @@ void AKAZEFeatures::Feature_Detection(std::vector& kpts) { } /* ************************************************************************* */ + +class MultiscaleDerivativesInvoker : public cv::ParallelLoopBody +{ +public: + explicit MultiscaleDerivativesInvoker(std::vector& ev, const AKAZEOptions& opt) + : evolution_(ev) + , options_(opt) + { + } + + + void operator()(const cv::Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + float ratio = pow(2.f, (float)evolution_[i].octave); + int sigma_size_ = fRound(evolution_[i].esigma * options_.derivative_factor / ratio); + + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); + + evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); + evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); + evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); + evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); + evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); + } + } + +private: + mutable std::vector & evolution_; + AKAZEOptions options_; +}; + /** * @brief This method computes the multiscale derivatives for the nonlinear scale space */ @@ -203,9 +240,8 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { t1 = cv::getTickCount(); -#ifdef _OPENMP -#pragma omp parallel for -#endif + cv::parallel_for_(cv::Range(0, evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_)); + /* for (int i = 0; i < (int)(evolution_.size()); i++) { float ratio = pow(2.f, (float)evolution_[i].octave); @@ -223,7 +259,7 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); } - + */ t2 = cv::getTickCount(); timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency(); } @@ -512,6 +548,259 @@ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts } /* ************************************************************************* */ + +class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody +{ +public: + SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + 
Get_SURF_Descriptor_Upright_64(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; +}; + +class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody +{ +public: + SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + { + } + + void operator()(const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); + Get_SURF_Descriptor_64(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; +}; + +class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody +{ +public: + MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + { + } + + void operator()(const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + Get_MSURF_Upright_Descriptor_64(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; + +}; + +class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody +{ +public: + MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); + Get_MSURF_Descriptor_64(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; + +}; + +class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody +{ +public: + Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + Get_Upright_MLDB_Full_Descriptor(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; + +}; + +class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody +{ +public: + Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, + cv::Mat& desc, + const std::vector& evolution, + const AKAZEOptions& options, + cv::Mat descriptorSamples, + cv::Mat descriptorBits) + : 
evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + , descriptorSamples_(descriptorSamples) + , descriptorBits_(descriptorBits) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + Get_Upright_MLDB_Descriptor_Subset(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; + + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. + cv::Mat descriptorBits_; +}; + +class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody +{ +public: + MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); + Get_MLDB_Full_Descriptor(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; + +}; + +class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody +{ +public: + MLDB_Descriptor_Subset_Invoker(std::vector& kpts, + cv::Mat& desc, + const std::vector& evolution, + const AKAZEOptions& options, + cv::Mat descriptorSamples, + cv::Mat descriptorBits) + : evolution_(evolution) + , options_(options) + , keypoints_(kpts) + , descriptors_(desc) + , descriptorSamples_(descriptorSamples) + , descriptorBits_(descriptorBits) + { + } + + void operator() (const Range& range) const + { + for (int i = range.start; i < range.end; i++) + { + AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); + Get_MLDB_Descriptor_Subset(keypoints_[i], descriptors_.ptr(i)); + } + } + + void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; + +private: + std::vector& keypoints_; + cv::Mat& descriptors_; + const std::vector& evolution_; + const AKAZEOptions& options_; + + cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
+ cv::Mat descriptorBits_; +}; + /** * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of detected keypoints @@ -543,71 +832,71 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr(i)); - } + cv::parallel_for_(cv::Range(0, kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_, options_)); + + //for (int i = 0; i < (int)(kpts.size()); i++) { + // Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr(i)); + //} } break; case SURF: { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Compute_Main_Orientation(kpts[i]); - Get_SURF_Descriptor_64(kpts[i], desc.ptr(i)); - } + cv::parallel_for_(cv::Range(0, kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + + //for (int i = 0; i < (int)(kpts.size()); i++) { + // Compute_Main_Orientation(kpts[i]); + // Get_SURF_Descriptor_64(kpts[i], desc.ptr(i)); + //} } break; case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); - } + cv::parallel_for_(cv::Range(0, kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + + //for (int i = 0; i < (int)(kpts.size()); i++) { + // Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + //} } break; case MSURF: { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Compute_Main_Orientation(kpts[i]); - Get_MSURF_Descriptor_64(kpts[i], desc.ptr(i)); - } + cv::parallel_for_(cv::Range(0, kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + + //for (int i = 0; i < (int)(kpts.size()); i++) { + // Compute_Main_Orientation(kpts[i]); + // Get_MSURF_Descriptor_64(kpts[i], desc.ptr(i)); + //} } break; case MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (int i = 0; i < (int)(kpts.size()); i++) { - if (options_.descriptor_size == 0) - Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); - else - Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); - } + if (options_.descriptor_size == 0) + cv::parallel_for_(cv::Range(0, kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + else + cv::parallel_for_(cv::Range(0, kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + + //for (int i = 0; i < (int)(kpts.size()); i++) { + // if (options_.descriptor_size == 0) + // Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); + // else + // Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); + //} } break; case MLDB: { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (int i = 0; i < (int)(kpts.size()); i++) { - Compute_Main_Orientation(kpts[i]); - if (options_.descriptor_size == 0) - Get_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); - else - Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); - } + if (options_.descriptor_size == 0) + cv::parallel_for_(cv::Range(0, kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + else + cv::parallel_for_(cv::Range(0, 
kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + + //for (int i = 0; i < (int)(kpts.size()); i++) { + // Compute_Main_Orientation(kpts[i]); + // if (options_.descriptor_size == 0) + // Get_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); + // else + // Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); + //} } break; } @@ -623,7 +912,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat * @note The orientation is computed using a similar approach as described in the * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt) const { +void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_) { int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0, ratio = 0.0; @@ -696,7 +985,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt) const { * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) const { +void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -786,7 +1075,7 @@ void AKAZEFeatures::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, floa * Gaussian weighting is performed. The descriptor is inspired from Bay et al., * Speeded Up Robust Features, ECCV, 2006 */ -void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { +void SURF_Descriptor_64_Invoker::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -883,7 +1172,7 @@ void AKAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { +void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1004,7 +1293,7 @@ void AKAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, flo * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { +void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1126,7 +1415,7 @@ void AKAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { +void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& 
kpt, unsigned char *desc) const { float di = 0.0, dx = 0.0, dy = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0; @@ -1340,7 +1629,7 @@ void AKAZEFeatures::Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, un * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { +void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char *desc) const { float di = 0.0, dx = 0.0, dy = 0.0, ratio = 0.0; float ri = 0.0, rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, xf = 0.0, yf = 0.0; @@ -1639,7 +1928,7 @@ void AKAZEFeatures::Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned c * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { +void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { float di = 0.f, dx = 0.f, dy = 0.f; float rx = 0.f, ry = 0.f; @@ -1666,7 +1955,7 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned steps.at(2) = options_.descriptor_pattern_size / 2; for (int i = 0; i < descriptorSamples_.rows; i++) { - int *coords = descriptorSamples_.ptr(i); + const int *coords = descriptorSamples_.ptr(i); int sample_step = steps.at(coords[0]); di = 0.0f; dx = 0.0f; @@ -1730,7 +2019,7 @@ void AKAZEFeatures::Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned * @param kpt Input keypoint * @param desc Descriptor vector */ -void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) { +void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char *desc) const { float di = 0.0f, dx = 0.0f, dy = 0.0f; float rx = 0.0f, ry = 0.0f; @@ -1753,7 +2042,7 @@ void AKAZEFeatures::Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, steps.at(2) = options_.descriptor_pattern_size / 2; for (int i = 0; i < descriptorSamples_.rows; i++) { - int *coords = descriptorSamples_.ptr(i); + const int *coords = descriptorSamples_.ptr(i); int sample_step = steps.at(coords[0]); di = 0.0f, dx = 0.0f, dy = 0.0f; diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h index c4929571f4..b5849d64a9 100644 --- a/modules/features2d/src/akaze/AKAZE.h +++ b/modules/features2d/src/akaze/AKAZE.h @@ -56,21 +56,22 @@ public: // Feature description methods void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); - void Compute_Main_Orientation(cv::KeyPoint& kpt) const; + + static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_); // SURF Pattern Descriptor - void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + //void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; + //void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; // M-SURF Pattern Descriptor - void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + //void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + //void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; // M-LDB Pattern Descriptor - void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, 
unsigned char* desc) const; - void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; - void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); - void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); + //void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + //void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; + //void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); + //void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); // Methods for saving some results and showing computation times void Save_Scale_Space(); From eada0f4b48330a2011605501c64fa01a56178ec7 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sun, 27 Apr 2014 23:04:31 +0300 Subject: [PATCH 15/52] Fix shadowed i variable warning --- modules/features2d/src/kaze/KAZE.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp index aa4b6cb1f1..0b818f8fe8 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -1207,7 +1207,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa // convert to unit vector len = sqrt(len); - for (int i = 0; i < dsize; i++) { + for (i = 0; i < dsize; i++) { desc[i] /= len; } @@ -1340,7 +1340,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) // convert to unit vector len = sqrt(len); - for (int i = 0; i < dsize; i++) { + for (i = 0; i < dsize; i++) { desc[i] /= len; } @@ -2017,7 +2017,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo // convert to unit vector len = sqrt(len); - for (int i = 0; i < dsize; i++) { + for (i = 0; i < dsize; i++) { desc[i] /= len; } @@ -2178,7 +2178,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc // convert to unit vector len = sqrt(len); - for (int i = 0; i < dsize; i++) { + for (i = 0; i < dsize; i++) { desc[i] /= len; } From ba5bec1de2fbd9d99993da76d17f5bf6ecf2362e Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sun, 27 Apr 2014 23:07:48 +0300 Subject: [PATCH 16/52] Fix mutable compilation error --- modules/features2d/src/akaze/AKAZE.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index a61971a910..a31ff87915 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -227,8 +227,8 @@ public: } private: - mutable std::vector & evolution_; - AKAZEOptions options_; + std::vector & evolution_; + AKAZEOptions options_; }; /** From 0e243541f976dcd85d82753366a721b598aaa9bb Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 10:50:46 +0300 Subject: [PATCH 17/52] Fix shadowed variable warnings --- modules/features2d/src/akaze/AKAZE.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index a31ff87915..87249cf6e0 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -157,7 +157,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; default: 
- cerr << "Diffusivity: " << options_.diffusivity << " is not supported" << endl; + cerr << "Diffusivity: " << static_cast(options_.diffusivity) << " is not supported" << endl; + break; } // Perform FED n inner steps @@ -402,14 +403,14 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { for (size_t i = 0; i < kpts_aux.size(); i++) { is_repeated = false; - const cv::KeyPoint& point = kpts_aux[i]; + const cv::KeyPoint& pt = kpts_aux[i]; for (size_t j = i + 1; j < kpts_aux.size(); j++) { // Compare response with the upper scale - if ((point.class_id + 1) == kpts_aux[j].class_id) { - dist = sqrt(pow(point.pt.x - kpts_aux[j].pt.x, 2) + pow(point.pt.y - kpts_aux[j].pt.y, 2)); - if (dist <= point.size) { - if (point.response < kpts_aux[j].response) { + if ((pt.class_id + 1) == kpts_aux[j].class_id) { + dist = sqrt(pow(pt.pt.x - kpts_aux[j].pt.x, 2) + pow(pt.pt.y - kpts_aux[j].pt.y, 2)); + if (dist <= pt.size) { + if (pt.response < kpts_aux[j].response) { is_repeated = true; break; } @@ -1278,7 +1279,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const // convert to unit vector len = sqrt(len); - for (int i = 0; i < dsize; i++) { + for (i = 0; i < dsize; i++) { desc[i] /= len; } } @@ -1403,7 +1404,7 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp // convert to unit vector len = sqrt(len); - for (int i = 0; i < dsize; i++) { + for (i = 0; i < dsize; i++) { desc[i] /= len; } } From f3f0e06c4dd4b27c98a6277ee8522399286d1810 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 10:51:09 +0300 Subject: [PATCH 18/52] Fix parenthesis in assert statements --- modules/features2d/src/akaze.cpp | 7 +++++-- modules/features2d/src/kaze.cpp | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 2083ce8d7f..4d4c744b1a 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -99,6 +99,9 @@ namespace cv } impl.Compute_Descriptors(keypoints, desc); + + CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); + CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); } void AKAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const @@ -143,7 +146,7 @@ namespace cv impl.Create_Nonlinear_Scale_Space(img1_32); impl.Compute_Descriptors(keypoints, desc); - CV_Assert(!desc.rows || desc.cols == descriptorSize() && "Descriptor size does not match expected"); - CV_Assert(!desc.rows || (desc.type() & descriptorType()) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); + CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); } } \ No newline at end of file diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 3bba8795a4..e49e1d2d78 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -70,8 +70,8 @@ namespace cv impl.Feature_Description(keypoints, desc); - CV_Assert(!desc.rows || desc.cols == descriptorSize() && "Descriptor size does not match expected"); - CV_Assert(!desc.rows || (desc.type() & descriptorType()) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); + 
CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); } void KAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const @@ -118,7 +118,7 @@ namespace cv impl.Create_Nonlinear_Scale_Space(img1_32); impl.Feature_Description(keypoints, desc); - CV_Assert(!desc.rows || desc.cols == descriptorSize() && "Descriptor size does not match expected"); - CV_Assert(!desc.rows || (desc.type() & descriptorType()) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); + CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); } } \ No newline at end of file From 3cfc22ae4e71c9337805b5c594f4bc7d7bedab8c Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 10:52:05 +0300 Subject: [PATCH 19/52] Fix initialisation order of parallel loop classes for AKAZE descriptor extraction --- modules/features2d/src/akaze/AKAZE.cpp | 104 ++++++++++++------------- 1 file changed, 50 insertions(+), 54 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 87249cf6e0..0347d4f263 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -553,11 +553,11 @@ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody { public: - SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) { } @@ -574,18 +574,18 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; + std::vector& evolution_; + AKAZEOptions& options_; }; class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: - SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) { } @@ -603,18 +603,18 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; + std::vector& evolution_; + AKAZEOptions& options_; }; class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: - MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) { } @@ -631,19 +631,18 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; - + std::vector& evolution_; + AKAZEOptions& options_; }; class 
MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: - MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) { } @@ -661,19 +660,18 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; - + std::vector& evolution_; + AKAZEOptions& options_; }; class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody { public: - Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) { } @@ -690,9 +688,8 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; - + std::vector& evolution_; + AKAZEOptions& options_; }; class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody @@ -700,14 +697,14 @@ class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody public: Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, cv::Mat& desc, - const std::vector& evolution, - const AKAZEOptions& options, + std::vector& evolution, + AKAZEOptions& options, cv::Mat descriptorSamples, cv::Mat descriptorBits) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) , descriptorSamples_(descriptorSamples) , descriptorBits_(descriptorBits) { @@ -726,8 +723,8 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; + std::vector& evolution_; + AKAZEOptions& options_; cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
cv::Mat descriptorBits_; @@ -736,11 +733,11 @@ private: class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody { public: - MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, const std::vector& evolution, const AKAZEOptions& options) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) { } @@ -758,9 +755,8 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; - + std::vector& evolution_; + AKAZEOptions& options_; }; class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody @@ -768,14 +764,14 @@ class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody public: MLDB_Descriptor_Subset_Invoker(std::vector& kpts, cv::Mat& desc, - const std::vector& evolution, - const AKAZEOptions& options, + std::vector& evolution, + AKAZEOptions& options, cv::Mat descriptorSamples, cv::Mat descriptorBits) - : evolution_(evolution) - , options_(options) - , keypoints_(kpts) + : keypoints_(kpts) , descriptors_(desc) + , evolution_(evolution) + , options_(options) , descriptorSamples_(descriptorSamples) , descriptorBits_(descriptorBits) { @@ -795,8 +791,8 @@ public: private: std::vector& keypoints_; cv::Mat& descriptors_; - const std::vector& evolution_; - const AKAZEOptions& options_; + std::vector& evolution_; + AKAZEOptions& options_; cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. cv::Mat descriptorBits_; From 61f79c263230dc5102f39ad8aa84b80f064a89f3 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 10:53:09 +0300 Subject: [PATCH 20/52] Fix line ending at EOF --- modules/features2d/src/akaze/AKAZE.cpp | 43 +++++++++++++------------- modules/features2d/src/kaze/config.h | 14 +-------- 2 files changed, 22 insertions(+), 35 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 0347d4f263..2adfe112ce 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -572,8 +572,8 @@ public: void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; }; @@ -601,8 +601,8 @@ public: void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; }; @@ -612,7 +612,7 @@ class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody public: MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) : keypoints_(kpts) - , descriptors_(desc) + , descriptors_(desc) , evolution_(evolution) , options_(options) { @@ -629,8 +629,8 @@ public: void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; }; @@ -640,7 +640,7 @@ class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody public: MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& 
desc, std::vector& evolution, AKAZEOptions& options) : keypoints_(kpts) - , descriptors_(desc) + , descriptors_(desc) , evolution_(evolution) , options_(options) { @@ -658,8 +658,8 @@ public: void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; }; @@ -669,7 +669,7 @@ class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody public: Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) : keypoints_(kpts) - , descriptors_(desc) + , descriptors_(desc) , evolution_(evolution) , options_(options) { @@ -686,8 +686,8 @@ public: void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; }; @@ -721,8 +721,8 @@ public: void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; @@ -735,7 +735,7 @@ class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody public: MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) : keypoints_(kpts) - , descriptors_(desc) + , descriptors_(desc) , evolution_(evolution) , options_(options) { @@ -753,8 +753,8 @@ public: void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; }; @@ -789,8 +789,8 @@ public: void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; + std::vector& keypoints_; + cv::Mat& descriptors_; std::vector& evolution_; AKAZEOptions& options_; @@ -2295,5 +2295,4 @@ inline void check_descriptor_limits(int &x, int &y, int width, int height) { */ inline int fRound(float flt) { return (int)(flt + 0.5f); -} - +} \ No newline at end of file diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/config.h index 88fcba5960..2615f5fb64 100644 --- a/modules/features2d/src/kaze/config.h +++ b/modules/features2d/src/kaze/config.h @@ -1,4 +1,3 @@ - /** * @file config.h * @brief Configuration file @@ -24,13 +23,6 @@ // OpenCV Includes #include "precomp.hpp" -// OpenMP Includes -#ifdef _OPENMP -#include -#else -#define omp_get_thread_num() 0 -#endif - //************************************************************************************* //************************************************************************************* @@ -122,8 +114,4 @@ struct TEvolution { //************************************************************************************* //************************************************************************************* -#endif - - - - +#endif \ No newline at end of file From f97e38d8a6fe26538fc4cc79d4b66d6274fe3e91 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 15:00:14 +0300 Subject: [PATCH 21/52] Fix casting from/to int/float that caused lot of compiler warnings. 
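
The pattern throughout is to give floating-point expressions genuine float
literals (0.5f, 1.0f, 2.f) and to spell out every narrowing to int, so the
implicit double-to-float and float-to-int conversions stop warning. A minimal
self-contained sketch of the two cases, with illustrative names rather than
code lifted from the diff:

    #include <cmath>

    // Double literals such as 0.5 promote float arithmetic to double and
    // warn on the way back down (MSVC C4244/C4305, gcc -Wconversion).
    static float evolution_time(float esigma) {
        return 0.5f * (esigma * esigma);   // 0.5f keeps the computation in float
    }

    // std::ceil returns double; the narrowing to int is made explicit
    // instead of being left to an implicit conversion.
    static int descriptor_bytes(int bits) {
        return (int)std::ceil(bits / 8.0);
    }

The same idea drives the powf(2.f, (float)octave) and (int)ceil(t / 8.)
changes below. The commit also switches the descriptor invokers to hold
pointers rather than references to their inputs, presumably because reference
data members suppress the compiler-generated assignment operator, which MSVC
reports as warning C4512 at /W4.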
--- modules/features2d/src/akaze.cpp | 4 +- modules/features2d/src/akaze/AKAZE.cpp | 651 +++++++++++++------------ modules/features2d/src/akaze/AKAZE.h | 15 +- modules/features2d/src/kaze/KAZE.cpp | 416 ++++++++-------- modules/features2d/src/kaze/KAZE.h | 56 +-- modules/features2d/src/kaze/config.h | 2 +- 6 files changed, 582 insertions(+), 562 deletions(-) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 4d4c744b1a..c41c2f98db 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -30,12 +30,12 @@ namespace cv if (descriptor_size == 0) { int t = (6 + 36 + 120) * descriptor_channels; - return ceil(t / 8.); + return (int)ceil(t / 8.); } else { // We use the random bit selection length binary descriptor - return ceil(descriptor_size / 8.); + return (int)ceil(descriptor_size / 8.); } } } diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 2adfe112ce..94b50eb562 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -47,12 +47,12 @@ AKAZEFeatures::~AKAZEFeatures(void) { */ void AKAZEFeatures::Allocate_Memory_Evolution(void) { - float rfactor = 0.0; + float rfactor = 0.0f; int level_height = 0, level_width = 0; // Allocate the dimension of the matrices for the evolution for (int i = 0; i <= options_.omax - 1; i++) { - rfactor = 1.0 / pow(2.f, i); + rfactor = 1.0f / pow(2.f, i); level_height = (int)(options_.img_height*rfactor); level_width = (int)(options_.img_width*rfactor); @@ -75,7 +75,7 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { step.Lstep = cv::Mat::zeros(level_height, level_width, CV_32F); step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i); step.sigma_size = fRound(step.esigma); - step.etime = 0.5*(step.esigma*step.esigma); + step.etime = 0.5f*(step.esigma*step.esigma); step.octave = i; step.sublevel = j; evolution_.push_back(step); @@ -86,9 +86,9 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { for (size_t i = 1; i < evolution_.size(); i++) { int naux = 0; vector tau; - float ttime = 0.0; + float ttime = 0.0f; ttime = evolution_[i].etime - evolution_[i - 1].etime; - naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_, tau); + naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau); nsteps_.push_back(naux); tsteps_.push_back(tau); ncycles_++; @@ -103,7 +103,7 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { */ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { - double t1 = 0.0, t2 = 0.0; + //double t1 = 0.0, t2 = 0.0; if (evolution_.size() == 0) { cerr << "Error generating the nonlinear scale space!!" 
<< endl; @@ -111,7 +111,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { return -1; } - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); @@ -120,23 +120,23 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { // First compute the kcontrast factor options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, - 1.0, options_.kcontrast_nbins, 0, 0); + 1.0f, options_.kcontrast_nbins, 0, 0); - t2 = cv::getTickCount(); - timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency(); // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { if (evolution_[i].octave > evolution_[i - 1].octave) { halfsample_image(evolution_[i - 1].Lt, evolution_[i].Lt); - options_.kcontrast = options_.kcontrast*0.75; + options_.kcontrast = options_.kcontrast*0.75f; } else { evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); } - gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0); + gaussian_2D_convolution(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0, 1.0f); // Compute the Gaussian derivatives Lx and Ly image_derivatives_scharr(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0); @@ -167,8 +167,8 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { } } - t2 = cv::getTickCount(); - timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency(); return 0; } @@ -180,9 +180,9 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { */ void AKAZEFeatures::Feature_Detection(std::vector& kpts) { - double t1 = 0.0, t2 = 0.0; + //double t1 = 0.0, t2 = 0.0; - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); kpts.clear(); @@ -190,8 +190,8 @@ void AKAZEFeatures::Feature_Detection(std::vector& kpts) { Find_Scale_Space_Extrema(kpts); Do_Subpixel_Refinement(kpts); - t2 = cv::getTickCount(); - timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -200,7 +200,7 @@ class MultiscaleDerivativesInvoker : public cv::ParallelLoopBody { public: explicit MultiscaleDerivativesInvoker(std::vector& ev, const AKAZEOptions& opt) - : evolution_(ev) + : evolution_(&ev) , options_(opt) { } @@ -208,27 +208,29 @@ public: void operator()(const cv::Range& range) const { + std::vector& evolution = *evolution_; + for (int i = range.start; i < range.end; i++) { - float ratio = pow(2.f, (float)evolution_[i].octave); - int sigma_size_ = fRound(evolution_[i].esigma * options_.derivative_factor / ratio); + float ratio = pow(2.f, (float)evolution[i].octave); + int sigma_size_ = fRound(evolution[i].esigma * options_.derivative_factor / ratio); - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, 
sigma_size_); + compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, sigma_size_); + compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, sigma_size_); + compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, sigma_size_); - evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); - evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); - evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); - evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); - evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); + evolution[i].Lx = evolution[i].Lx*((sigma_size_)); + evolution[i].Ly = evolution[i].Ly*((sigma_size_)); + evolution[i].Lxx = evolution[i].Lxx*((sigma_size_)*(sigma_size_)); + evolution[i].Lxy = evolution[i].Lxy*((sigma_size_)*(sigma_size_)); + evolution[i].Lyy = evolution[i].Lyy*((sigma_size_)*(sigma_size_)); } } private: - std::vector & evolution_; + std::vector* evolution_; AKAZEOptions options_; }; @@ -237,9 +239,9 @@ private: */ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { - double t1 = 0.0, t2 = 0.0; + //double t1 = 0.0, t2 = 0.0; - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); cv::parallel_for_(cv::Range(0, evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_)); /* @@ -261,8 +263,8 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); } */ - t2 = cv::getTickCount(); - timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -276,9 +278,10 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { Compute_Multiscale_Derivatives(); for (size_t i = 0; i < evolution_.size(); i++) { - if (options_.verbosity == true) { - cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; - } + + //if (options_.verbosity == true) { + // cout << "Computing detector response. Determinant of Hessian. 
Evolution time: " << evolution_[i].etime << endl; + //} for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) { for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) { @@ -298,7 +301,7 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { */ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { - double t1 = 0.0, t2 = 0.0; + //double t1 = 0.0, t2 = 0.0; float value = 0.0; float dist = 0.0, ratio = 0.0, smax = 0.0; int npoints = 0, id_repeated = 0; @@ -310,13 +313,13 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { // Set maximum size if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF || options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) { - smax = 10.0*sqrtf(2.0); + smax = 10.0f*sqrtf(2.0f); } else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) { - smax = 12.0*sqrtf(2.0); + smax = 12.0f*sqrtf(2.0f); } - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); for (size_t i = 0; i < evolution_.size(); i++) { for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) { @@ -344,8 +347,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { point.class_id = i; ratio = pow(2.f, point.octave); sigma_size_ = fRound(point.size / ratio); - point.pt.x = jx; - point.pt.y = ix; + point.pt.x = static_cast(jx); + point.pt.y = static_cast(ix); // Compare response with the same and lower scale for (size_t ik = 0; ik < kpts_aux.size(); ik++) { @@ -422,8 +425,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { kpts.push_back(point); } - t2 = cv::getTickCount(); - timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -433,7 +436,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { */ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { - double t1 = 0.0, t2 = 0.0; + //double t1 = 0.0, t2 = 0.0; float Dx = 0.0, Dy = 0.0, ratio = 0.0; float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; int x = 0, y = 0; @@ -441,7 +444,7 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); for (size_t i = 0; i < kpts.size(); i++) { ratio = pow(2.f, kpts[i].octave); @@ -449,23 +452,23 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { y = fRound(kpts[i].pt.y / ratio); // Compute the gradient - Dx = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) + Dx = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) - *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1)); - Dy = (0.5)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) + Dy = (0.5f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) - *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x)); // Compute the Hessian Dxx = (*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x + 1) + *(evolution_[kpts[i].class_id].Ldet.ptr(y)+x - 1) - - 2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); Dyy = (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x) + *(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x) - - 2.0*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); + - 2.0f*(*(evolution_[kpts[i].class_id].Ldet.ptr(y)+x))); - Dxy = (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x + 1) + Dxy = 
(0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x + 1) + (*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x - 1))) - - (0.25)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x + 1) + - (0.25f)*(*(evolution_[kpts[i].class_id].Ldet.ptr(y - 1) + x + 1) + (*(evolution_[kpts[i].class_id].Ldet.ptr(y + 1) + x - 1))); // Solve the linear system @@ -477,15 +480,15 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { cv::solve(A, b, dst, DECOMP_LU); - if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0) { + if (fabs(*(dst.ptr(0))) <= 1.0f && fabs(*(dst.ptr(1))) <= 1.0f) { kpts[i].pt.x = x + (*(dst.ptr(0))); kpts[i].pt.y = y + (*(dst.ptr(1))); - kpts[i].pt.x *= powf(2.f, evolution_[kpts[i].class_id].octave); - kpts[i].pt.y *= powf(2.f, evolution_[kpts[i].class_id].octave); + kpts[i].pt.x *= powf(2.f, (float)evolution_[kpts[i].class_id].octave); + kpts[i].pt.y *= powf(2.f, (float)evolution_[kpts[i].class_id].octave); kpts[i].angle = 0.0; // In OpenCV the size of a keypoint its the diameter - kpts[i].size *= 2.0; + kpts[i].size *= 2.0f; } // Delete the point since its not stable else { @@ -494,8 +497,8 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { } } - t2 = cv::getTickCount(); - timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -554,10 +557,10 @@ class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody { public: SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) { } @@ -565,27 +568,27 @@ public: { for (int i = range.start; i < range.end; i++) { - Get_SURF_Descriptor_Upright_64(keypoints_[i], descriptors_.ptr(i)); + Get_SURF_Descriptor_Upright_64((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; }; class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) { } @@ -593,28 +596,28 @@ public: { for (int i = range.start; i < range.end; i++) { - AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); - Get_SURF_Descriptor_64(keypoints_[i], descriptors_.ptr(i)); + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_SURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; }; class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: 
MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) { } @@ -622,27 +625,27 @@ public: { for (int i = range.start; i < range.end; i++) { - Get_MSURF_Upright_Descriptor_64(keypoints_[i], descriptors_.ptr(i)); + Get_MSURF_Upright_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; }; class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) { } @@ -650,28 +653,28 @@ public: { for (int i = range.start; i < range.end; i++) { - AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); - Get_MSURF_Descriptor_64(keypoints_[i], descriptors_.ptr(i)); + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_MSURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; }; class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody { public: Upright_MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) { } @@ -679,17 +682,17 @@ public: { for (int i = range.start; i < range.end; i++) { - Get_Upright_MLDB_Full_Descriptor(keypoints_[i], descriptors_.ptr(i)); + Get_Upright_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; }; class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody @@ -701,10 +704,10 @@ public: AKAZEOptions& options, cv::Mat descriptorSamples, cv::Mat descriptorBits) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) , descriptorSamples_(descriptorSamples) , descriptorBits_(descriptorBits) { @@ -714,17 +717,17 @@ public: { for (int i = range.start; i < range.end; i++) { - Get_Upright_MLDB_Descriptor_Subset(keypoints_[i], descriptors_.ptr(i)); + Get_Upright_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, 
unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. cv::Mat descriptorBits_; @@ -734,10 +737,10 @@ class MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody { public: MLDB_Full_Descriptor_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) { } @@ -745,18 +748,18 @@ public: { for (int i = range.start; i < range.end; i++) { - AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); - Get_MLDB_Full_Descriptor(keypoints_[i], descriptors_.ptr(i)); + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; }; class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody @@ -768,10 +771,10 @@ public: AKAZEOptions& options, cv::Mat descriptorSamples, cv::Mat descriptorBits) - : keypoints_(kpts) - , descriptors_(desc) - , evolution_(evolution) - , options_(options) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) , descriptorSamples_(descriptorSamples) , descriptorBits_(descriptorBits) { @@ -781,18 +784,18 @@ public: { for (int i = range.start; i < range.end; i++) { - AKAZEFeatures::Compute_Main_Orientation(keypoints_[i], evolution_); - Get_MLDB_Descriptor_Subset(keypoints_[i], descriptors_.ptr(i)); + AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_); + Get_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr(i)); } } void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc) const; private: - std::vector& keypoints_; - cv::Mat& descriptors_; - std::vector& evolution_; - AKAZEOptions& options_; + std::vector* keypoints_; + cv::Mat* descriptors_; + std::vector* evolution_; + AKAZEOptions* options_; cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from. 
cv::Mat descriptorBits_; @@ -805,9 +808,9 @@ private: */ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { - double t1 = 0.0, t2 = 0.0; + //double t1 = 0.0, t2 = 0.0; - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); // Allocate memory for the matrix with the descriptors if (options_.descriptor < MLDB_UPRIGHT) { @@ -817,11 +820,11 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat // We use the full length binary descriptor -> 486 bits if (options_.descriptor_size == 0) { int t = (6 + 36 + 120)*options_.descriptor_channels; - desc = cv::Mat::zeros(kpts.size(), ceil(t / 8.), CV_8UC1); + desc = cv::Mat::zeros(kpts.size(), (int)ceil(t / 8.), CV_8UC1); } else { // We use the random bit selection length binary descriptor - desc = cv::Mat::zeros(kpts.size(), ceil(options_.descriptor_size / 8.), CV_8UC1); + desc = cv::Mat::zeros(kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1); } } @@ -898,8 +901,8 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; } - t2 = cv::getTickCount(); - timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency(); + //t2 = cv::getTickCount(); + //timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -922,7 +925,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto // Get the information from the keypoint level = kpt.class_id; ratio = (float)(1 << evolution_[level].octave); - s = fRound(0.5*kpt.size / ratio); + s = fRound(0.5f*kpt.size / ratio); xf = kpt.pt.x / ratio; yf = kpt.pt.y / ratio; @@ -944,8 +947,8 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto } // Loop slides pi/3 window around feature point - for (ang1 = 0; ang1 < 2.0*CV_PI; ang1 += 0.15f) { - ang2 = (ang1 + CV_PI / 3.0f > 2.0*CV_PI ? ang1 - 5.0f*CV_PI / 3.0f : ang1 + CV_PI / 3.0f); + for (ang1 = 0; ang1 < (float)(2.0 * CV_PI); ang1 += 0.15f) { + ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? 
ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0)); sumX = sumY = 0.f; for (size_t k = 0; k < Ang.size(); ++k) { @@ -958,7 +961,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto sumY += resY[k]; } else if (ang2 < ang1 && - ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0f*CV_PI))) { sumX += resX[k]; sumY += resY[k]; } @@ -991,6 +994,8 @@ void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; int scale = 0, dsize = 0, level = 0; + const std::vector& evolution = *evolution_; + // Set the descriptor size and the sample and pattern sizes dsize = 64; sample_step = 5; @@ -998,7 +1003,7 @@ void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv // Get the information from the keypoint ratio = (float)(1 << kpt.octave); - scale = fRound(0.5*kpt.size / ratio); + scale = fRound(0.5f*kpt.size / ratio); level = kpt.class_id; yf = kpt.pt.y / ratio; xf = kpt.pt.x / ratio; @@ -1014,26 +1019,26 @@ void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv sample_y = yf + l*scale; sample_x = xf + k*scale; - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); fx = sample_x - x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Sum the derivatives to the cumulative descriptor dx += rx; @@ -1086,9 +1091,11 @@ void SURF_Descriptor_64_Invoker::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, sample_step = 5; pattern_size = 10; + const std::vector& evolution = *evolution_; + // Get the information from the keypoint ratio = (float)(1 << kpt.octave); - scale = fRound(0.5*kpt.size / ratio); + scale = fRound(0.5f*kpt.size / ratio); angle = kpt.angle; level = kpt.class_id; yf = kpt.pt.y / ratio; @@ -1107,26 +1114,26 @@ void SURF_Descriptor_64_Invoker::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); fx = sample_x - 
x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; @@ -1180,7 +1187,9 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const int scale = 0, dsize = 0, level = 0; // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + float cx = -0.5f, cy = 0.5f; + + const std::vector& evolution = *evolution_; // Set the descriptor size and the sample and pattern sizes dsize = 64; @@ -1189,7 +1198,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const // Get the information from the keypoint ratio = (float)(1 << kpt.octave); - scale = fRound(0.5*kpt.size / ratio); + scale = fRound(0.5f*kpt.size / ratio); level = kpt.class_id; yf = kpt.pt.y / ratio; xf = kpt.pt.x / ratio; @@ -1202,12 +1211,12 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const j = -8; i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0f; + cy = -0.5f; while (j < pattern_size) { dx = dy = mdx = mdy = 0.0; - cy += 1.0; + cy += 1.0f; j = j - 4; ky = i + sample_step; @@ -1222,7 +1231,7 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const sample_x = l*scale + xf; //Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50*scale); + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50f*scale); y1 = (int)(sample_y - .5); x1 = (int)(sample_x - .5); @@ -1233,17 +1242,17 @@ void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const fx = sample_x - x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = 
*(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; rx = gauss_s1*rx; ry = gauss_s1*ry; @@ -1301,7 +1310,9 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp int scale = 0, dsize = 0, level = 0; // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + float cx = -0.5f, cy = 0.5f; + + const std::vector& evolution = *evolution_; // Set the descriptor size and the sample and pattern sizes dsize = 64; @@ -1310,7 +1321,7 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp // Get the information from the keypoint ratio = (float)(1 << kpt.octave); - scale = fRound(0.5*kpt.size / ratio); + scale = fRound(0.5f*kpt.size / ratio); angle = kpt.angle; level = kpt.class_id; yf = kpt.pt.y / ratio; @@ -1326,12 +1337,12 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp j = -8; i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0f; + cy = -0.5f; while (j < pattern_size) { dx = dy = mdx = mdy = 0.0; - cy += 1.0; + cy += 1.0f; j = j - 4; ky = i + sample_step; @@ -1347,28 +1358,28 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint& kp sample_x = xf + (-l*scale*si + k*scale*co); // Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale); - y1 = fRound(sample_y - .5); - x1 = fRound(sample_x - .5); + y1 = fRound(sample_y - 0.5f); + x1 = fRound(sample_x - 0.5f); - y2 = fRound(sample_y + .5); - x2 = fRound(sample_x + .5); + y2 = fRound(sample_y + 0.5f); + x2 = fRound(sample_x + 0.5f); fx = sample_x - x1; fy = sample_y - y1; - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Lx.ptr(y1)+x1); + res2 = *(evolution[level].Lx.ptr(y1)+x2); + res3 = *(evolution[level].Lx.ptr(y2)+x1); + res4 = *(evolution[level].Lx.ptr(y2)+x2); + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + res1 = *(evolution[level].Ly.ptr(y1)+x1); + res2 = *(evolution[level].Ly.ptr(y1)+x2); + res3 = *(evolution[level].Ly.ptr(y2)+x1); + res4 = *(evolution[level].Ly.ptr(y2)+x2); + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis rry = gauss_s1*(rx*co + ry*si); @@ -1421,20 +1432,23 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons int level = 0, nsamples = 0, scale = 0; int dcount1 = 0, dcount2 = 0; + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, 
CV_32FC1); + cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); // Get the information from the keypoint ratio = (float)(1 << kpt.octave); - scale = fRound(0.5*kpt.size / ratio); + scale = fRound(0.5f*kpt.size / ratio); level = kpt.class_id; yf = kpt.pt.y / ratio; xf = kpt.pt.x / ratio; // First 2x2 grid - pattern_size = options_.descriptor_pattern_size; + pattern_size = options_->descriptor_pattern_size; sample_step = pattern_size; for (int i = -pattern_size; i < pattern_size; i += sample_step) { @@ -1452,9 +1466,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons y1 = fRound(sample_y); x1 = fRound(sample_x); - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); di += ri; dx += rx; @@ -1495,7 +1509,7 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons } // Second 3x3 grid - sample_step = ceil(pattern_size*2. / 3.); + sample_step = static_cast(ceil(pattern_size*2. / 3.)); dcount2 = 0; for (int i = -pattern_size; i < pattern_size; i += sample_step) { @@ -1513,9 +1527,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons y1 = fRound(sample_y); x1 = fRound(sample_x); - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); di += ri; dx += rx; @@ -1575,9 +1589,9 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons y1 = fRound(sample_y); x1 = fRound(sample_x); - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); di += ri; dx += rx; @@ -1635,14 +1649,17 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& int level = 0, nsamples = 0, scale = 0; int dcount1 = 0, dcount2 = 0; + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options_.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options_.descriptor_channels, CV_32FC1); - cv::Mat values_3 = cv::Mat::zeros(16, options_.descriptor_channels, CV_32FC1); + cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); + cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); // Get the information from the keypoint ratio = (float)(1 << kpt.octave); - scale = fRound(0.5*kpt.size / ratio); + scale = fRound(0.5f*kpt.size / ratio); angle = kpt.angle; level = kpt.class_id; yf = kpt.pt.y / ratio; @@ -1651,7 +1668,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& si = sin(angle); // First 2x2 grid - pattern_size = options_.descriptor_pattern_size; + pattern_size = options.descriptor_pattern_size; sample_step = pattern_size; for (int i = 
-pattern_size; i < pattern_size; i += sample_step) { @@ -1660,8 +1677,8 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& di = dx = dy = 0.0; nsamples = 0; - for (float k = i; k < i + sample_step; k++) { - for (float l = j; l < j + sample_step; l++) { + for (float k = (float)i; k < i + sample_step; k++) { + for (float l = (float)j; l < j + sample_step; l++) { // Get the coordinates of the sample point sample_y = yf + (l*scale*co + k*scale*si); @@ -1670,16 +1687,16 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& y1 = fRound(sample_y); x1 = fRound(sample_x); - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); di += ri; - if (options_.descriptor_channels == 2) { + if (options.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (options_.descriptor_channels == 3) { + else if (options.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; rrx = -rx*si + ry*co; @@ -1696,11 +1713,11 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& dy /= nsamples; *(values_1.ptr(dcount2)) = di; - if (options_.descriptor_channels > 1) { + if (options.descriptor_channels > 1) { *(values_1.ptr(dcount2)+1) = dx; } - if (options_.descriptor_channels > 2) { + if (options.descriptor_channels > 2) { *(values_1.ptr(dcount2)+2) = dy; } @@ -1718,7 +1735,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } } - if (options_.descriptor_channels > 1) { + if (options.descriptor_channels > 1) { for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { if (*(values_1.ptr(i)+1) > *(values_1.ptr(j)+1)) { @@ -1730,7 +1747,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } } - if (options_.descriptor_channels > 2) { + if (options.descriptor_channels > 2) { for (int i = 0; i < 4; i++) { for (int j = i + 1; j < 4; j++) { if (*(values_1.ptr(i)+2) > *(values_1.ptr(j)+2)) { @@ -1742,7 +1759,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } // Second 3x3 grid - sample_step = ceil(pattern_size*2. / 3.); + sample_step = static_cast(ceil(pattern_size*2. 
/ 3.)); dcount2 = 0; for (int i = -pattern_size; i < pattern_size; i += sample_step) { @@ -1761,15 +1778,15 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& y1 = fRound(sample_y); x1 = fRound(sample_x); - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); di += ri; - if (options_.descriptor_channels == 2) { + if (options.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (options_.descriptor_channels == 3) { + else if (options.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; rrx = -rx*si + ry*co; @@ -1786,11 +1803,11 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& dy /= nsamples; *(values_2.ptr(dcount2)) = di; - if (options_.descriptor_channels > 1) { + if (options.descriptor_channels > 1) { *(values_2.ptr(dcount2)+1) = dx; } - if (options_.descriptor_channels > 2) { + if (options.descriptor_channels > 2) { *(values_2.ptr(dcount2)+2) = dy; } @@ -1808,7 +1825,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } } - if (options_.descriptor_channels > 1) { + if (options.descriptor_channels > 1) { for (int i = 0; i < 9; i++) { for (int j = i + 1; j < 9; j++) { if (*(values_2.ptr(i)+1) > *(values_2.ptr(j)+1)) { @@ -1819,7 +1836,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } } - if (options_.descriptor_channels > 2) { + if (options.descriptor_channels > 2) { for (int i = 0; i < 9; i++) { for (int j = i + 1; j < 9; j++) { if (*(values_2.ptr(i)+2) > *(values_2.ptr(j)+2)) { @@ -1849,15 +1866,15 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& y1 = fRound(sample_y); x1 = fRound(sample_x); - ri = *(evolution_[level].Lt.ptr(y1)+x1); - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + ri = *(evolution[level].Lt.ptr(y1)+x1); + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); di += ri; - if (options_.descriptor_channels == 2) { + if (options.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (options_.descriptor_channels == 3) { + else if (options.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; rrx = -rx*si + ry*co; @@ -1874,10 +1891,10 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& dy /= nsamples; *(values_3.ptr(dcount2)) = di; - if (options_.descriptor_channels > 1) + if (options.descriptor_channels > 1) *(values_3.ptr(dcount2)+1) = dx; - if (options_.descriptor_channels > 2) + if (options.descriptor_channels > 2) *(values_3.ptr(dcount2)+2) = dy; dcount2++; @@ -1894,7 +1911,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } } - if (options_.descriptor_channels > 1) { + if (options.descriptor_channels > 1) { for (int i = 0; i < 16; i++) { for (int j = i + 1; j < 16; j++) { if (*(values_3.ptr(i)+1) > *(values_3.ptr(j)+1)) { @@ -1905,7 +1922,7 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& } } - if (options_.descriptor_channels > 2) { + if (options.descriptor_channels > 2) { for (int i = 0; i < 16; i++) { for (int j = i + 1; j < 16; j++) { if (*(values_3.ptr(i)+2) > *(values_3.ptr(j)+2)) { @@ -1932,24 +1949,27 @@ void 
MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi float sample_x = 0.f, sample_y = 0.f; int x1 = 0, y1 = 0; + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + // Get the information from the keypoint float ratio = (float)(1 << kpt.octave); - int scale = fRound(0.5*kpt.size / ratio); + int scale = fRound(0.5f*kpt.size / ratio); float angle = kpt.angle; - float level = kpt.class_id; + int level = kpt.class_id; float yf = kpt.pt.y / ratio; float xf = kpt.pt.x / ratio; float co = cos(angle); float si = sin(angle); // Allocate memory for the matrix of values - cv::Mat values = cv::Mat_::zeros((4 + 9 + 16)*options_.descriptor_channels, 1); + cv::Mat values = cv::Mat_::zeros((4 + 9 + 16)*options.descriptor_channels, 1); // Sample everything, but only do the comparisons vector steps(3); - steps.at(0) = options_.descriptor_pattern_size; - steps.at(1) = ceil(2.f*options_.descriptor_pattern_size / 3.f); - steps.at(2) = options_.descriptor_pattern_size / 2; + steps.at(0) = options.descriptor_pattern_size; + steps.at(1) = (int)ceil(2.f*options.descriptor_pattern_size / 3.f); + steps.at(2) = options.descriptor_pattern_size / 2; for (int i = 0; i < descriptorSamples_.rows; i++) { const int *coords = descriptorSamples_.ptr(i); @@ -1968,16 +1988,16 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi y1 = fRound(sample_y); x1 = fRound(sample_x); - di += *(evolution_[level].Lt.ptr(y1)+x1); + di += *(evolution[level].Lt.ptr(y1)+x1); - if (options_.descriptor_channels > 1) { - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + if (options.descriptor_channels > 1) { + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); - if (options_.descriptor_channels == 2) { + if (options.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (options_.descriptor_channels == 3) { + else if (options.descriptor_channels == 3) { // Get the x and y derivatives on the rotated axis dx += rx*co + ry*si; dy += -rx*si + ry*co; @@ -1986,14 +2006,14 @@ void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const cv::KeyPoi } } - *(values.ptr(options_.descriptor_channels*i)) = di; + *(values.ptr(options.descriptor_channels*i)) = di; - if (options_.descriptor_channels == 2) { - *(values.ptr(options_.descriptor_channels*i + 1)) = dx; + if (options.descriptor_channels == 2) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; } - else if (options_.descriptor_channels == 3) { - *(values.ptr(options_.descriptor_channels*i + 1)) = dx; - *(values.ptr(options_.descriptor_channels*i + 2)) = dy; + else if (options.descriptor_channels == 3) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; + *(values.ptr(options.descriptor_channels*i + 2)) = dy; } } @@ -2023,20 +2043,23 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( float sample_x = 0.0f, sample_y = 0.0f; int x1 = 0, y1 = 0; + const AKAZEOptions & options = *options_; + const std::vector& evolution = *evolution_; + // Get the information from the keypoint float ratio = (float)(1 << kpt.octave); - int scale = fRound(0.5*kpt.size / ratio); - float level = kpt.class_id; + int scale = fRound(0.5f*kpt.size / ratio); + int level = kpt.class_id; float yf = kpt.pt.y / ratio; float xf = kpt.pt.x / ratio; // Allocate memory for the matrix of values - Mat values = cv::Mat_::zeros((4 + 9 + 16)*options_.descriptor_channels, 1); + Mat values = cv::Mat_::zeros((4 + 9 + 
16)*options.descriptor_channels, 1); vector steps(3); - steps.at(0) = options_.descriptor_pattern_size; - steps.at(1) = ceil(2.f*options_.descriptor_pattern_size / 3.f); - steps.at(2) = options_.descriptor_pattern_size / 2; + steps.at(0) = options.descriptor_pattern_size; + steps.at(1) = static_cast(ceil(2.f*options.descriptor_pattern_size / 3.f)); + steps.at(2) = options.descriptor_pattern_size / 2; for (int i = 0; i < descriptorSamples_.rows; i++) { const int *coords = descriptorSamples_.ptr(i); @@ -2052,16 +2075,16 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( y1 = fRound(sample_y); x1 = fRound(sample_x); - di += *(evolution_[level].Lt.ptr(y1)+x1); + di += *(evolution[level].Lt.ptr(y1)+x1); - if (options_.descriptor_channels > 1) { - rx = *(evolution_[level].Lx.ptr(y1)+x1); - ry = *(evolution_[level].Ly.ptr(y1)+x1); + if (options.descriptor_channels > 1) { + rx = *(evolution[level].Lx.ptr(y1)+x1); + ry = *(evolution[level].Ly.ptr(y1)+x1); - if (options_.descriptor_channels == 2) { + if (options.descriptor_channels == 2) { dx += sqrtf(rx*rx + ry*ry); } - else if (options_.descriptor_channels == 3) { + else if (options.descriptor_channels == 3) { dx += rx; dy += ry; } @@ -2069,14 +2092,14 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( } } - *(values.ptr(options_.descriptor_channels*i)) = di; + *(values.ptr(options.descriptor_channels*i)) = di; - if (options_.descriptor_channels == 2) { - *(values.ptr(options_.descriptor_channels*i + 1)) = dx; + if (options.descriptor_channels == 2) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; } - else if (options_.descriptor_channels == 3) { - *(values.ptr(options_.descriptor_channels*i + 1)) = dx; - *(values.ptr(options_.descriptor_channels*i + 2)) = dy; + else if (options.descriptor_channels == 3) { + *(values.ptr(options.descriptor_channels*i + 1)) = dx; + *(values.ptr(options.descriptor_channels*i + 2)) = dy; } } @@ -2097,15 +2120,15 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( /** * @brief This method displays the computation times */ -void AKAZEFeatures::Show_Computation_Times() const { - cout << "(*) Time Scale Space: " << timing_.scale << endl; - cout << "(*) Time Detector: " << timing_.detector << endl; - cout << " - Time Derivatives: " << timing_.derivatives << endl; - cout << " - Time Extrema: " << timing_.extrema << endl; - cout << " - Time Subpixel: " << timing_.subpixel << endl; - cout << "(*) Time Descriptor: " << timing_.descriptor << endl; - cout << endl; -} +//void AKAZEFeatures::Show_Computation_Times() const { +// cout << "(*) Time Scale Space: " << timing_.scale << endl; +// cout << "(*) Time Detector: " << timing_.detector << endl; +// cout << " - Time Derivatives: " << timing_.derivatives << endl; +// cout << " - Time Extrema: " << timing_.extrema << endl; +// cout << " - Time Subpixel: " << timing_.subpixel << endl; +// cout << "(*) Time Descriptor: " << timing_.descriptor << endl; +// cout << endl; +//} /* ************************************************************************* */ /** @@ -2142,7 +2165,7 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int for (size_t i = 0, c = 0; i < 3; i++) { int gdiv = i + 2; //grid divisions, per row int gsz = gdiv*gdiv; - int psz = ceil(2.*pattern_size / (float)gdiv); + int psz = (int)ceil(2.f*pattern_size / (float)gdiv); for (int j = 0; j < gsz; j++) { for (int k = j + 1; k < gsz; k++, c++) { @@ -2156,12 +2179,12 @@ void 
generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int } srand(1024); - Mat_ comps = Mat_(nchannels*ceil(nbits / (float)nchannels), 2); + Mat_ comps = Mat_(nchannels * (int)ceil(nbits / (float)nchannels), 2); comps = 1000; // Select some samples. A sample includes all channels int count = 0; - size_t npicks = ceil(nbits / (float)nchannels); + size_t npicks = (size_t)ceil(nbits / (float)nchannels); Mat_ samples(29, 3); Mat_ fullcopy = fullM.clone(); samples = -1; @@ -2235,15 +2258,15 @@ inline float get_angle(float x, float y) { } if (x < 0 && y >= 0) { - return CV_PI - atanf(-y / x); + return static_cast(CV_PI) - atanf(-y / x); } if (x < 0 && y < 0) { - return CV_PI + atanf(y / x); + return static_cast(CV_PI) + atanf(y / x); } if (x >= 0 && y < 0) { - return 2.0*CV_PI - atanf(-y / x); + return static_cast(2.0 * CV_PI) - atanf(-y / x); } return 0; diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h index b5849d64a9..cabfae8819 100644 --- a/modules/features2d/src/akaze/AKAZE.h +++ b/modules/features2d/src/akaze/AKAZE.h @@ -33,9 +33,6 @@ private: cv::Mat descriptorBits_; cv::Mat bitMask_; - /// Computation times variables in ms - AKAZETiming timing_; - public: /// Constructor with input arguments @@ -74,14 +71,14 @@ public: //void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); // Methods for saving some results and showing computation times - void Save_Scale_Space(); - void Save_Detector_Responses(); - void Show_Computation_Times() const; + //void Save_Scale_Space(); + //void Save_Detector_Responses(); + //void Show_Computation_Times() const; /// Return the computation times - AKAZETiming Get_Computation_Times() const { - return timing_; - } + //AKAZETiming Get_Computation_Times() const { + // return timing_; + //} }; /* ************************************************************************* */ diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp index 0b818f8fe8..36e9690095 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -54,12 +54,12 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { kcontrast_ = DEFAULT_KCONTRAST; ncycles_ = 0; reordering_ = true; - tkcontrast_ = 0.0; - tnlscale_ = 0.0; - tdetector_ = 0.0; - tmderivatives_ = 0.0; - tdresponse_ = 0.0; - tdescriptor_ = 0.0; + //tkcontrast_ = 0.0; + //tnlscale_ = 0.0; + //tdetector_ = 0.0; + //tmderivatives_ = 0.0; + //tdresponse_ = 0.0; + //tdescriptor_ = 0.0; // Now allocate memory for the evolution Allocate_Memory_Evolution(); @@ -99,11 +99,11 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { aux.Lsmooth = cv::Mat::zeros(img_height_, img_width_, CV_32F); aux.Lstep = cv::Mat::zeros(img_height_, img_width_, CV_32F); aux.Ldet = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.esigma = soffset_*pow((float)2.0, (float)(j) / (float)(nsublevels_)+i); - aux.etime = 0.5*(aux.esigma*aux.esigma); + aux.esigma = soffset_*pow((float)2.0f, (float)(j) / (float)(nsublevels_)+i); + aux.etime = 0.5f*(aux.esigma*aux.esigma); aux.sigma_size = fRound(aux.esigma); - aux.octave = i; - aux.sublevel = j; + aux.octave = (float)i; + aux.sublevel = (float)j; evolution_.push_back(aux); } } @@ -115,7 +115,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { vector tau; float ttime = 0.0; ttime = evolution_[i].etime - evolution_[i - 1].etime; - naux = fed_tau_by_process_time(ttime, 1, 0.25, reordering_, tau); + naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau); 
nsteps_.push_back(naux); tsteps_.push_back(tau); ncycles_++; @@ -147,7 +147,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { */ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { - double t2 = 0.0, t1 = 0.0; + //double t2 = 0.0, t1 = 0.0; if (evolution_.size() == 0) { cout << "Error generating the nonlinear scale space!!" << endl; @@ -155,7 +155,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { return -1; } - t1 = getTickCount(); + //t1 = getTickCount(); // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); @@ -165,8 +165,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { // Firstly compute the kcontrast factor Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE); - t2 = getTickCount(); - tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency(); if (verbosity_ == true) { cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << @@ -212,8 +212,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { } } - t2 = getTickCount(); - tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency(); return 0; } @@ -228,18 +228,18 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { */ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { - if (verbosity_ == true) { - cout << "Computing Kcontrast factor." << endl; - } + //if (verbosity_ == true) { + // cout << "Computing Kcontrast factor." << endl; + //} - if (COMPUTE_KCONTRAST == true) { + if (COMPUTE_KCONTRAST) { kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); } - if (verbosity_ == true) { - cout << "kcontrast = " << kcontrast_ << endl; - cout << endl << "Now computing the nonlinear scale space!!" << endl; - } + //if (verbosity_ == true) { + // cout << "kcontrast = " << kcontrast_ << endl; + // cout << endl << "Now computing the nonlinear scale space!!" << endl; + //} } //************************************************************************************* @@ -250,18 +250,18 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil */ void KAZEFeatures::Compute_Multiscale_Derivatives(void) { - double t2 = 0.0, t1 = 0.0; - t1 = getTickCount(); + //double t2 = 0.0, t1 = 0.0; + //t1 = getTickCount(); #ifdef _OPENMP #pragma omp parallel for #endif for (size_t i = 0; i < evolution_.size(); i++) { - if (verbosity_ == true) { - cout << "Computing multiscale derivatives. Evolution time: " << evolution_[i].etime - << " Step (pixels): " << evolution_[i].sigma_size << endl; - } + //if (verbosity_ == true) { + // cout << "Computing multiscale derivatives. 
Evolution time: " << evolution_[i].etime + // << " Step (pixels): " << evolution_[i].sigma_size << endl; + //} // Compute multiscale derivatives for the detector compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size); @@ -277,8 +277,8 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); } - t2 = getTickCount(); - tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -290,10 +290,10 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) */ void KAZEFeatures::Compute_Detector_Response(void) { - double t2 = 0.0, t1 = 0.0; + //double t2 = 0.0, t1 = 0.0; float lxx = 0.0, lxy = 0.0, lyy = 0.0; - t1 = getTickCount(); + //t1 = getTickCount(); // Firstly compute the multiscale derivatives Compute_Multiscale_Derivatives(); @@ -301,9 +301,9 @@ void KAZEFeatures::Compute_Detector_Response(void) { for (size_t i = 0; i < evolution_.size(); i++) { // Determinant of the Hessian - if (verbosity_ == true) { - cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; - } + //if (verbosity_ == true) { + // cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; + //} for (int ix = 0; ix < img_height_; ix++) { for (int jx = 0; jx < img_width_; jx++) { @@ -315,8 +315,8 @@ void KAZEFeatures::Compute_Detector_Response(void) { } } - t2 = getTickCount(); - tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -328,8 +328,8 @@ void KAZEFeatures::Compute_Detector_Response(void) { */ void KAZEFeatures::Feature_Detection(std::vector& kpts) { - double t2 = 0.0, t1 = 0.0; - t1 = getTickCount(); + //double t2 = 0.0, t1 = 0.0; + //t1 = getTickCount(); kpts.clear(); @@ -342,8 +342,8 @@ void KAZEFeatures::Feature_Detection(std::vector& kpts) { // Perform some subpixel refinement Do_Subpixel_Refinement(kpts); - t2 = getTickCount(); - tdetector_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tdetector_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -476,11 +476,11 @@ void KAZEFeatures::Find_Extremum_Threading(const int& level) { // Add the point of interest!! 
if (is_extremum == true) { KeyPoint point; - point.pt.x = jx; - point.pt.y = ix; + point.pt.x = (float)jx; + point.pt.y = (float)ix; point.response = fabs(value); point.size = evolution_[level].esigma; - point.octave = evolution_[level].octave; + point.octave = (int)evolution_[level].octave; point.class_id = level; // We use the angle field for the sublevel value @@ -508,50 +508,50 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { Mat A = Mat::zeros(3, 3, CV_32F); Mat b = Mat::zeros(3, 1, CV_32F); Mat dst = Mat::zeros(3, 1, CV_32F); - double t2 = 0.0, t1 = 0.0; + //double t2 = 0.0, t1 = 0.0; - t1 = cv::getTickCount(); + //t1 = cv::getTickCount(); vector kpts_(kpts); for (size_t i = 0; i < kpts_.size(); i++) { - x = kpts_[i].pt.x; - y = kpts_[i].pt.y; + x = static_cast(kpts_[i].pt.x); + y = static_cast(kpts_[i].pt.y); // Compute the gradient - Dx = (1.0 / (2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x + step) + Dx = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x + step) - *(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x - step)); - Dy = (1.0 / (2.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x) + Dy = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x) - *(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x)); - Ds = 0.5*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x) + Ds = 0.5f*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x) - *(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x)); // Compute the Hessian - Dxx = (1.0 / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x + step) + Dxx = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x + step) + *(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x - step) - - 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); + - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); - Dyy = (1.0 / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x) + Dyy = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x) + *(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x) - - 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); + - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x))); Dss = *(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x) + *(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x) - - 2.0*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x)); + - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y)+x)); - Dxy = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x + step) + Dxy = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x + step) + (*(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x - step))) - - (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x + step) + - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr(y - step) + x + step) + (*(evolution_[kpts_[i].class_id].Ldet.ptr(y + step) + x - step))); - Dxs = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x + step) + Dxs = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x + step) + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x - step))) - - (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x - step) + - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y)+x - step) + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y)+x + step))); - Dys = (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y + step) + x) + Dys = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id 
+ 1].Ldet.ptr(y + step) + x) + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y - step) + x))) - - (1.0 / (4.0*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y - step) + x) + - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr(y - step) + x) + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr(y + step) + x))); // Solve the linear system @@ -569,13 +569,13 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { solve(A, b, dst, DECOMP_LU); - if (fabs(*(dst.ptr(0))) <= 1.0 && fabs(*(dst.ptr(1))) <= 1.0 && fabs(*(dst.ptr(2))) <= 1.0) { + if (fabs(*(dst.ptr(0))) <= 1.0f && fabs(*(dst.ptr(1))) <= 1.0f && fabs(*(dst.ptr(2))) <= 1.0f) { kpts_[i].pt.x += *(dst.ptr(0)); kpts_[i].pt.y += *(dst.ptr(1)); dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr(2))) / ((float)(nsublevels_)); // In OpenCV the size of a keypoint is the diameter!! - kpts_[i].size = 2.0*soffset_*pow((float)2.0, dsc); + kpts_[i].size = 2.0f*soffset_*pow((float)2.0f, dsc); kpts_[i].angle = 0.0; } // Set the points to be deleted after the for loop @@ -593,8 +593,8 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { } } - t2 = getTickCount(); - tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -663,8 +663,8 @@ void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, */ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) { - double t2 = 0.0, t1 = 0.0; - t1 = getTickCount(); + //double t2 = 0.0, t1 = 0.0; + //t1 = getTickCount(); // Allocate memory for the matrix of descriptors if (use_extended_ == true) { @@ -796,8 +796,8 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat } } - t2 = getTickCount(); - tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency(); + //t2 = getTickCount(); + //tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -822,7 +822,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) xf = kpt.pt.x; yf = kpt.pt.y; level = kpt.class_id; - s = fRound(kpt.size / 2.0); + s = fRound(kpt.size / 2.0f); // Calculate derivatives responses for points within radius of 6*scale for (int i = -6; i <= 6; ++i) { @@ -832,7 +832,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) ix = fRound(xf + i*s); if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_) { - gweight = gaussian(iy - yf, ix - xf, 2.5*s); + gweight = gaussian(iy - yf, ix - xf, 2.5f*s); resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); } @@ -848,8 +848,8 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) } // Loop slides pi/3 window around feature point - for (ang1 = 0; ang1 < 2.0*CV_PI; ang1 += 0.15f) { - ang2 = (ang1 + CV_PI / 3.0f > 2.0*CV_PI ? ang1 - 5.0f*CV_PI / 3.0f : ang1 + CV_PI / 3.0f); + for (ang1 = 0; ang1 < 2.0f*CV_PI; ang1 += 0.15f) { + ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? 
ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0)); sumX = sumY = 0.f; for (size_t k = 0; k < Ang.size(); ++k) { @@ -862,7 +862,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) sumY += resY[k]; } else if (ang2 < ang1 && - ((ang > 0 && ang < ang2) || (ang > ang1 && ang < 2.0*CV_PI))) { + ((ang > 0 && ang < ang2) || (ang > ang1 && ang < (float)(2.0*CV_PI)))) { sumX += resX[k]; sumY += resY[k]; } @@ -907,7 +907,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float yf = kpt.pt.y; xf = kpt.pt.x; level = kpt.class_id; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); // Calculate descriptor for this interest point for (int i = -pattern_size; i < pattern_size; i += sample_step) { @@ -921,13 +921,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float sample_y = k*scale + yf; sample_x = l*scale + xf; - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - .5f); + x1 = (int)(sample_x - .5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + .5f); + x2 = (int)(sample_x + .5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -938,13 +938,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Sum the derivatives to the cumulative descriptor dx += rx; @@ -1006,7 +1006,7 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); angle = kpt.angle; level = kpt.class_id; co = cos(angle); @@ -1024,13 +1024,13 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - .5f); + x1 = (int)(sample_x - .5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + .5f); + x2 = (int)(sample_x + .5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1041,13 +1041,13 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 
- fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; @@ -1107,7 +1107,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa int dsize = 0, scale = 0, level = 0; // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + float cx = -0.5f, cy = 0.5f; // Set the descriptor size and the sample and pattern sizes dsize = 64; @@ -1117,7 +1117,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); level = kpt.class_id; i = -8; @@ -1128,13 +1128,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa j = -8; i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0f; + cy = -0.5f; while (j < pattern_size) { dx = dy = mdx = mdy = 0.0; - cy += 1.0; + cy += 1.0f; j = j - 4; ky = i + sample_step; @@ -1150,15 +1150,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa sample_x = l*scale + xf; //Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1169,13 +1169,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; rx = gauss_s1*rx; ry = gauss_s1*ry; @@ -1239,7 +1239,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) int dsize = 0, scale = 0, level = 0; // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + float cx = -0.5f, cy = 0.5f; // Set the descriptor size and the sample and pattern sizes dsize = 64; @@ -1249,7 +1249,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); angle = kpt.angle; level = kpt.class_id; co = cos(angle); @@ -1264,13 +1264,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) j = -8; i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0f; + cy = -0.5f; while (j < pattern_size) { dx = dy = mdx = mdy = 0.0; - cy += 1.0; + cy += 1.0f; j = j - 4; ky = i + sample_step; @@ -1287,14 +1287,14 @@ void 
KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) sample_x = xf + (-l*scale*si + k*scale*co); // Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); - y1 = fRound(sample_y - .5); - x1 = fRound(sample_x - .5); + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale); + y1 = fRound(sample_y - 0.5f); + x1 = fRound(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1305,13 +1305,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis rry = gauss_s1*(rx*co + ry*si); @@ -1379,7 +1379,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); level = kpt.class_id; // Calculate descriptor for this interest point @@ -1395,13 +1395,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa sample_y = yf + l*scale; sample_x = xf + k*scale; - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1412,13 +1412,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; modg = pow(rx, 2) + pow(ry, 2); @@ -1428,25 +1428,25 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa res2 = *(evolution_[level].Lxx.ptr(y1)+x2); res3 = *(evolution_[level].Lxx.ptr(y2)+x1); res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + 
fx*fy*res4; res1 = *(evolution_[level].Lxy.ptr(y1)+x1); res2 = *(evolution_[level].Lxy.ptr(y1)+x2); res3 = *(evolution_[level].Lxy.ptr(y2)+x1); res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lyy.ptr(y1)+x1); res2 = *(evolution_[level].Lyy.ptr(y1)+x2); res3 = *(evolution_[level].Lyy.ptr(y2)+x1); res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); + lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); } else { lww = 0.0; @@ -1514,7 +1514,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); angle = kpt.angle; level = kpt.class_id; co = cos(angle); @@ -1533,13 +1533,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1550,13 +1550,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; modg = pow(rx, 2) + pow(ry, 2); @@ -1566,25 +1566,25 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) res2 = *(evolution_[level].Lxx.ptr(y1)+x2); res3 = *(evolution_[level].Lxx.ptr(y2)+x1); res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lxy.ptr(y1)+x1); res2 = *(evolution_[level].Lxy.ptr(y1)+x2); res3 = *(evolution_[level].Lxy.ptr(y2)+x1); res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - 
fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lyy.ptr(y1)+x1); res2 = *(evolution_[level].Lyy.ptr(y1)+x2); res3 = *(evolution_[level].Lyy.ptr(y2)+x1); res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); + lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); } else { lww = 0.0; @@ -1652,7 +1652,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); level = kpt.class_id; // Calculate descriptor for this interest point @@ -1668,13 +1668,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa sample_y = k*scale + yf; sample_x = l*scale + xf; - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1685,13 +1685,13 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Sum the derivatives to the cumulative descriptor if (ry >= 0.0) { @@ -1772,7 +1772,7 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); angle = kpt.angle; level = kpt.class_id; co = cos(angle); @@ -1792,13 +1792,13 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1809,13 +1809,13 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = 
*(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis rry = rx*co + ry*si; @@ -1895,7 +1895,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo int dsize = 0, scale = 0, level = 0; // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + float cx = -0.5f, cy = 0.5f; // Set the descriptor size and the sample and pattern sizes dsize = 128; @@ -1905,7 +1905,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); level = kpt.class_id; i = -8; @@ -1917,15 +1917,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo j = -8; i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0f; + cy = -0.5f; while (j < pattern_size) { dxp = dxn = mdxp = mdxn = 0.0; dyp = dyn = mdyp = mdyn = 0.0; - cy += 1.0; + cy += 1.0f; j = j - 4; ky = i + sample_step; @@ -1941,15 +1941,15 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo sample_x = l*scale + xf; //Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50*scale); + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -1960,13 +1960,13 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; rx = gauss_s1*rx; ry = gauss_s1*ry; @@ -2051,7 +2051,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc int dsize = 0, scale = 0, level = 0; // Subregion centers for the 4x4 gaussian weighting - float cx = -0.5, cy = 0.5; + float cx = -0.5f, cy = 0.5f; // Set the descriptor size and the sample and pattern sizes dsize = 128; @@ -2061,7 +2061,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = 
fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); angle = kpt.angle; level = kpt.class_id; co = cos(angle); @@ -2076,8 +2076,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc j = -8; i = i - 4; - cx += 1.0; - cy = -0.5; + cx += 1.0f; + cy = -0.5f; while (j < pattern_size) { @@ -2101,15 +2101,15 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc sample_x = xf + (-l*scale*si + k*scale*co); // Get the gaussian weighted x and y responses - gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5*scale); + gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale); - y1 = fRound(sample_y - .5); - x1 = fRound(sample_x - .5); + y1 = fRound(sample_y - 0.5f); + x1 = fRound(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -2120,13 +2120,13 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Get the x and y derivatives on the rotated axis rry = gauss_s1*(rx*co + ry*si); @@ -2217,7 +2217,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); level = kpt.class_id; // Calculate descriptor for this interest point @@ -2233,13 +2233,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo sample_y = k*scale + yf; sample_x = l*scale + xf; - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -2250,13 +2250,13 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; modg = pow(rx, 2) + pow(ry, 2); @@ -2266,25 +2266,25 @@ void 
KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo res2 = *(evolution_[level].Lxx.ptr(y1)+x2); res3 = *(evolution_[level].Lxx.ptr(y2)+x1); res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lxy.ptr(y1)+x1); res2 = *(evolution_[level].Lxy.ptr(y1)+x2); res3 = *(evolution_[level].Lxy.ptr(y2)+x1); res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lyy.ptr(y1)+x1); res2 = *(evolution_[level].Lyy.ptr(y1)+x2); res3 = *(evolution_[level].Lyy.ptr(y2)+x1); res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); + lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); } else { lww = 0.0; @@ -2372,7 +2372,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc // Get the information from the keypoint yf = kpt.pt.y; xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0); + scale = fRound(kpt.size / 2.0f); angle = kpt.angle; level = kpt.class_id; co = cos(angle); @@ -2392,13 +2392,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc sample_y = yf + (l*scale*co + k*scale*si); sample_x = xf + (-l*scale*si + k*scale*co); - y1 = (int)(sample_y - .5); - x1 = (int)(sample_x - .5); + y1 = (int)(sample_y - 0.5f); + x1 = (int)(sample_x - 0.5f); checkDescriptorLimits(x1, y1, img_width_, img_height_); - y2 = (int)(sample_y + .5); - x2 = (int)(sample_x + .5); + y2 = (int)(sample_y + 0.5f); + x2 = (int)(sample_x + 0.5f); checkDescriptorLimits(x2, y2, img_width_, img_height_); @@ -2409,13 +2409,13 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc res2 = *(evolution_[level].Lx.ptr(y1)+x2); res3 = *(evolution_[level].Lx.ptr(y2)+x1); res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Ly.ptr(y1)+x1); res2 = *(evolution_[level].Ly.ptr(y1)+x2); res3 = *(evolution_[level].Ly.ptr(y2)+x1); res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; modg = pow(rx, 2) + pow(ry, 2); @@ -2424,25 +2424,25 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc res2 = *(evolution_[level].Lxx.ptr(y1)+x2); res3 = *(evolution_[level].Lxx.ptr(y2)+x1); res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxx = (1.0f - fx)*(1.0f - fy)*res1 
+ fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lxy.ptr(y1)+x1); res2 = *(evolution_[level].Lxy.ptr(y1)+x2); res3 = *(evolution_[level].Lxy.ptr(y2)+x1); res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; res1 = *(evolution_[level].Lyy.ptr(y1)+x1); res2 = *(evolution_[level].Lyy.ptr(y1)+x2); res3 = *(evolution_[level].Lyy.ptr(y2)+x1); res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0 - fx)*(1.0 - fy)*res1 + fx*(1.0 - fy)*res2 + (1.0 - fx)*fy*res3 + fx*fy*res4; + ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); + lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); + lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); } else { lww = 0.0; @@ -2530,7 +2530,7 @@ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv: AOS_Columns(Ldprev, c, stepsize); #endif - Ld = 0.5*(Lty_ + Ltx_.t()); + Ld = 0.5f*(Lty_ + Ltx_.t()); } //************************************************************************************* @@ -2567,7 +2567,7 @@ void KAZEFeatures::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float // a = 1 + t.*p; (p is -1*p) // b = -t.*q; - ay_ = 1.0 + stepsize*py_; // p is -1*p + ay_ = 1.0f + stepsize*py_; // p is -1*p by_ = -stepsize*qr_; // Do Thomas algorithm to solve the linear system of equations @@ -2607,7 +2607,7 @@ void KAZEFeatures::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const fl } // a = 1 + t.*p'; - ax_ = 1.0 + stepsize*px_.t(); + ax_ = 1.0f + stepsize*px_.t(); // b = -t.*q'; bx_ = -stepsize*qc_.t(); @@ -2697,15 +2697,15 @@ inline float getAngle(const float& x, const float& y) { } if (x < 0 && y >= 0) { - return CV_PI - atan(-y / x); + return (float)CV_PI - atan(-y / x); } if (x < 0 && y < 0) { - return CV_PI + atan(y / x); + return (float)CV_PI + atan(y / x); } if (x >= 0 && y < 0) { - return 2.0*CV_PI - atan(-y / x); + return 2.0f * (float)CV_PI - atan(-y / x); } return 0; @@ -2723,7 +2723,7 @@ inline float getAngle(const float& x, const float& y) { */ inline void clippingDescriptor(float *desc, const int& dsize, const int& niter, const float& ratio) { - float cratio = ratio / sqrt(dsize); + float cratio = ratio / sqrtf(static_cast(dsize)); float len = 0.0; for (int i = 0; i < niter; i++) { diff --git a/modules/features2d/src/kaze/KAZE.h b/modules/features2d/src/kaze/KAZE.h index 3e86ab2d86..71b44340e4 100644 --- a/modules/features2d/src/kaze/KAZE.h +++ b/modules/features2d/src/kaze/KAZE.h @@ -54,13 +54,13 @@ private: std::vector nsteps_; // Vector of number of steps per cycle // Computation times variables in ms - double tkcontrast_; // Kcontrast factor computation - double tnlscale_; // Nonlinear Scale space generation - double tdetector_; // Feature detector - double tmderivatives_; // Multiscale derivatives computation - double tdresponse_; // Detector response computation - double tdescriptor_; // Feature descriptor - double tsubpixel_; // Subpixel refinement + //double tkcontrast_; // Kcontrast factor computation + //double tnlscale_; // Nonlinear Scale space generation + 
//double tdetector_; // Feature detector + //double tmderivatives_; // Multiscale derivatives computation + //double tdresponse_; // Detector response computation + //double tdescriptor_; // Feature descriptor + //double tsubpixel_; // Subpixel refinement // Some auxiliary variables used in the AOS step cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_; @@ -243,33 +243,33 @@ public: return use_extended_; } - float Get_Time_KContrast(void) { - return tkcontrast_; - } + //float Get_Time_KContrast(void) { + // return tkcontrast_; + //} - float Get_Time_NLScale(void) { - return tnlscale_; - } + //float Get_Time_NLScale(void) { + // return tnlscale_; + //} - float Get_Time_Detector(void) { - return tdetector_; - } + //float Get_Time_Detector(void) { + // return tdetector_; + //} - float Get_Time_Multiscale_Derivatives(void) { - return tmderivatives_; - } + //float Get_Time_Multiscale_Derivatives(void) { + // return tmderivatives_; + //} - float Get_Time_Detector_Response(void) { - return tdresponse_; - } + //float Get_Time_Detector_Response(void) { + // return tdresponse_; + //} - float Get_Time_Descriptor(void) { - return tdescriptor_; - } + //float Get_Time_Descriptor(void) { + // return tdescriptor_; + //} - float Get_Time_Subpixel(void) { - return tsubpixel_; - } + //float Get_Time_Subpixel(void) { + // return tsubpixel_; + //} }; //************************************************************************************* diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/config.h index 2615f5fb64..aa2fed5410 100644 --- a/modules/features2d/src/kaze/config.h +++ b/modules/features2d/src/kaze/config.h @@ -63,7 +63,7 @@ struct KAZEOptions { KAZEOptions() { // Load the default options soffset = DEFAULT_SCALE_OFFSET; - omax = DEFAULT_OCTAVE_MAX; + omax = static_cast(DEFAULT_OCTAVE_MAX); nsublevels = DEFAULT_NSUBLEVELS; dthreshold = DEFAULT_DETECTOR_THRESHOLD; use_fed = DEFAULT_USE_FED; From 3c596184e4b86b67689f39ddb1bdab23af28254f Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 15:01:42 +0300 Subject: [PATCH 22/52] Added copyright headers for KAZE and AKAZE wrappers --- modules/features2d/src/akaze.cpp | 50 ++++++++++++++++++++++++++++++++ modules/features2d/src/kaze.cpp | 49 +++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index c41c2f98db..3c918bfba8 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -1,3 +1,53 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* +OpenCV wrapper of reference implementation of +[1] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. +Pablo F. Alcantarilla, J. Nuevo and Adrien Bartoli. +In British Machine Vision Conference (BMVC), Bristol, UK, September 2013 +http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pdf +@author Eugene Khvedchenya +*/ + #include "precomp.hpp" #include "akaze/AKAZE.h" diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index e49e1d2d78..52fef13661 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -1,3 +1,52 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* +OpenCV wrapper of reference implementation of +[1] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. +In European Conference on Computer Vision (ECCV), Fiorenze, Italy, October 2012 +http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pdf +@author Eugene Khvedchenya +*/ + #include "precomp.hpp" #include "kaze/KAZE.h" From f9422f60a6e86498f712bde91d8b837996fa96ee Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 15:13:42 +0300 Subject: [PATCH 23/52] Fix Fix casting from/to int/float that caused lot of compiler warnings. --- .../src/kaze/nldiffusion_functions.cpp | 28 ++++++++----------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index d76a3c40f7..ce3bef1758 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -47,7 +47,7 @@ void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, // Compute an appropriate kernel size according to the specified sigma if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = ceil(2.0*(1.0 + (sigma-0.8)/(0.3))); + ksize_x_ = (size_t)ceil(2.0f*(1.0f + (sigma-0.8f)/(0.3f))); ksize_y_ = ksize_x_; } @@ -111,7 +111,7 @@ void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, fl Mat modg; cv::pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg); cv::exp(-3.315/modg, dst); - dst = 1.0 - dst; + dst = 1.0f - dst; } //************************************************************************************* @@ -138,18 +138,13 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, float hmax = 0.0; // Create the array for the histogram - float *hist = new float[nbins]; + std::vector hist(nbins, 0); // Create the matrices Mat gaussian = Mat::zeros(img.rows,img.cols,CV_32F); Mat Lx = Mat::zeros(img.rows,img.cols,CV_32F); Mat Ly = Mat::zeros(img.rows,img.cols,CV_32F); - // Set the histogram to zero, just in case - for (int i = 0; i < nbins; i++) { - hist[i] = 0.0; - } - // Perform the Gaussian convolution gaussian_2D_convolution(img,gaussian,ksize_x,ksize_y,gscale); @@ -180,7 +175,7 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, // Find the correspondent bin if (modg != 0.0) { - nbin = floor(nbins*(modg/hmax)); + nbin = (int)floor(nbins*(modg/hmax)); if (nbin == nbins) { nbin--; @@ -207,7 +202,6 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, kperc = hmax*((float)(k)/(float)nbins); } - delete hist; return kperc; } @@ -256,8 +250,8 @@ void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, Mat kx = _kx.getMat(); Mat ky = _ky.getMat(); - float w = 10.0/3.0; - float norm = 1.0/(2.0*scale*(w+2.0)); + float w = 10.0f/3.0f; + float norm = 1.0f/(2.0f*scale*(w+2.0f)); for (int k = 0; k < 2; k++) { Mat* kernel = k == 0 
? &kx : &ky; @@ -300,7 +294,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsi float xneg = ((*(c.ptr(i)+j-1))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i)+j-1))); float ypos = ((*(c.ptr(i)+j))+(*(c.ptr(i+1)+j)))*((*(Ld.ptr(i+1)+j))-(*(Ld.ptr(i)+j))); float yneg = ((*(c.ptr(i-1)+j))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i-1)+j))); - *(Lstep.ptr(i)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); } } @@ -309,7 +303,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsi float xneg = ((*(c.ptr(0)+j-1))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j-1))); float ypos = ((*(c.ptr(0)+j))+(*(c.ptr(1)+j)))*((*(Ld.ptr(1)+j))-(*(Ld.ptr(0)+j))); float yneg = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j))); - *(Lstep.ptr(0)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + *(Lstep.ptr(0)+j) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); } for (int j = 1; j < Lstep.cols-1; j++) { @@ -317,7 +311,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsi float xneg = ((*(c.ptr(Lstep.rows-1)+j-1))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j-1))); float ypos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j))); float yneg = ((*(c.ptr(Lstep.rows-2)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-2)+j))); - *(Lstep.ptr(Lstep.rows-1)+j) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + *(Lstep.ptr(Lstep.rows-1)+j) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); } for (int i = 1; i < Lstep.rows-1; i++) { @@ -325,7 +319,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsi float xneg = ((*(c.ptr(i)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i)))); float ypos = ((*(c.ptr(i)))+(*(c.ptr(i+1))))*((*(Ld.ptr(i+1)))-(*(Ld.ptr(i)))); float yneg = ((*(c.ptr(i-1)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i-1)))); - *(Lstep.ptr(i)) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); } for (int i = 1; i < Lstep.rows-1; i++) { @@ -333,7 +327,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsi float xneg = ((*(c.ptr(i)+Lstep.cols-2))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-2))); float ypos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i+1)+Lstep.cols-1)))*((*(Ld.ptr(i+1)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); float yneg = ((*(c.ptr(i-1)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i-1)+Lstep.cols-1))); - *(Lstep.ptr(i)+Lstep.cols-1) = 0.5*stepsize*(xpos-xneg + ypos-yneg); + *(Lstep.ptr(i)+Lstep.cols-1) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); } Ld = Ld + Lstep; From f9d3c49023ffd312f954653c9ea43a8d7bcf265c Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 15:14:03 +0300 Subject: [PATCH 24/52] Bugfix: wrong variable name --- modules/features2d/src/akaze/AKAZE.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 94b50eb562..75c1fc6515 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -422,7 +422,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { } if (is_repeated == false) - kpts.push_back(point); + kpts.push_back(pt); } //t2 = cv::getTickCount(); From 
2162aab0e9b341194ecfa414f63f553408f5ad56 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 15:17:09 +0300 Subject: [PATCH 25/52] Remove AKAZEFeatures desctructor --- modules/features2d/src/akaze/AKAZE.cpp | 9 --------- modules/features2d/src/akaze/AKAZE.h | 3 --- 2 files changed, 12 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index 75c1fc6515..a2cd507ba7 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -32,15 +32,6 @@ AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { Allocate_Memory_Evolution(); } -/* ************************************************************************* */ -/** - * @brief AKAZEFeatures destructor - */ -AKAZEFeatures::~AKAZEFeatures(void) { - - evolution_.clear(); -} - /* ************************************************************************* */ /** * @brief This method allocates the memory for the nonlinear diffusion evolution diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZE.h index cabfae8819..f1bd7250bf 100644 --- a/modules/features2d/src/akaze/AKAZE.h +++ b/modules/features2d/src/akaze/AKAZE.h @@ -38,9 +38,6 @@ public: /// Constructor with input arguments AKAZEFeatures(const AKAZEOptions& options); - /// Destructor - ~AKAZEFeatures(); - /// Scale Space methods void Allocate_Memory_Evolution(); int Create_Nonlinear_Scale_Space(const cv::Mat& img); From cc0a94c536972447a6904cf4161a77fe8b1f7c6e Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 16:23:58 +0300 Subject: [PATCH 26/52] Fix "conditional expression constant" warning --- modules/features2d/src/akaze.cpp | 8 ++++---- modules/features2d/src/kaze.cpp | 8 ++++---- modules/features2d/src/kaze/KAZE.cpp | 30 +++++++++++++++------------- modules/features2d/src/kaze/KAZE.h | 1 + 4 files changed, 25 insertions(+), 22 deletions(-) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 3c918bfba8..7a0ac729bd 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -150,8 +150,8 @@ namespace cv impl.Compute_Descriptors(keypoints, desc); - CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); - CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize())); + CV_Assert((!desc.rows || (desc.type() & descriptorType()))); } void AKAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const @@ -196,7 +196,7 @@ namespace cv impl.Create_Nonlinear_Scale_Space(img1_32); impl.Compute_Descriptors(keypoints, desc); - CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); - CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize())); + CV_Assert((!desc.rows || (desc.type() & descriptorType()))); } } \ No newline at end of file diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 52fef13661..d975eaeb04 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -119,8 +119,8 @@ namespace cv impl.Feature_Description(keypoints, desc); - CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); - CV_Assert((!desc.rows || (desc.type() 
& descriptorType())) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize())); + CV_Assert((!desc.rows || (desc.type() & descriptorType()))); } void KAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const @@ -167,7 +167,7 @@ namespace cv impl.Create_Nonlinear_Scale_Space(img1_32); impl.Feature_Description(keypoints, desc); - CV_Assert((!desc.rows || desc.cols == descriptorSize()) && "Descriptor size does not match expected"); - CV_Assert((!desc.rows || (desc.type() & descriptorType())) && "Descriptor type does not match expected"); + CV_Assert((!desc.rows || desc.cols == descriptorSize())); + CV_Assert((!desc.rows || (desc.type() & descriptorType()))); } } \ No newline at end of file diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp index 36e9690095..f8625f9833 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -51,6 +51,8 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { use_fed_ = options.use_fed; use_upright_ = options.upright; use_extended_ = options.extended; + use_normalization = USE_CLIPPING_NORMALIZATION; + kcontrast_ = DEFAULT_KCONTRAST; ncycles_ = 0; reordering_ = true; @@ -232,9 +234,9 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil // cout << "Computing Kcontrast factor." << endl; //} - if (COMPUTE_KCONTRAST) { + //if (COMPUTE_KCONTRAST) { kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); - } + //} //if (verbosity_ == true) { // cout << "kcontrast = " << kcontrast_ << endl; @@ -972,7 +974,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -1079,7 +1081,7 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -1211,7 +1213,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -1344,7 +1346,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -1479,7 +1481,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -1617,7 +1619,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } @@ -1737,7 +1739,7 @@ void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, floa desc[i] /= len; } - if 
(USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -1865,7 +1867,7 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -2021,7 +2023,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -2182,7 +2184,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -2335,7 +2337,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } @@ -2493,7 +2495,7 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc desc[i] /= len; } - if (USE_CLIPPING_NORMALIZATION == true) { + if (use_normalization == true) { clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); } } diff --git a/modules/features2d/src/kaze/KAZE.h b/modules/features2d/src/kaze/KAZE.h index 71b44340e4..31507a6024 100644 --- a/modules/features2d/src/kaze/KAZE.h +++ b/modules/features2d/src/kaze/KAZE.h @@ -43,6 +43,7 @@ private: bool use_fed_; // Set to true in case we want to use FED for the nonlinear diffusion filtering. 
Set false for using AOS bool use_upright_; // Set to true in case we want to use the upright version of the descriptors bool use_extended_; // Set to true in case we want to use the extended version of the descriptors + bool use_normalization; // Vector of keypoint vectors for finding extrema in multiple threads std::vector > kpts_par_; From 599bcfb5919653ac370c4d6042df002261e114e2 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 16:24:35 +0300 Subject: [PATCH 27/52] Fix size_t to int conversion --- modules/features2d/src/akaze/AKAZE.cpp | 44 +++++++++++++------------- modules/features2d/src/kaze/KAZE.cpp | 40 +++++++++++------------ modules/features2d/src/kaze/config.h | 16 +++++----- 3 files changed, 50 insertions(+), 50 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZE.cpp index a2cd507ba7..84d33d5579 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZE.cpp @@ -234,7 +234,7 @@ void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { //t1 = cv::getTickCount(); - cv::parallel_for_(cv::Range(0, evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_)); /* for (int i = 0; i < (int)(evolution_.size()); i++) { @@ -334,8 +334,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { is_extremum = true; point.response = fabs(value); point.size = evolution_[i].esigma*options_.derivative_factor; - point.octave = evolution_[i].octave; - point.class_id = i; + point.octave = (int)evolution_[i].octave; + point.class_id = (int)i; ratio = pow(2.f, point.octave); sigma_size_ = fRound(point.size / ratio); point.pt.x = static_cast(jx); @@ -349,7 +349,7 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { dist = sqrt(pow(point.pt.x*ratio - kpts_aux[ik].pt.x, 2) + pow(point.pt.y*ratio - kpts_aux[ik].pt.y, 2)); if (dist <= point.size) { if (point.response > kpts_aux[ik].response) { - id_repeated = ik; + id_repeated = (int)ik; is_repeated = true; } else { @@ -501,7 +501,7 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, float mdist) const { vector aux; - vector to_delete; + vector to_delete; float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; bool found = false; @@ -527,7 +527,7 @@ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts for (size_t i = 0; i < kpts.size(); i++) { found = false; for (size_t j = 0; j < to_delete.size(); j++) { - if (i == (size_t)(to_delete[j])) { + if (i == to_delete[j]) { found = true; break; } @@ -805,17 +805,17 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat // Allocate memory for the matrix with the descriptors if (options_.descriptor < MLDB_UPRIGHT) { - desc = cv::Mat::zeros(kpts.size(), 64, CV_32FC1); + desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); } else { // We use the full length binary descriptor -> 486 bits if (options_.descriptor_size == 0) { int t = (6 + 36 + 120)*options_.descriptor_channels; - desc = cv::Mat::zeros(kpts.size(), (int)ceil(t / 8.), CV_8UC1); + desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1); } else { // We use the random bit selection length binary descriptor - desc = cv::Mat::zeros(kpts.size(), (int)ceil(options_.descriptor_size / 8.), CV_8UC1); + desc = cv::Mat::zeros((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.), 
CV_8UC1); } } @@ -823,7 +823,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation { - cv::parallel_for_(cv::Range(0, kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_, options_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr(i)); @@ -832,7 +832,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; case SURF: { - cv::parallel_for_(cv::Range(0, kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Compute_Main_Orientation(kpts[i]); @@ -842,7 +842,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation { - cv::parallel_for_(cv::Range(0, kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); @@ -851,7 +851,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; case MSURF: { - cv::parallel_for_(cv::Range(0, kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Compute_Main_Orientation(kpts[i]); @@ -862,9 +862,9 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat case MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { if (options_.descriptor_size == 0) - cv::parallel_for_(cv::Range(0, kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); else - cv::parallel_for_(cv::Range(0, kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // if (options_.descriptor_size == 0) @@ -877,9 +877,9 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat case MLDB: { if (options_.descriptor_size == 0) - cv::parallel_for_(cv::Range(0, kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); else - cv::parallel_for_(cv::Range(0, kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Compute_Main_Orientation(kpts[i]); @@ -2145,7 +2145,7 @@ void 
generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int } ssz *= nchannels; - CV_Assert(nbits <= ssz && "descriptor size can't be bigger than full descriptor"); + CV_Assert((nbits <= ssz) && "descriptor size can't be bigger than full descriptor"); // Since the full descriptor is usually under 10k elements, we pick // the selection from the full matrix. We take as many samples per @@ -2153,7 +2153,7 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int // take the two samples involved and put them in the sampling list Mat_ fullM(ssz / nchannels, 5); - for (size_t i = 0, c = 0; i < 3; i++) { + for (int i = 0, c = 0; i < 3; i++) { int gdiv = i + 2; //grid divisions, per row int gsz = gdiv*gdiv; int psz = (int)ceil(2.f*pattern_size / (float)gdiv); @@ -2175,13 +2175,13 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int // Select some samples. A sample includes all channels int count = 0; - size_t npicks = (size_t)ceil(nbits / (float)nchannels); + int npicks = (int)ceil(nbits / (float)nchannels); Mat_ samples(29, 3); Mat_ fullcopy = fullM.clone(); samples = -1; - for (size_t i = 0; i < npicks; i++) { - size_t k = rand() % (fullM.rows - i); + for (int i = 0; i < npicks; i++) { + int k = rand() % (fullM.rows - i); if (i < 6) { // Force use of the coarser grid values and comparisons k = i; diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZE.cpp index f8625f9833..f1d0dc7039 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZE.cpp @@ -381,20 +381,20 @@ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) #ifdef _OPENMP #pragma omp parallel for #endif - for (size_t i = 1; i < evolution_.size() - 1; i++) { + for (int i = 1; i < evolution_.size() - 1; i++) { Find_Extremum_Threading(i); } // Now fill the vector of keypoints!!! 
- for (size_t i = 0; i < kpts_par_.size(); i++) { - for (size_t j = 0; j < kpts_par_[i].size(); j++) { + for (int i = 0; i < kpts_par_.size(); i++) { + for (int j = 0; j < kpts_par_[i].size(); j++) { level = i + 1; is_extremum = true; is_repeated = false; is_out = false; // Check in case we have the same point as maxima in previous evolution levels - for (size_t ik = 0; ik < kpts.size(); ik++) { + for (int ik = 0; ik < kpts.size(); ik++) { if (kpts[ik].class_id == level || kpts[ik].class_id == level + 1 || kpts[ik].class_id == level - 1) { dist = pow(kpts_par_[i][j].pt.x - kpts[ik].pt.x, 2) + pow(kpts_par_[i][j].pt.y - kpts[ik].pt.y, 2); @@ -610,7 +610,7 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, const float& mdist) { vector aux; - vector to_delete; + vector to_delete; float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; bool found = false; @@ -639,7 +639,7 @@ void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, found = false; for (size_t j = 0; j < to_delete.size(); j++) { - if (i == (size_t)(to_delete[j])) { + if (i == to_delete[j]) { found = true; break; } @@ -670,10 +670,10 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat // Allocate memory for the matrix of descriptors if (use_extended_ == true) { - desc = Mat::zeros(kpts.size(), 128, CV_32FC1); + desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1); } else { - desc = Mat::zeros(kpts.size(), 64, CV_32FC1); + desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1); } if (use_upright_ == true) { @@ -684,7 +684,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { kpts[i].angle = 0.0; - Get_SURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + Get_SURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 1) { @@ -693,7 +693,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { kpts[i].angle = 0.0; - Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 2) { @@ -702,7 +702,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { kpts[i].angle = 0.0; - Get_GSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); + Get_GSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); } } } @@ -714,7 +714,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { kpts[i].angle = 0.0; - Get_SURF_Upright_Descriptor_128(kpts[i], desc.ptr(i)); + Get_SURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 1) { @@ -723,7 +723,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { kpts[i].angle = 0.0; - Get_MSURF_Upright_Descriptor_128(kpts[i], desc.ptr(i)); + Get_MSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 2) { @@ -732,7 +732,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { kpts[i].angle = 0.0; - Get_GSURF_Upright_Descriptor_128(kpts[i], desc.ptr(i)); + Get_GSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); } } } @@ -745,7 +745,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { 
Compute_Main_Orientation_SURF(kpts[i]); - Get_SURF_Descriptor_64(kpts[i], desc.ptr(i)); + Get_SURF_Descriptor_64(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 1) { @@ -754,7 +754,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { Compute_Main_Orientation_SURF(kpts[i]); - Get_MSURF_Descriptor_64(kpts[i], desc.ptr(i)); + Get_MSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 2) { @@ -763,7 +763,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { Compute_Main_Orientation_SURF(kpts[i]); - Get_GSURF_Descriptor_64(kpts[i], desc.ptr(i)); + Get_GSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); } } } @@ -774,7 +774,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { Compute_Main_Orientation_SURF(kpts[i]); - Get_SURF_Descriptor_128(kpts[i], desc.ptr(i)); + Get_SURF_Descriptor_128(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 1) { @@ -783,7 +783,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { Compute_Main_Orientation_SURF(kpts[i]); - Get_MSURF_Descriptor_128(kpts[i], desc.ptr(i)); + Get_MSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); } } else if (descriptor_mode_ == 2) { @@ -792,7 +792,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat #endif for (size_t i = 0; i < kpts.size(); i++) { Compute_Main_Orientation_SURF(kpts[i]); - Get_GSURF_Descriptor_128(kpts[i], desc.ptr(i)); + Get_GSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); } } } diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/config.h index aa2fed5410..1a3d02d657 100644 --- a/modules/features2d/src/kaze/config.h +++ b/modules/features2d/src/kaze/config.h @@ -30,11 +30,11 @@ #define NMAX_CHAR 400 // Some default options -static const float DEFAULT_SCALE_OFFSET = 1.60; // Base scale offset (sigma units) -static const float DEFAULT_OCTAVE_MAX = 4.0; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) +static const float DEFAULT_SCALE_OFFSET = 1.60f; // Base scale offset (sigma units) +static const float DEFAULT_OCTAVE_MAX = 4.0f; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) static const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level -static const float DEFAULT_DETECTOR_THRESHOLD = 0.001; // Detector response threshold to accept point -static const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001; // Minimum Detector response threshold to accept point +static const float DEFAULT_DETECTOR_THRESHOLD = 0.001f; // Detector response threshold to accept point +static const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001f; // Minimum Detector response threshold to accept point static const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-SURF static const bool DEFAULT_USE_FED = true; // 0->AOS, 1->FED static const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation @@ -45,14 +45,14 @@ static const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image w static const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints // Some important configuration variables -static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0; -static const float DEFAULT_KCONTRAST = .01; -static const float KCONTRAST_PERCENTILE = 0.7; 
+static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; +static const float DEFAULT_KCONTRAST = 0.01f; +static const float KCONTRAST_PERCENTILE = 0.7f; static const int KCONTRAST_NBINS = 300; static const bool COMPUTE_KCONTRAST = true; static const int DEFAULT_DIFFUSIVITY_TYPE = 1; // 0 -> PM G1, 1 -> PM G2, 2 -> Weickert static const bool USE_CLIPPING_NORMALIZATION = false; -static const float CLIPPING_NORMALIZATION_RATIO = 1.6; +static const float CLIPPING_NORMALIZATION_RATIO = 1.6f; static const int CLIPPING_NORMALIZATION_NITER = 5; //************************************************************************************* From 566229431999869eaed6c742b80419422c687069 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 16:34:35 +0300 Subject: [PATCH 28/52] Rename KAZE to KAZEFeatures to fix MSVS x64 compiler error (Duplicate file name confused it) --- modules/features2d/src/akaze.cpp | 2 +- .../src/akaze/{AKAZE.cpp => AKAZEFeatures.cpp} | 2 +- .../src/akaze/{AKAZE.h => AKAZEFeatures.h} | 0 modules/features2d/src/kaze.cpp | 2 +- .../src/kaze/{KAZE.cpp => KAZEFeatures.cpp} | 12 ++++++------ .../features2d/src/kaze/{KAZE.h => KAZEFeatures.h} | 0 6 files changed, 9 insertions(+), 9 deletions(-) rename modules/features2d/src/akaze/{AKAZE.cpp => AKAZEFeatures.cpp} (99%) rename modules/features2d/src/akaze/{AKAZE.h => AKAZEFeatures.h} (100%) rename modules/features2d/src/kaze/{KAZE.cpp => KAZEFeatures.cpp} (99%) rename modules/features2d/src/kaze/{KAZE.h => KAZEFeatures.h} (100%) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 7a0ac729bd..7b028cca8b 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -49,7 +49,7 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd */ #include "precomp.hpp" -#include "akaze/AKAZE.h" +#include "akaze/AKAZEFeatures.h" namespace cv { diff --git a/modules/features2d/src/akaze/AKAZE.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp similarity index 99% rename from modules/features2d/src/akaze/AKAZE.cpp rename to modules/features2d/src/akaze/AKAZEFeatures.cpp index 84d33d5579..527abadc30 100644 --- a/modules/features2d/src/akaze/AKAZE.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -6,7 +6,7 @@ * @author Pablo F. 
Alcantarilla, Jesus Nuevo */ -#include "AKAZE.h" +#include "AKAZEFeatures.h" #include "fed.h" #include "nldiffusion_functions.h" diff --git a/modules/features2d/src/akaze/AKAZE.h b/modules/features2d/src/akaze/AKAZEFeatures.h similarity index 100% rename from modules/features2d/src/akaze/AKAZE.h rename to modules/features2d/src/akaze/AKAZEFeatures.h diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index d975eaeb04..e5b935437e 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -48,7 +48,7 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd */ #include "precomp.hpp" -#include "kaze/KAZE.h" +#include "kaze/KAZEFeatures.h" namespace cv { diff --git a/modules/features2d/src/kaze/KAZE.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp similarity index 99% rename from modules/features2d/src/kaze/KAZE.cpp rename to modules/features2d/src/kaze/KAZEFeatures.cpp index f1d0dc7039..0fe41aeaa7 100644 --- a/modules/features2d/src/kaze/KAZE.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -14,14 +14,14 @@ //============================================================================= /** - * @file KAZE.cpp + * @file KAZEFeatures.cpp * @brief Main class for detecting and describing features in a nonlinear * scale space * @date Jan 21, 2012 * @author Pablo F. Alcantarilla */ -#include "KAZE.h" +#include "KAZEFeatures.h" // Namespaces using namespace std; @@ -381,20 +381,20 @@ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) #ifdef _OPENMP #pragma omp parallel for #endif - for (int i = 1; i < evolution_.size() - 1; i++) { + for (int i = 1; i < (int)evolution_.size() - 1; i++) { Find_Extremum_Threading(i); } // Now fill the vector of keypoints!!! 
- for (int i = 0; i < kpts_par_.size(); i++) { - for (int j = 0; j < kpts_par_[i].size(); j++) { + for (int i = 0; i < (int)kpts_par_.size(); i++) { + for (int j = 0; j < (int)kpts_par_[i].size(); j++) { level = i + 1; is_extremum = true; is_repeated = false; is_out = false; // Check in case we have the same point as maxima in previous evolution levels - for (int ik = 0; ik < kpts.size(); ik++) { + for (int ik = 0; ik < (int)kpts.size(); ik++) { if (kpts[ik].class_id == level || kpts[ik].class_id == level + 1 || kpts[ik].class_id == level - 1) { dist = pow(kpts_par_[i][j].pt.x - kpts[ik].pt.x, 2) + pow(kpts_par_[i][j].pt.y - kpts[ik].pt.y, 2); diff --git a/modules/features2d/src/kaze/KAZE.h b/modules/features2d/src/kaze/KAZEFeatures.h similarity index 100% rename from modules/features2d/src/kaze/KAZE.h rename to modules/features2d/src/kaze/KAZEFeatures.h From a941d25f6dac0bee68549a257cfd2584556eea88 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 16:46:09 +0300 Subject: [PATCH 29/52] Fix size_t to int conversion --- .../src/akaze/nldiffusion_functions.cpp | 33 ++++++++----------- modules/features2d/src/kaze/fed.cpp | 8 ++--- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp index 5300223fc3..31db4f101b 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/akaze/nldiffusion_functions.cpp @@ -36,11 +36,11 @@ using namespace cv; void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, const size_t& ksize_y, const float& sigma) { - size_t ksize_x_ = 0, ksize_y_ = 0; + int ksize_x_ = 0, ksize_y_ = 0; // Compute an appropriate kernel size according to the specified sigma if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = ceil(2.0*(1.0 + (sigma - 0.8) / (0.3))); + ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); ksize_y_ = ksize_x_; } @@ -158,17 +158,13 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, float hmax = 0.0; // Create the array for the histogram - float *hist = new float[nbins]; + std::vector hist(nbins, 0); // Create the matrices cv::Mat gaussian = cv::Mat::zeros(img.rows, img.cols, CV_32F); cv::Mat Lx = cv::Mat::zeros(img.rows, img.cols, CV_32F); cv::Mat Ly = cv::Mat::zeros(img.rows, img.cols, CV_32F); - // Set the histogram to zero - for (size_t i = 0; i < nbins; i++) - hist[i] = 0.0; - // Perform the Gaussian convolution gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); @@ -199,7 +195,7 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, // Find the correspondent bin if (modg != 0.0) { - nbin = floor(nbins*(modg / hmax)); + nbin = (size_t)floor(nbins*(modg / hmax)); if (nbin == nbins) { nbin--; @@ -219,13 +215,12 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, } if (nelements < nthreshold) { - kperc = 0.03; + kperc = 0.03f; } else { kperc = hmax*((float)(k) / (float)nbins); } - delete[] hist; return kperc; } @@ -268,7 +263,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); - *(Lstep.ptr(i)+j) = 
0.5*stepsize*(xpos - xneg + ypos - yneg); + *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); } } @@ -276,7 +271,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j - 1))); float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); - *(Lstep.ptr(0) + j) = 0.5*stepsize*(xpos - xneg + ypos); + *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos); } for (int j = 1; j < Lstep.cols - 1; j++) { @@ -284,7 +279,7 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& float xneg = ((*(c.ptr(Lstep.rows - 1) + j - 1)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j - 1))); float ypos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j))); float yneg = ((*(c.ptr(Lstep.rows - 2) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 2) + j))); - *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5*stepsize*(xpos - xneg + ypos - yneg); + *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); } for (int i = 1; i < Lstep.rows - 1; i++) { @@ -292,14 +287,14 @@ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& float xneg = ((*(c.ptr(i))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i)))); float ypos = ((*(c.ptr(i))) + (*(c.ptr(i + 1))))*((*(Ld.ptr(i + 1))) - (*(Ld.ptr(i)))); float yneg = ((*(c.ptr(i - 1))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i - 1)))); - *(Lstep.ptr(i)) = 0.5*stepsize*(xpos - xneg + ypos - yneg); + *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); } for (int i = 1; i < Lstep.rows - 1; i++) { float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); - *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5*stepsize*(-xneg + ypos - yneg); + *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(-xneg + ypos - yneg); } Ld = Ld + Lstep; @@ -318,7 +313,7 @@ void downsample_image(const cv::Mat& src, cv::Mat& dst) { for (i1 = 1; i1 < src.rows; i1 += 2) { j2 = 0; for (j1 = 1; j1 < src.cols; j1 += 2) { - *(dst.ptr(i2)+j2) = 0.5*(*(src.ptr(i1)+j1)) + 0.25*(*(src.ptr(i1)+j1 - 1) + *(src.ptr(i1)+j1 + 1)); + *(dst.ptr(i2)+j2) = 0.5f*(*(src.ptr(i1)+j1)) + 0.25f*(*(src.ptr(i1)+j1 - 1) + *(src.ptr(i1)+j1 + 1)); j2++; } @@ -352,7 +347,7 @@ void halfsample_image(const cv::Mat& src, cv::Mat& dst) { void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, const size_t& dx, const size_t& dy, const size_t& scale) { - const int ksize = 3 + 2 * (scale - 1); + const int ksize = 3 + 2 * ( (int)scale - 1); // The usual Scharr kernel if (scale == 1) { @@ -365,8 +360,8 @@ void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, Mat kx = kx_.getMat(); Mat ky = ky_.getMat(); - float w = 10.0 / 3.0; - float norm = 1.0 / (2.0*scale*(w + 2.0)); + float w = 10.0f / 3.0f; + float norm = 1.0f / 
(2.0f*scale*(w + 2.0f)); for (int k = 0; k < 2; k++) { Mat* kernel = k == 0 ? &kx : &ky; diff --git a/modules/features2d/src/kaze/fed.cpp b/modules/features2d/src/kaze/fed.cpp index f07d072d61..7c2588559d 100644 --- a/modules/features2d/src/kaze/fed.cpp +++ b/modules/features2d/src/kaze/fed.cpp @@ -72,8 +72,8 @@ int fed_tau_by_cycle_time(const float& t, const float& tau_max, float scale = 0.0; // Ratio of t we search to maximal t // Compute necessary number of time steps - n = (int)(ceilf(sqrtf(3.0*t/tau_max+0.25f)-0.5f-1.0e-8f)+ 0.5f); - scale = 3.0*t/(tau_max*(float)(n*(n+1))); + n = (int)(ceilf(sqrtf(3.0f*t/tau_max+0.25f)-0.5f-1.0e-8f)+ 0.5f); + scale = 3.0f*t/(tau_max*(float)(n*(n+1))); // Call internal FED time step creation routine return fed_tau_internal(n,scale,tau_max,reordering,tau); @@ -114,7 +114,7 @@ int fed_tau_internal(const int& n, const float& scale, const float& tau_max, // Set up originally ordered tau vector for (int k = 0; k < n; ++k) { - float h = cosf(CV_PI * (2.0f * (float)k + 1.0f) * c); + float h = cosf((float)CV_PI * (2.0f * (float)k + 1.0f) * c); if (reordering) { tauh[k] = d / (h * h); @@ -175,7 +175,7 @@ bool fed_is_prime_internal(const int& number) { } else { is_prime = true; - int upperLimit = sqrt(number+1.0); + int upperLimit = (int)sqrt(1.0f + number); int divisor = 11; while (divisor <= upperLimit ) { From 0e3bbd702624b5a35252a9246f4d34520c7954a7 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 19:32:04 +0300 Subject: [PATCH 30/52] Fix "conditional expression constant" warning --- modules/features2d/src/akaze/AKAZEFeatures.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index 527abadc30..ece1d7b301 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -2145,7 +2145,7 @@ void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, int } ssz *= nchannels; - CV_Assert((nbits <= ssz) && "descriptor size can't be bigger than full descriptor"); + CV_Assert(nbits <= ssz); // Descriptor size can't be bigger than full descriptor // Since the full descriptor is usually under 10k elements, we pick // the selection from the full matrix. 
We take as many samples per From c68cbfced30e6f265fa749157a757a2d1f6c37fa Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 19:32:29 +0300 Subject: [PATCH 31/52] Fix size_t to int conversion --- .../features2d/src/akaze/nldiffusion_functions.cpp | 11 ++++------- modules/features2d/src/akaze/nldiffusion_functions.h | 6 ++---- modules/features2d/src/kaze/nldiffusion_functions.cpp | 6 +++--- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp index 31db4f101b..9ead4ecfe8 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/akaze/nldiffusion_functions.cpp @@ -69,8 +69,7 @@ void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksi * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, * Journal of Visual Communication and Image Representation 2002 */ -void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, - const size_t& xorder, const size_t& yorder) { +void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); } @@ -233,8 +232,7 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, * @param yorder Derivative order in Y-direction (vertical) * @param scale Scale factor for the derivative size */ -void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, const size_t& xorder, - const size_t& yorder, const size_t& scale) { +void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { Mat kx, ky; compute_derivative_kernels(kx, ky, xorder, yorder, scale); @@ -344,10 +342,9 @@ void halfsample_image(const cv::Mat& src, cv::Mat& dst) { * @param dy The derivative order in y-direction * @param scale The kernel size */ -void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, - const size_t& dx, const size_t& dy, const size_t& scale) { +void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale) { - const int ksize = 3 + 2 * ( (int)scale - 1); + const int ksize = 3 + 2 * (scale - 1); // The usual Scharr kernel if (scale == 1) { diff --git a/modules/features2d/src/akaze/nldiffusion_functions.h b/modules/features2d/src/akaze/nldiffusion_functions.h index ba578758b0..ec0ef2a847 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.h +++ b/modules/features2d/src/akaze/nldiffusion_functions.h @@ -23,12 +23,10 @@ void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, co void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); float compute_k_percentile(const cv::Mat& img, float perc, float gscale, size_t nbins, size_t ksize_x, size_t ksize_y); -void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, const size_t& xorder, - const size_t& yorder, const size_t& scale); +void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int, int scale); void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); void downsample_image(const cv::Mat& src, cv::Mat& dst); void halfsample_image(const cv::Mat& src, cv::Mat& dst); -void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, - const size_t& dx, const size_t& dy, const size_t& scale); +void compute_derivative_kernels(cv::OutputArray kx_, 
cv::OutputArray ky_, int dx, int dy, int scale); bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index ce3bef1758..c2c46d2b7a 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -43,11 +43,11 @@ using namespace cv; void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) { - size_t ksize_x_ = 0, ksize_y_ = 0; + int ksize_x_ = 0, ksize_y_ = 0; // Compute an appropriate kernel size according to the specified sigma if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = (size_t)ceil(2.0f*(1.0f + (sigma-0.8f)/(0.3f))); + ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma-0.8f)/(0.3f))); ksize_y_ = ksize_x_; } @@ -196,7 +196,7 @@ float compute_k_percentile(const cv::Mat& img, float perc, float gscale, } if (nelements < nthreshold) { - kperc = 0.03; + kperc = 0.03f; } else { kperc = hmax*((float)(k)/(float)nbins); From c1bf453266485394961dd70e9d42d402658ebbdc Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 20:41:44 +0300 Subject: [PATCH 32/52] Wrapped nldiffusion functions with details::kaze or details::amaze namespace to avoid collision of function names --- modules/features2d/src/akaze/AKAZEConfig.h | 2 +- .../features2d/src/akaze/AKAZEFeatures.cpp | 4 +- .../src/akaze/nldiffusion_functions.cpp | 689 +++++++++--------- .../src/akaze/nldiffusion_functions.h | 44 +- modules/features2d/src/kaze/KAZEFeatures.cpp | 1 + .../src/kaze/nldiffusion_functions.cpp | 584 +++++++-------- .../src/kaze/nldiffusion_functions.h | 54 +- 7 files changed, 697 insertions(+), 681 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/akaze/AKAZEConfig.h index 7fed80e2ce..bc3ac93301 100644 --- a/modules/features2d/src/akaze/AKAZEConfig.h +++ b/modules/features2d/src/akaze/AKAZEConfig.h @@ -114,7 +114,7 @@ struct AKAZEOptions { float kcontrast; ///< The contrast factor parameter float kcontrast_percentile; ///< Percentile level for the contrast factor - size_t kcontrast_nbins; ///< Number of bins for the contrast factor histogram + int kcontrast_nbins; ///< Number of bins for the contrast factor histogram bool save_scale_space; ///< Set to true for saving the scale space images bool save_keypoints; ///< Set to true for saving the detected keypoints and descriptors diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index ece1d7b301..2204f5aba4 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -12,6 +12,7 @@ using namespace std; using namespace cv; +using namespace cv::details::akaze; /* ************************************************************************* */ /** @@ -110,8 +111,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { evolution_[0].Lt.copyTo(evolution_[0].Lsmooth); // First compute the kcontrast factor - options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, - 1.0f, options_.kcontrast_nbins, 0, 0); + options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0); //t2 = cv::getTickCount(); //timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency(); diff --git 
a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp index 9ead4ecfe8..e0e2990d29 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/akaze/nldiffusion_functions.cpp @@ -19,368 +19,373 @@ * @author Pablo F. Alcantarilla, Jesus Nuevo */ -#include "nldiffusion_functions.h" +#include "akaze/nldiffusion_functions.h" using namespace std; using namespace cv; -/* ************************************************************************* */ -/** - * @brief This function smoothes an image with a Gaussian kernel - * @param src Input image - * @param dst Output image - * @param ksize_x Kernel size in X-direction (horizontal) - * @param ksize_y Kernel size in Y-direction (vertical) - * @param sigma Kernel standard deviation - */ -void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, - const size_t& ksize_y, const float& sigma) { +namespace cv { + namespace details { + namespace akaze { - int ksize_x_ = 0, ksize_y_ = 0; + /* ************************************************************************* */ + /** + * @brief This function smoothes an image with a Gaussian kernel + * @param src Input image + * @param dst Output image + * @param ksize_x Kernel size in X-direction (horizontal) + * @param ksize_y Kernel size in Y-direction (vertical) + * @param sigma Kernel standard deviation + */ + void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) { - // Compute an appropriate kernel size according to the specified sigma - if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); - ksize_y_ = ksize_x_; - } + int ksize_x_ = 0, ksize_y_ = 0; - // The kernel size must be and odd number - if ((ksize_x_ % 2) == 0) { - ksize_x_ += 1; - } - - if ((ksize_y_ % 2) == 0) { - ksize_y_ += 1; - } - - // Perform the Gaussian Smoothing with border replication - GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); -} - -/* ************************************************************************* */ -/** - * @brief This function computes image derivatives with Scharr kernel - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @note Scharr operator approximates better rotation invariance than - * other stencils such as Sobel. 
See Weickert and Scharr, - * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, - * Journal of Visual Communication and Image Representation 2002 - */ -void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { - Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); -} - -/* ************************************************************************* */ -/** - * @brief This function computes the Perona and Malik conductivity coefficient g1 - * g1 = exp(-|dL|^2/k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ -void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - exp(-(Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), dst); -} - -/* ************************************************************************* */ -/** - * @brief This function computes the Perona and Malik conductivity coefficient g2 - * g2 = 1 / (1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ -void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - dst = 1.0 / (1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); -} - -/* ************************************************************************* */ -/** - * @brief This function computes Weickert conductivity coefficient gw - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the following paper: J. Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ -void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - Mat modg; - pow((Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), 4, modg); - cv::exp(-3.315 / modg, dst); - dst = 1.0 - dst; -} - -/* ************************************************************************* */ -/** - * @brief This function computes Charbonnier conductivity coefficient gc - * gc = 1 / sqrt(1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the following paper: J. 
Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ -void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - Mat den; - cv::sqrt(1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), den); - dst = 1.0 / den; -} - -/* ************************************************************************* */ -/** - * @brief This function computes a good empirical value for the k contrast factor - * given an input image, the percentile (0-1), the gradient scale and the number of - * bins in the histogram - * @param img Input image - * @param perc Percentile of the image gradient histogram (0-1) - * @param gscale Scale for computing the image gradient histogram - * @param nbins Number of histogram bins - * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel - * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel - * @return k contrast factor - */ -float compute_k_percentile(const cv::Mat& img, float perc, float gscale, - size_t nbins, size_t ksize_x, size_t ksize_y) { - - size_t nbin = 0, nelements = 0, nthreshold = 0, k = 0; - float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; - float npoints = 0.0; - float hmax = 0.0; - - // Create the array for the histogram - std::vector hist(nbins, 0); - - // Create the matrices - cv::Mat gaussian = cv::Mat::zeros(img.rows, img.cols, CV_32F); - cv::Mat Lx = cv::Mat::zeros(img.rows, img.cols, CV_32F); - cv::Mat Ly = cv::Mat::zeros(img.rows, img.cols, CV_32F); - - // Perform the Gaussian convolution - gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); - - // Compute the Gaussian derivatives Lx and Ly - image_derivatives_scharr(gaussian, Lx, 1, 0); - image_derivatives_scharr(gaussian, Ly, 0, 1); - - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows - 1; i++) { - for (int j = 1; j < gaussian.cols - 1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); - - // Get the maximum - if (modg > hmax) { - hmax = modg; - } - } - } - - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows - 1; i++) { - for (int j = 1; j < gaussian.cols - 1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); - - // Find the correspondent bin - if (modg != 0.0) { - nbin = (size_t)floor(nbins*(modg / hmax)); - - if (nbin == nbins) { - nbin--; + // Compute an appropriate kernel size according to the specified sigma + if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { + ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); + ksize_y_ = ksize_x_; } - hist[nbin]++; - npoints++; + // The kernel size must be and odd number + if ((ksize_x_ % 2) == 0) { + ksize_x_ += 1; + } + + if ((ksize_y_ % 2) == 0) { + ksize_y_ += 1; + } + + // Perform the Gaussian Smoothing with border replication + GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); } - } - } - // Now find the perc of the histogram percentile - nthreshold = (size_t)(npoints*perc); + /* ************************************************************************* */ + /** + * @brief This function computes image derivatives with Scharr kernel + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @note Scharr operator approximates better rotation invariance 
than + * other stencils such as Sobel. See Weickert and Scharr, + * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, + * Journal of Visual Communication and Image Representation 2002 + */ + void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { + Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); + } - for (k = 0; nelements < nthreshold && k < nbins; k++) { - nelements = nelements + hist[k]; - } + /* ************************************************************************* */ + /** + * @brief This function computes the Perona and Malik conductivity coefficient g1 + * g1 = exp(-|dL|^2/k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ + void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + exp(-(Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), dst); + } - if (nelements < nthreshold) { - kperc = 0.03f; - } - else { - kperc = hmax*((float)(k) / (float)nbins); - } + /* ************************************************************************* */ + /** + * @brief This function computes the Perona and Malik conductivity coefficient g2 + * g2 = 1 / (1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ + void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + dst = 1.0 / (1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); + } - return kperc; -} + /* ************************************************************************* */ + /** + * @brief This function computes Weickert conductivity coefficient gw + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ + void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + Mat modg; + pow((Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), 4, modg); + cv::exp(-3.315 / modg, dst); + dst = 1.0 - dst; + } -/* ************************************************************************* */ -/** - * @brief This function computes Scharr image derivatives - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @param scale Scale factor for the derivative size - */ -void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { + /* ************************************************************************* */ + /** + * @brief This function computes Charbonnier conductivity coefficient gc + * gc = 1 / sqrt(1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. 
Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ + void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { + Mat den; + cv::sqrt(1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), den); + dst = 1.0 / den; + } - Mat kx, ky; - compute_derivative_kernels(kx, ky, xorder, yorder, scale); - sepFilter2D(src, dst, CV_32F, kx, ky); -} + /* ************************************************************************* */ + /** + * @brief This function computes a good empirical value for the k contrast factor + * given an input image, the percentile (0-1), the gradient scale and the number of + * bins in the histogram + * @param img Input image + * @param perc Percentile of the image gradient histogram (0-1) + * @param gscale Scale for computing the image gradient histogram + * @param nbins Number of histogram bins + * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel + * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel + * @return k contrast factor + */ + float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) { -/* ************************************************************************* */ -/** - * @brief This function performs a scalar non-linear diffusion step - * @param Ld2 Output image in the evolution - * @param c Conductivity image - * @param Lstep Previous image in the evolution - * @param stepsize The step size in time units - * @note Forward Euler Scheme 3x3 stencil - * The function c is a scalar value that depends on the gradient norm - * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy - */ -void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize) { + int nbin = 0, nelements = 0, nthreshold = 0, k = 0; + float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; + float npoints = 0.0; + float hmax = 0.0; + + // Create the array for the histogram + std::vector hist(nbins, 0); + + // Create the matrices + cv::Mat gaussian = cv::Mat::zeros(img.rows, img.cols, CV_32F); + cv::Mat Lx = cv::Mat::zeros(img.rows, img.cols, CV_32F); + cv::Mat Ly = cv::Mat::zeros(img.rows, img.cols, CV_32F); + + // Perform the Gaussian convolution + gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); + + // Compute the Gaussian derivatives Lx and Ly + image_derivatives_scharr(gaussian, Lx, 1, 0); + image_derivatives_scharr(gaussian, Ly, 0, 1); + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + for (int j = 1; j < gaussian.cols - 1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); + + // Get the maximum + if (modg > hmax) { + hmax = modg; + } + } + } + + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + for (int j = 1; j < gaussian.cols - 1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); + + // Find the correspondent bin + if (modg != 0.0) { + nbin = (int)floor(nbins*(modg / hmax)); + + if (nbin == nbins) { + nbin--; + } + + hist[nbin]++; + npoints++; + } + } + } + + // Now find the perc of the histogram percentile + nthreshold = (int)(npoints*perc); + + for (k = 0; nelements < nthreshold && k < nbins; k++) { + nelements = nelements + hist[k]; + } + + if (nelements < nthreshold) { + kperc = 0.03f; + } + else { + kperc = hmax*((float)(k) / (float)nbins); + } + + return 
kperc; + } + + /* ************************************************************************* */ + /** + * @brief This function computes Scharr image derivatives + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @param scale Scale factor for the derivative size + */ + void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { + + Mat kx, ky; + compute_derivative_kernels(kx, ky, xorder, yorder, scale); + sepFilter2D(src, dst, CV_32F, kx, ky); + } + + /* ************************************************************************* */ + /** + * @brief This function performs a scalar non-linear diffusion step + * @param Ld2 Output image in the evolution + * @param c Conductivity image + * @param Lstep Previous image in the evolution + * @param stepsize The step size in time units + * @note Forward Euler Scheme 3x3 stencil + * The function c is a scalar value that depends on the gradient norm + * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy + */ + void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize) { #ifdef _OPENMP #pragma omp parallel for schedule(dynamic) #endif - for (int i = 1; i < Lstep.rows - 1; i++) { - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); - float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); - float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); - float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); - *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + for (int i = 1; i < Lstep.rows - 1; i++) { + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); + float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); + float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); + float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); + *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } + } + + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); + float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j - 1))); + float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); + *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos); + } + + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j + 1)))*((*(Ld.ptr(Lstep.rows - 1) + j + 1)) - (*(Ld.ptr(Lstep.rows - 1) + j))); + float xneg = ((*(c.ptr(Lstep.rows - 1) + j - 1)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j - 1))); + float ypos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j))); + float yneg = ((*(c.ptr(Lstep.rows - 2) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 2) + j))); + *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5f*stepsize*(xpos - 
xneg + ypos - yneg); + } + + for (int i = 1; i < Lstep.rows - 1; i++) { + float xpos = ((*(c.ptr(i))) + (*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1)) - (*(Ld.ptr(i)))); + float xneg = ((*(c.ptr(i))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i)))); + float ypos = ((*(c.ptr(i))) + (*(c.ptr(i + 1))))*((*(Ld.ptr(i + 1))) - (*(Ld.ptr(i)))); + float yneg = ((*(c.ptr(i - 1))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i - 1)))); + *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } + + for (int i = 1; i < Lstep.rows - 1; i++) { + float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); + float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); + float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); + *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(-xneg + ypos - yneg); + } + + Ld = Ld + Lstep; + } + + /* ************************************************************************* */ + /** + * @brief This function downsamples the input image with the kernel [1/4,1/2,1/4] + * @param img Input image to be downsampled + * @param dst Output image with half of the resolution of the input image + */ + void downsample_image(const cv::Mat& src, cv::Mat& dst) { + + int i1 = 0, j1 = 0, i2 = 0, j2 = 0; + + for (i1 = 1; i1 < src.rows; i1 += 2) { + j2 = 0; + for (j1 = 1; j1 < src.cols; j1 += 2) { + *(dst.ptr(i2)+j2) = 0.5f*(*(src.ptr(i1)+j1)) + 0.25f*(*(src.ptr(i1)+j1 - 1) + *(src.ptr(i1)+j1 + 1)); + j2++; + } + + i2++; + } + } + + /* ************************************************************************* */ + /** + * @brief This function downsamples the input image using OpenCV resize + * @param img Input image to be downsampled + * @param dst Output image with half of the resolution of the input image + */ + void halfsample_image(const cv::Mat& src, cv::Mat& dst) { + + // Make sure the destination image is of the right size + CV_Assert(src.cols / 2 == dst.cols); + CV_Assert(src.rows / 2 == dst.rows); + resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); + } + + /* ************************************************************************* */ + /** + * @brief Compute Scharr derivative kernels for sizes different than 3 + * @param kx_ The derivative kernel in x-direction + * @param ky_ The derivative kernel in y-direction + * @param dx The derivative order in x-direction + * @param dy The derivative order in y-direction + * @param scale The kernel size + */ + void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale) { + + const int ksize = 3 + 2 * (scale - 1); + + // The usual Scharr kernel + if (scale == 1) { + getDerivKernels(kx_, ky_, dx, dy, 0, true, CV_32F); + return; + } + + kx_.create(ksize, 1, CV_32F, -1, true); + ky_.create(ksize, 1, CV_32F, -1, true); + Mat kx = kx_.getMat(); + Mat ky = ky_.getMat(); + + float w = 10.0f / 3.0f; + float norm = 1.0f / (2.0f*scale*(w + 2.0f)); + + for (int k = 0; k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? 
dx : dy; + float kerI[1000]; + + for (int t = 0; t < ksize; t++) { + kerI[t] = 0; + } + + if (order == 0) { + kerI[0] = norm; + kerI[ksize / 2] = w*norm; + kerI[ksize - 1] = norm; + } + else if (order == 1) { + kerI[0] = -1; + kerI[ksize / 2] = 0; + kerI[ksize - 1] = 1; + } + + Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); + temp.copyTo(*kernel); + } + } } } - - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); - float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j - 1))); - float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); - *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos); - } - - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j + 1)))*((*(Ld.ptr(Lstep.rows - 1) + j + 1)) - (*(Ld.ptr(Lstep.rows - 1) + j))); - float xneg = ((*(c.ptr(Lstep.rows - 1) + j - 1)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j - 1))); - float ypos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j))); - float yneg = ((*(c.ptr(Lstep.rows - 2) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 2) + j))); - *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); - } - - for (int i = 1; i < Lstep.rows - 1; i++) { - float xpos = ((*(c.ptr(i))) + (*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1)) - (*(Ld.ptr(i)))); - float xneg = ((*(c.ptr(i))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i)))); - float ypos = ((*(c.ptr(i))) + (*(c.ptr(i + 1))))*((*(Ld.ptr(i + 1))) - (*(Ld.ptr(i)))); - float yneg = ((*(c.ptr(i - 1))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i - 1)))); - *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); - } - - for (int i = 1; i < Lstep.rows - 1; i++) { - float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); - float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); - float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); - *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(-xneg + ypos - yneg); - } - - Ld = Ld + Lstep; -} - -/* ************************************************************************* */ -/** - * @brief This function downsamples the input image with the kernel [1/4,1/2,1/4] - * @param img Input image to be downsampled - * @param dst Output image with half of the resolution of the input image - */ -void downsample_image(const cv::Mat& src, cv::Mat& dst) { - - int i1 = 0, j1 = 0, i2 = 0, j2 = 0; - - for (i1 = 1; i1 < src.rows; i1 += 2) { - j2 = 0; - for (j1 = 1; j1 < src.cols; j1 += 2) { - *(dst.ptr(i2)+j2) = 0.5f*(*(src.ptr(i1)+j1)) + 0.25f*(*(src.ptr(i1)+j1 - 1) + *(src.ptr(i1)+j1 + 1)); - j2++; - } - - i2++; - } -} - -/* ************************************************************************* */ -/** - * @brief This function downsamples the input image using OpenCV resize - * @param img Input image to be downsampled - * @param dst Output image with half of the resolution of the input image - */ -void halfsample_image(const cv::Mat& src, 
cv::Mat& dst) { - - // Make sure the destination image is of the right size - CV_Assert(src.cols / 2 == dst.cols); - CV_Assert(src.rows / 2 == dst.rows); - resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); -} - -/* ************************************************************************* */ -/** - * @brief Compute Scharr derivative kernels for sizes different than 3 - * @param kx_ The derivative kernel in x-direction - * @param ky_ The derivative kernel in y-direction - * @param dx The derivative order in x-direction - * @param dy The derivative order in y-direction - * @param scale The kernel size - */ -void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale) { - - const int ksize = 3 + 2 * (scale - 1); - - // The usual Scharr kernel - if (scale == 1) { - getDerivKernels(kx_, ky_, dx, dy, 0, true, CV_32F); - return; - } - - kx_.create(ksize, 1, CV_32F, -1, true); - ky_.create(ksize, 1, CV_32F, -1, true); - Mat kx = kx_.getMat(); - Mat ky = ky_.getMat(); - - float w = 10.0f / 3.0f; - float norm = 1.0f / (2.0f*scale*(w + 2.0f)); - - for (int k = 0; k < 2; k++) { - Mat* kernel = k == 0 ? &kx : &ky; - int order = k == 0 ? dx : dy; - float kerI[1000]; - - for (int t = 0; t < ksize; t++) { - kerI[t] = 0; - } - - if (order == 0) { - kerI[0] = norm; - kerI[ksize / 2] = w*norm; - kerI[ksize - 1] = norm; - } - else if (order == 1) { - kerI[0] = -1; - kerI[ksize / 2] = 0; - kerI[ksize - 1] = 1; - } - - Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); - temp.copyTo(*kernel); - } -} +} \ No newline at end of file diff --git a/modules/features2d/src/akaze/nldiffusion_functions.h b/modules/features2d/src/akaze/nldiffusion_functions.h index ec0ef2a847..0fab6c59a7 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.h +++ b/modules/features2d/src/akaze/nldiffusion_functions.h @@ -5,7 +5,8 @@ * @author Pablo F. 
Alcantarilla, Jesus Nuevo */ -#pragma once +#ifndef AKAZE_NLDIFFUSION_FUNCTIONS_H +#define AKAZE_NLDIFFUSION_FUNCTIONS_H /* ************************************************************************* */ // Includes @@ -13,20 +14,27 @@ /* ************************************************************************* */ // Declaration of functions -void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, const size_t& ksize_x, - const size_t& ksize_y, const float& sigma); -void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, - const size_t& xorder, const size_t& yorder); -void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); -void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); -void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); -void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); -float compute_k_percentile(const cv::Mat& img, float perc, float gscale, - size_t nbins, size_t ksize_x, size_t ksize_y); -void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int, int scale); -void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); -void downsample_image(const cv::Mat& src, cv::Mat& dst); -void halfsample_image(const cv::Mat& src, cv::Mat& dst); -void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale); -bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, - int row, int col, bool same_img); + +namespace cv { + namespace details { + namespace akaze { + + void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma); + void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder); + void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); + void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); + void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); + void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); + float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); + void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int, int scale); + void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); + void downsample_image(const cv::Mat& src, cv::Mat& dst); + void halfsample_image(const cv::Mat& src, cv::Mat& dst); + void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale); + bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); + + } + } +} + +#endif diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 0fe41aeaa7..4d0127416a 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -26,6 +26,7 @@ // Namespaces using namespace std; using namespace cv; +using namespace cv::details::kaze; //******************************************************************************* //******************************************************************************* diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index c2c46d2b7a..23ffaf1f34 100644 --- 
a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -28,349 +28,355 @@ // Namespaces using namespace std; using namespace cv; +using namespace cv::details::kaze; //************************************************************************************* //************************************************************************************* -/** - * @brief This function smoothes an image with a Gaussian kernel - * @param src Input image - * @param dst Output image - * @param ksize_x Kernel size in X-direction (horizontal) - * @param ksize_y Kernel size in Y-direction (vertical) - * @param sigma Kernel standard deviation - */ -void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, - int ksize_x, int ksize_y, float sigma) { +namespace cv { + namespace details { + namespace kaze { + /** + * @brief This function smoothes an image with a Gaussian kernel + * @param src Input image + * @param dst Output image + * @param ksize_x Kernel size in X-direction (horizontal) + * @param ksize_y Kernel size in Y-direction (vertical) + * @param sigma Kernel standard deviation + */ + void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, + int ksize_x, int ksize_y, float sigma) { - int ksize_x_ = 0, ksize_y_ = 0; + int ksize_x_ = 0, ksize_y_ = 0; - // Compute an appropriate kernel size according to the specified sigma - if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma-0.8f)/(0.3f))); - ksize_y_ = ksize_x_; - } + // Compute an appropriate kernel size according to the specified sigma + if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { + ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); + ksize_y_ = ksize_x_; + } - // The kernel size must be and odd number - if ((ksize_x_ % 2) == 0) { - ksize_x_ += 1; - } + // The kernel size must be and odd number + if ((ksize_x_ % 2) == 0) { + ksize_x_ += 1; + } - if ((ksize_y_ % 2) == 0) { - ksize_y_ += 1; - } + if ((ksize_y_ % 2) == 0) { + ksize_y_ += 1; + } - // Perform the Gaussian Smoothing with border replication - GaussianBlur(src,dst,Size(ksize_x_,ksize_y_),sigma,sigma,cv::BORDER_REPLICATE); -} + // Perform the Gaussian Smoothing with border replication + GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, cv::BORDER_REPLICATE); + } -//************************************************************************************* -//************************************************************************************* + //************************************************************************************* + //************************************************************************************* -/** - * @brief This function computes the Perona and Malik conductivity coefficient g1 - * g1 = exp(-|dL|^2/k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ -void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - cv::exp(-(Lx.mul(Lx) + Ly.mul(Ly))/(k*k),dst); -} + /** + * @brief This function computes the Perona and Malik conductivity coefficient g1 + * g1 = exp(-|dL|^2/k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ + void pm_g1(const cv::Mat& Lx, 
const cv::Mat& Ly, cv::Mat& dst, float k) { + cv::exp(-(Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), dst); + } -//************************************************************************************* -//************************************************************************************* + //************************************************************************************* + //************************************************************************************* -/** - * @brief This function computes the Perona and Malik conductivity coefficient g2 - * g2 = 1 / (1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ -void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - dst = 1./(1. + (Lx.mul(Lx) + Ly.mul(Ly))/(k*k)); -} + /** + * @brief This function computes the Perona and Malik conductivity coefficient g2 + * g2 = 1 / (1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + */ + void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + dst = 1. / (1. + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); + } -//************************************************************************************* -//************************************************************************************* + //************************************************************************************* + //************************************************************************************* -/** - * @brief This function computes Weickert conductivity coefficient g3 - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the following paper: J. Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ -void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - Mat modg; - cv::pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg); - cv::exp(-3.315/modg, dst); - dst = 1.0f - dst; -} + /** + * @brief This function computes Weickert conductivity coefficient g3 + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. 
Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ + void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + Mat modg; + cv::pow((Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), 4, modg); + cv::exp(-3.315 / modg, dst); + dst = 1.0f - dst; + } -//************************************************************************************* -//************************************************************************************* + //************************************************************************************* + //************************************************************************************* -/** - * @brief This function computes a good empirical value for the k contrast factor - * given an input image, the percentile (0-1), the gradient scale and the number of - * bins in the histogram - * @param img Input image - * @param perc Percentile of the image gradient histogram (0-1) - * @param gscale Scale for computing the image gradient histogram - * @param nbins Number of histogram bins - * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel - * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel - * @return k contrast factor - */ -float compute_k_percentile(const cv::Mat& img, float perc, float gscale, - int nbins, int ksize_x, int ksize_y) { + /** + * @brief This function computes a good empirical value for the k contrast factor + * given an input image, the percentile (0-1), the gradient scale and the number of + * bins in the histogram + * @param img Input image + * @param perc Percentile of the image gradient histogram (0-1) + * @param gscale Scale for computing the image gradient histogram + * @param nbins Number of histogram bins + * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel + * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel + * @return k contrast factor + */ + float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) { - int nbin = 0, nelements = 0, nthreshold = 0, k = 0; - float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; - float npoints = 0.0; - float hmax = 0.0; + int nbin = 0, nelements = 0, nthreshold = 0, k = 0; + float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; + float npoints = 0.0; + float hmax = 0.0; - // Create the array for the histogram - std::vector hist(nbins, 0); + // Create the array for the histogram + std::vector hist(nbins, 0); - // Create the matrices - Mat gaussian = Mat::zeros(img.rows,img.cols,CV_32F); - Mat Lx = Mat::zeros(img.rows,img.cols,CV_32F); - Mat Ly = Mat::zeros(img.rows,img.cols,CV_32F); + // Create the matrices + Mat gaussian = Mat::zeros(img.rows, img.cols, CV_32F); + Mat Lx = Mat::zeros(img.rows, img.cols, CV_32F); + Mat Ly = Mat::zeros(img.rows, img.cols, CV_32F); - // Perform the Gaussian convolution - gaussian_2D_convolution(img,gaussian,ksize_x,ksize_y,gscale); + // Perform the Gaussian convolution + gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); - // Compute the Gaussian derivatives Lx and Ly - Scharr(gaussian,Lx,CV_32F,1,0,1,0,cv::BORDER_DEFAULT); - Scharr(gaussian,Ly,CV_32F,0,1,1,0,cv::BORDER_DEFAULT); + // Compute the Gaussian derivatives Lx and Ly + Scharr(gaussian, Lx, CV_32F, 1, 0, 1, 0, cv::BORDER_DEFAULT); + Scharr(gaussian, Ly, CV_32F, 0, 1, 1, 0, cv::BORDER_DEFAULT); - // Skip the borders for computing 
the histogram - for (int i = 1; i < gaussian.rows-1; i++) { - for (int j = 1; j < gaussian.cols-1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + for (int j = 1; j < gaussian.cols - 1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); - // Get the maximum - if (modg > hmax) { - hmax = modg; - } - } - } + // Get the maximum + if (modg > hmax) { + hmax = modg; + } + } + } - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows-1; i++) { - for (int j = 1; j < gaussian.cols-1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); + // Skip the borders for computing the histogram + for (int i = 1; i < gaussian.rows - 1; i++) { + for (int j = 1; j < gaussian.cols - 1; j++) { + lx = *(Lx.ptr(i)+j); + ly = *(Ly.ptr(i)+j); + modg = sqrt(lx*lx + ly*ly); - // Find the correspondent bin - if (modg != 0.0) { - nbin = (int)floor(nbins*(modg/hmax)); + // Find the correspondent bin + if (modg != 0.0) { + nbin = (int)floor(nbins*(modg / hmax)); - if (nbin == nbins) { - nbin--; - } + if (nbin == nbins) { + nbin--; + } - hist[nbin]++; - npoints++; - } - } - } + hist[nbin]++; + npoints++; + } + } + } - // Now find the perc of the histogram percentile - nthreshold = (size_t)(npoints*perc); + // Now find the perc of the histogram percentile + nthreshold = (size_t)(npoints*perc); - for (k = 0; nelements < nthreshold && k < nbins; k++) { - nelements = nelements + hist[k]; - } + for (k = 0; nelements < nthreshold && k < nbins; k++) { + nelements = nelements + hist[k]; + } - if (nelements < nthreshold) { - kperc = 0.03f; - } - else { - kperc = hmax*((float)(k)/(float)nbins); - } + if (nelements < nthreshold) { + kperc = 0.03f; + } + else { + kperc = hmax*((float)(k) / (float)nbins); + } - return kperc; -} + return kperc; + } -//************************************************************************************* -//************************************************************************************* + //************************************************************************************* + //************************************************************************************* -/** - * @brief This function computes Scharr image derivatives - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @param scale Scale factor or derivative size - */ -void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, - int xorder, int yorder, int scale) { - Mat kx, ky; - compute_derivative_kernels(kx,ky,xorder,yorder,scale); - sepFilter2D(src,dst,CV_32F,kx,ky); -} + /** + * @brief This function computes Scharr image derivatives + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @param scale Scale factor or derivative size + */ + void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, + int xorder, int yorder, int scale) { + Mat kx, ky; + compute_derivative_kernels(kx, ky, xorder, yorder, scale); + sepFilter2D(src, dst, CV_32F, kx, ky); + } -//************************************************************************************* -//************************************************************************************* + 
//************************************************************************************* + //************************************************************************************* -/** - * @brief Compute derivative kernels for sizes different than 3 - * @param _kx Horizontal kernel values - * @param _ky Vertical kernel values - * @param dx Derivative order in X-direction (horizontal) - * @param dy Derivative order in Y-direction (vertical) - * @param scale_ Scale factor or derivative size - */ -void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, - int dx, int dy, int scale) { + /** + * @brief Compute derivative kernels for sizes different than 3 + * @param _kx Horizontal kernel values + * @param _ky Vertical kernel values + * @param dx Derivative order in X-direction (horizontal) + * @param dy Derivative order in Y-direction (vertical) + * @param scale_ Scale factor or derivative size + */ + void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, + int dx, int dy, int scale) { - int ksize = 3 + 2*(scale-1); + int ksize = 3 + 2 * (scale - 1); - // The standard Scharr kernel - if (scale == 1) { - getDerivKernels(_kx,_ky,dx,dy,0,true,CV_32F); - return; - } + // The standard Scharr kernel + if (scale == 1) { + getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F); + return; + } - _kx.create(ksize,1,CV_32F,-1,true); - _ky.create(ksize,1,CV_32F,-1,true); - Mat kx = _kx.getMat(); - Mat ky = _ky.getMat(); + _kx.create(ksize, 1, CV_32F, -1, true); + _ky.create(ksize, 1, CV_32F, -1, true); + Mat kx = _kx.getMat(); + Mat ky = _ky.getMat(); - float w = 10.0f/3.0f; - float norm = 1.0f/(2.0f*scale*(w+2.0f)); + float w = 10.0f / 3.0f; + float norm = 1.0f / (2.0f*scale*(w + 2.0f)); - for (int k = 0; k < 2; k++) { - Mat* kernel = k == 0 ? &kx : &ky; - int order = k == 0 ? dx : dy; - std::vector kerI(ksize, 0.0f); + for (int k = 0; k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? 
dx : dy; + std::vector kerI(ksize, 0.0f); - if (order == 0) { - kerI[0] = norm, kerI[ksize/2] = w*norm, kerI[ksize-1] = norm; - } - else if (order == 1) { - kerI[0] = -1, kerI[ksize/2] = 0, kerI[ksize-1] = 1; - } + if (order == 0) { + kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm; + } + else if (order == 1) { + kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1; + } - Mat temp(kernel->rows,kernel->cols,CV_32F,&kerI[0]); - temp.copyTo(*kernel); - } -} + Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); + temp.copyTo(*kernel); + } + } -//************************************************************************************* -//************************************************************************************* + //************************************************************************************* + //************************************************************************************* -/** - * @brief This function performs a scalar non-linear diffusion step - * @param Ld2 Output image in the evolution - * @param c Conductivity image - * @param Lstep Previous image in the evolution - * @param stepsize The step size in time units - * @note Forward Euler Scheme 3x3 stencil - * The function c is a scalar value that depends on the gradient norm - * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy - */ -void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { + /** + * @brief This function performs a scalar non-linear diffusion step + * @param Ld2 Output image in the evolution + * @param c Conductivity image + * @param Lstep Previous image in the evolution + * @param stepsize The step size in time units + * @note Forward Euler Scheme 3x3 stencil + * The function c is a scalar value that depends on the gradient norm + * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy + */ + void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { #ifdef _OPENMP #pragma omp parallel for schedule(dynamic) #endif - for (int i = 1; i < Lstep.rows-1; i++) { - for (int j = 1; j < Lstep.cols-1; j++) { - float xpos = ((*(c.ptr(i)+j))+(*(c.ptr(i)+j+1)))*((*(Ld.ptr(i)+j+1))-(*(Ld.ptr(i)+j))); - float xneg = ((*(c.ptr(i)+j-1))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i)+j-1))); - float ypos = ((*(c.ptr(i)+j))+(*(c.ptr(i+1)+j)))*((*(Ld.ptr(i+1)+j))-(*(Ld.ptr(i)+j))); - float yneg = ((*(c.ptr(i-1)+j))+(*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j))-(*(Ld.ptr(i-1)+j))); - *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); - } - } + for (int i = 1; i < Lstep.rows - 1; i++) { + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); + float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); + float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); + float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); + *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } + } - for (int j = 1; j < Lstep.cols-1; j++) { - float xpos = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j+1)))*((*(Ld.ptr(0)+j+1))-(*(Ld.ptr(0)+j))); - float xneg = ((*(c.ptr(0)+j-1))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j-1))); - float ypos = ((*(c.ptr(0)+j))+(*(c.ptr(1)+j)))*((*(Ld.ptr(1)+j))-(*(Ld.ptr(0)+j))); - float yneg = ((*(c.ptr(0)+j))+(*(c.ptr(0)+j)))*((*(Ld.ptr(0)+j))-(*(Ld.ptr(0)+j))); - *(Lstep.ptr(0)+j) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); - } + 
for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); + float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j - 1))); + float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); + float yneg = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j))); + *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } - for (int j = 1; j < Lstep.cols-1; j++) { - float xpos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j+1)))*((*(Ld.ptr(Lstep.rows-1)+j+1))-(*(Ld.ptr(Lstep.rows-1)+j))); - float xneg = ((*(c.ptr(Lstep.rows-1)+j-1))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j-1))); - float ypos = ((*(c.ptr(Lstep.rows-1)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-1)+j))); - float yneg = ((*(c.ptr(Lstep.rows-2)+j))+(*(c.ptr(Lstep.rows-1)+j)))*((*(Ld.ptr(Lstep.rows-1)+j))-(*(Ld.ptr(Lstep.rows-2)+j))); - *(Lstep.ptr(Lstep.rows-1)+j) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); - } + for (int j = 1; j < Lstep.cols - 1; j++) { + float xpos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j + 1)))*((*(Ld.ptr(Lstep.rows - 1) + j + 1)) - (*(Ld.ptr(Lstep.rows - 1) + j))); + float xneg = ((*(c.ptr(Lstep.rows - 1) + j - 1)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j - 1))); + float ypos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j))); + float yneg = ((*(c.ptr(Lstep.rows - 2) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 2) + j))); + *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } - for (int i = 1; i < Lstep.rows-1; i++) { - float xpos = ((*(c.ptr(i)))+(*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1))-(*(Ld.ptr(i)))); - float xneg = ((*(c.ptr(i)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i)))); - float ypos = ((*(c.ptr(i)))+(*(c.ptr(i+1))))*((*(Ld.ptr(i+1)))-(*(Ld.ptr(i)))); - float yneg = ((*(c.ptr(i-1)))+(*(c.ptr(i))))*((*(Ld.ptr(i)))-(*(Ld.ptr(i-1)))); - *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); - } + for (int i = 1; i < Lstep.rows - 1; i++) { + float xpos = ((*(c.ptr(i))) + (*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1)) - (*(Ld.ptr(i)))); + float xneg = ((*(c.ptr(i))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i)))); + float ypos = ((*(c.ptr(i))) + (*(c.ptr(i + 1))))*((*(Ld.ptr(i + 1))) - (*(Ld.ptr(i)))); + float yneg = ((*(c.ptr(i - 1))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i - 1)))); + *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } - for (int i = 1; i < Lstep.rows-1; i++) { - float xpos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); - float xneg = ((*(c.ptr(i)+Lstep.cols-2))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-2))); - float ypos = ((*(c.ptr(i)+Lstep.cols-1))+(*(c.ptr(i+1)+Lstep.cols-1)))*((*(Ld.ptr(i+1)+Lstep.cols-1))-(*(Ld.ptr(i)+Lstep.cols-1))); - float yneg = ((*(c.ptr(i-1)+Lstep.cols-1))+(*(c.ptr(i)+Lstep.cols-1)))*((*(Ld.ptr(i)+Lstep.cols-1))-(*(Ld.ptr(i-1)+Lstep.cols-1))); - *(Lstep.ptr(i)+Lstep.cols-1) = 0.5f*stepsize*(xpos-xneg + ypos-yneg); - } + for (int i = 1; i < Lstep.rows - 1; i++) { + float xpos = ((*(c.ptr(i)+Lstep.cols - 1)) + 
(*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); + float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); + float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); + float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); + *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } - Ld = Ld + Lstep; -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This function checks if a given pixel is a maximum in a local neighbourhood - * @param img Input image where we will perform the maximum search - * @param dsize Half size of the neighbourhood - * @param value Response value at (x,y) position - * @param row Image row coordinate - * @param col Image column coordinate - * @param same_img Flag to indicate if the image value at (x,y) is in the input image - * @return 1->is maximum, 0->otherwise - */ -bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, - int row, int col, bool same_img) { - - bool response = true; - - for (int i = row-dsize; i <= row+dsize; i++) { - for (int j = col-dsize; j <= col+dsize; j++) { - if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) { - if (same_img == true) { - if (i != row || j != col) { - if ((*(img.ptr(i)+j)) > value) { - response = false; - return response; + Ld = Ld + Lstep; } - } - } - else { - if ((*(img.ptr(i)+j)) > value) { - response = false; - return response; - } - } - } - } - } - return response; -} + //************************************************************************************* + //************************************************************************************* + + /** + * @brief This function checks if a given pixel is a maximum in a local neighbourhood + * @param img Input image where we will perform the maximum search + * @param dsize Half size of the neighbourhood + * @param value Response value at (x,y) position + * @param row Image row coordinate + * @param col Image column coordinate + * @param same_img Flag to indicate if the image value at (x,y) is in the input image + * @return 1->is maximum, 0->otherwise + */ + bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, + int row, int col, bool same_img) { + + bool response = true; + + for (int i = row - dsize; i <= row + dsize; i++) { + for (int j = col - dsize; j <= col + dsize; j++) { + if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) { + if (same_img == true) { + if (i != row || j != col) { + if ((*(img.ptr(i)+j)) > value) { + response = false; + return response; + } + } + } + else { + if ((*(img.ptr(i)+j)) > value) { + response = false; + return response; + } + } + } + } + } + + return response; + } + } + } +} \ No newline at end of file diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h index d0ece89571..e9d5f03670 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.h +++ b/modules/features2d/src/kaze/nldiffusion_functions.h @@ -1,4 +1,3 @@ - /** * @file nldiffusion_functions.h * @brief Functions for non-linear diffusion applications: @@ -9,43 +8,40 @@ * @author 
Pablo F. Alcantarilla */ -#ifndef NLDIFFUSION_FUNCTIONS_H_ -#define NLDIFFUSION_FUNCTIONS_H_ - -//****************************************************************************** -//****************************************************************************** +#ifndef KAZE_NLDIFFUSION_FUNCTIONS_H +#define KAZE_NLDIFFUSION_FUNCTIONS_H // Includes -#include "config.h" +#include "precomp.hpp" //************************************************************************************* //************************************************************************************* -// Gaussian 2D convolution -void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, - int ksize_x, int ksize_y, float sigma); +namespace cv { + namespace details { + namespace kaze { -// Diffusivity functions -void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); -void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); -void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); -float compute_k_percentile(const cv::Mat& img, float perc, float gscale, - int nbins, int ksize_x, int ksize_y); + // Gaussian 2D convolution + void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma); -// Image derivatives -void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, - int xorder, int yorder, int scale); -void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, - int dx, int dy, int scale); + // Diffusivity functions + void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); + void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); + void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); + float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); -// Nonlinear diffusion filtering scalar step -void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); + // Image derivatives + void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale); + void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale); -// For non-maxima suppresion -bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, - int row, int col, bool same_img); + // Nonlinear diffusion filtering scalar step + void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); -//************************************************************************************* -//************************************************************************************* + // For non-maxima suppresion + bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); -#endif // NLDIFFUSION_FUNCTIONS_H_ + } + } +} + +#endif From a134e068efc4b1a966337bc34a7e37b022090c70 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 28 Apr 2014 22:25:27 +0300 Subject: [PATCH 33/52] Fix wrong checking of returned descriptor type --- modules/features2d/src/akaze.cpp | 4 ++-- modules/features2d/src/kaze.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 7b028cca8b..c5e2134dff 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -151,7 +151,7 @@ namespace cv impl.Compute_Descriptors(keypoints, desc); CV_Assert((!desc.rows || desc.cols == 
descriptorSize())); - CV_Assert((!desc.rows || (desc.type() & descriptorType()))); + CV_Assert((!desc.rows || (desc.type() == descriptorType()))); } void AKAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const @@ -197,6 +197,6 @@ namespace cv impl.Compute_Descriptors(keypoints, desc); CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() & descriptorType()))); + CV_Assert((!desc.rows || (desc.type() == descriptorType()))); } } \ No newline at end of file diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index e5b935437e..85835d8a18 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -120,7 +120,7 @@ namespace cv impl.Feature_Description(keypoints, desc); CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() & descriptorType()))); + CV_Assert((!desc.rows || (desc.type() == descriptorType()))); } void KAZE::detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const @@ -168,6 +168,6 @@ namespace cv impl.Feature_Description(keypoints, desc); CV_Assert((!desc.rows || desc.cols == descriptorSize())); - CV_Assert((!desc.rows || (desc.type() & descriptorType()))); + CV_Assert((!desc.rows || (desc.type() == descriptorType()))); } } \ No newline at end of file From 2daa14e3c7ce2b308728d1d5c5439abb7097016b Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Tue, 29 Apr 2014 21:07:53 +0300 Subject: [PATCH 34/52] Clean-up from dead code. --- modules/features2d/src/akaze/AKAZEConfig.h | 66 ++++++------------- .../features2d/src/akaze/AKAZEFeatures.cpp | 24 +++---- modules/features2d/src/akaze/AKAZEFeatures.h | 5 -- modules/features2d/src/kaze/KAZEFeatures.cpp | 29 +++----- modules/features2d/src/kaze/KAZEFeatures.h | 12 ---- modules/features2d/src/kaze/config.h | 12 ---- 6 files changed, 35 insertions(+), 113 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/akaze/AKAZEConfig.h index bc3ac93301..acf165bf98 100644 --- a/modules/features2d/src/akaze/AKAZEConfig.h +++ b/modules/features2d/src/akaze/AKAZEConfig.h @@ -43,58 +43,34 @@ enum DIFFUSIVITY_TYPE { CHARBONNIER = 3 }; -/* ************************************************************************* */ -/// AKAZE Timing structure -struct AKAZETiming { - - AKAZETiming() { - kcontrast = 0.0; - scale = 0.0; - derivatives = 0.0; - detector = 0.0; - extrema = 0.0; - subpixel = 0.0; - descriptor = 0.0; - } - - double kcontrast; ///< Contrast factor computation time in ms - double scale; ///< Nonlinear scale space computation time in ms - double derivatives; ///< Multiscale derivatives computation time in ms - double detector; ///< Feature detector computation time in ms - double extrema; ///< Scale space extrema computation time in ms - double subpixel; ///< Subpixel refinement computation time in ms - double descriptor; ///< Descriptors computation time in ms -}; - /* ************************************************************************* */ /// AKAZE configuration options structure struct AKAZEOptions { - AKAZEOptions() { - soffset = 1.6f; - derivative_factor = 1.5f; - omax = 4; - nsublevels = 4; - dthreshold = 0.001f; - min_dthreshold = 0.00001f; + AKAZEOptions() + : omax(4) + , nsublevels(4) + , img_width(0) + , img_height(0) + , soffset(1.6f) + , derivative_factor(1.5f) + , sderivatives(1.0) + , diffusivity(PM_G2) - diffusivity = PM_G2; - descriptor = MLDB; - descriptor_size = 0; - descriptor_channels = 3; - 
descriptor_pattern_size = 10; - sderivatives = 1.0; + , dthreshold(0.001f) + , min_dthreshold(0.00001f) - kcontrast = 0.001f; - kcontrast_percentile = 0.7f; - kcontrast_nbins = 300; + , descriptor(MLDB) + , descriptor_size(0) + , descriptor_channels(3) + , descriptor_pattern_size(10) - save_scale_space = false; - save_keypoints = false; - verbosity = false; + , kcontrast(0.001f) + , kcontrast_percentile(0.7f) + , kcontrast_nbins(300) + { } - int omin; ///< Initial octave level (-1 means that the size of the input image is duplicated) int omax; ///< Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) int nsublevels; ///< Default number of sublevels per scale level int img_width; ///< Width of the input image @@ -115,10 +91,6 @@ struct AKAZEOptions { float kcontrast; ///< The contrast factor parameter float kcontrast_percentile; ///< Percentile level for the contrast factor int kcontrast_nbins; ///< Number of bins for the contrast factor histogram - - bool save_scale_space; ///< Set to true for saving the scale space images - bool save_keypoints; ///< Set to true for saving the detected keypoints and descriptors - bool verbosity; ///< Set to true for displaying verbosity information }; /* ************************************************************************* */ diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index 2204f5aba4..0b201519bf 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -547,11 +547,10 @@ void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody { public: - SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + SURF_Descriptor_Upright_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) - , options_(&options) { } @@ -569,17 +568,15 @@ private: std::vector* keypoints_; cv::Mat* descriptors_; std::vector* evolution_; - AKAZEOptions* options_; }; class SURF_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: - SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + SURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) - , options_(&options) { } @@ -598,17 +595,15 @@ private: std::vector* keypoints_; cv::Mat* descriptors_; std::vector* evolution_; - AKAZEOptions* options_; }; class MSURF_Upright_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: - MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + MSURF_Upright_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) - , options_(&options) { } @@ -626,17 +621,15 @@ private: std::vector* keypoints_; cv::Mat* descriptors_; std::vector* evolution_; - AKAZEOptions* options_; }; class MSURF_Descriptor_64_Invoker : public cv::ParallelLoopBody { public: - MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution, AKAZEOptions& options) + MSURF_Descriptor_64_Invoker(std::vector& kpts, cv::Mat& desc, std::vector& evolution) : keypoints_(&kpts) , descriptors_(&desc) , evolution_(&evolution) - , options_(&options) { } 
@@ -655,7 +648,6 @@ private: std::vector* keypoints_; cv::Mat* descriptors_; std::vector* evolution_; - AKAZEOptions* options_; }; class Upright_MLDB_Full_Descriptor_Invoker : public cv::ParallelLoopBody @@ -823,7 +815,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr(i)); @@ -832,7 +824,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; case SURF: { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Compute_Main_Orientation(kpts[i]); @@ -842,7 +834,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); @@ -851,7 +843,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat break; case MSURF: { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_, options_)); + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); //for (int i = 0; i < (int)(kpts.size()); i++) { // Compute_Main_Orientation(kpts[i]); diff --git a/modules/features2d/src/akaze/AKAZEFeatures.h b/modules/features2d/src/akaze/AKAZEFeatures.h index f1bd7250bf..389848c9bf 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.h +++ b/modules/features2d/src/akaze/AKAZEFeatures.h @@ -80,11 +80,6 @@ public: /* ************************************************************************* */ // Inline functions -/** - * @brief This function sets default parameters for the A-KAZE detector. 
- * @param options AKAZE options - */ -void setDefaultAKAZEOptions(AKAZEOptions& options); // Inline functions void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons, diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 4d0127416a..a3582af3f5 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -42,8 +42,6 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { sderivatives_ = options.sderivatives; omax_ = options.omax; nsublevels_ = options.nsublevels; - save_scale_space_ = options.save_scale_space; - verbosity_ = options.verbosity; img_width_ = options.img_width; img_height_ = options.img_height; dthreshold_ = options.dthreshold; @@ -71,17 +69,6 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { //******************************************************************************* //******************************************************************************* -/** - * @brief KAZE destructor - */ -KAZEFeatures::~KAZEFeatures(void) { - - evolution_.clear(); -} - -//******************************************************************************* -//******************************************************************************* - /** * @brief This method allocates the memory for the nonlinear diffusion evolution */ @@ -171,10 +158,10 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { //t2 = getTickCount(); //tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency(); - if (verbosity_ == true) { - cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << - " Sigma: " << evolution_[0].esigma << endl; - } + //if (verbosity_ == true) { + // cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << + // " Sigma: " << evolution_[0].esigma << endl; + //} // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { @@ -209,10 +196,10 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { evolution_[i].etime - evolution_[i - 1].etime); } - if (verbosity_ == true) { - cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime << - " Sigma: " << evolution_[i].esigma << endl; - } + //if (verbosity_ == true) { + // cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime << + // " Sigma: " << evolution_[i].esigma << endl; + //} } //t2 = getTickCount(); diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index 31507a6024..8b4c326462 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -34,7 +34,6 @@ private: int img_width_; // Width of the original image int img_height_; // Height of the original image bool save_scale_space_; // For saving scale space images - bool verbosity_; // Verbosity level std::vector evolution_; // Vector of nonlinear diffusion evolution float kcontrast_; // The contrast parameter for the scalar nonlinear diffusion float dthreshold_; // Feature detector threshold response @@ -71,9 +70,6 @@ public: // Constructor KAZEFeatures(KAZEOptions& options); - // Destructor - ~KAZEFeatures(void); - // Public methods for KAZE interface void Allocate_Memory_Evolution(void); int Create_Nonlinear_Scale_Space(const cv::Mat& img); @@ -155,10 +151,6 @@ public: img_height_ = img_height; } - void Set_Verbosity_Level(bool verbosity) { - verbosity_ = verbosity; - } - void 
Set_KContrast(float kcontrast) { kcontrast_ = kcontrast; } @@ -216,10 +208,6 @@ public: return img_height_; } - bool Get_Verbosity_Level(void) { - return verbosity_; - } - float Get_KContrast(void) { return kcontrast_; } diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/config.h index 1a3d02d657..d8a9ca1237 100644 --- a/modules/features2d/src/kaze/config.h +++ b/modules/features2d/src/kaze/config.h @@ -39,10 +39,6 @@ static const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-S static const bool DEFAULT_USE_FED = true; // 0->AOS, 1->FED static const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation static const bool DEFAULT_EXTENDED = false; // Extended descriptor, dimension 128 -static const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images -static const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) -static const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios -static const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints // Some important configuration variables static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; @@ -72,10 +68,6 @@ struct KAZEOptions { descriptor = DEFAULT_DESCRIPTOR_MODE; diffusivity = DEFAULT_DIFFUSIVITY_TYPE; sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; - save_scale_space = DEFAULT_SAVE_SCALE_SPACE; - save_keypoints = DEFAULT_SAVE_KEYPOINTS; - verbosity = DEFAULT_VERBOSITY; - show_results = DEFAULT_SHOW_RESULTS; } float soffset; @@ -90,10 +82,6 @@ struct KAZEOptions { bool upright; bool extended; int descriptor; - bool save_scale_space; - bool save_keypoints; - bool verbosity; - bool show_results; }; struct TEvolution { From 4509fe55c2d730ca068d1d05046bf4086b64c09c Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Tue, 29 Apr 2014 21:39:27 +0300 Subject: [PATCH 35/52] Clean-up of getters/setters that are not needed by OpenCV --- modules/features2d/src/kaze/KAZEFeatures.h | 141 --------------------- 1 file changed, 141 deletions(-) diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index 8b4c326462..21bfe6d537 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -33,7 +33,6 @@ private: int nsublevels_; // Number of sublevels per octave level int img_width_; // Width of the original image int img_height_; // Height of the original image - bool save_scale_space_; // For saving scale space images std::vector evolution_; // Vector of nonlinear diffusion evolution float kcontrast_; // The contrast parameter for the scalar nonlinear diffusion float dthreshold_; // Feature detector threshold response @@ -119,146 +118,6 @@ private: // Descriptor Mode -> 2 G-SURF 128 void Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); void Get_GSURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc); - -public: - - // Setters - void Set_Scale_Offset(float soffset) { - soffset_ = soffset; - } - - void Set_SDerivatives(float sderivatives) { - sderivatives_ = sderivatives; - } - - void Set_Octave_Max(int omax) { - omax_ = omax; - } - - void Set_NSublevels(int nsublevels) { - nsublevels_ = nsublevels; - } - - void Set_Save_Scale_Space_Flag(bool save_scale_space) { - save_scale_space_ = save_scale_space; - } - - void Set_Image_Width(int img_width) { - img_width_ = img_width; - } - - void Set_Image_Height(int img_height) { - img_height_ 
= img_height; - } - - void Set_KContrast(float kcontrast) { - kcontrast_ = kcontrast; - } - - void Set_Detector_Threshold(float dthreshold) { - dthreshold_ = dthreshold; - } - - void Set_Diffusivity_Type(int diffusivity) { - diffusivity_ = diffusivity; - } - - void Set_Descriptor_Mode(int descriptor_mode) { - descriptor_mode_ = descriptor_mode; - } - - void Set_Use_FED(bool use_fed) { - use_fed_ = use_fed; - } - - void Set_Upright(bool use_upright) { - use_upright_ = use_upright; - } - - void Set_Extended(bool use_extended) { - use_extended_ = use_extended; - } - - // Getters - float Get_Scale_Offset(void) { - return soffset_; - } - - float Get_SDerivatives(void) { - return sderivatives_; - } - - int Get_Octave_Max(void) { - return omax_; - } - - int Get_NSublevels(void) { - return nsublevels_; - } - - bool Get_Save_Scale_Space_Flag(void) { - return save_scale_space_; - } - - int Get_Image_Width(void) { - return img_width_; - } - - int Get_Image_Height(void) { - return img_height_; - } - - float Get_KContrast(void) { - return kcontrast_; - } - - float Get_Detector_Threshold(void) { - return dthreshold_; - } - - int Get_Diffusivity_Type(void) { - return diffusivity_; - } - - int Get_Descriptor_Mode(void) { - return descriptor_mode_; - } - - bool Get_Upright(void) { - return use_upright_; - } - - bool Get_Extended(void) { - return use_extended_; - } - - //float Get_Time_KContrast(void) { - // return tkcontrast_; - //} - - //float Get_Time_NLScale(void) { - // return tnlscale_; - //} - - //float Get_Time_Detector(void) { - // return tdetector_; - //} - - //float Get_Time_Multiscale_Derivatives(void) { - // return tmderivatives_; - //} - - //float Get_Time_Detector_Response(void) { - // return tdresponse_; - //} - - //float Get_Time_Descriptor(void) { - // return tdescriptor_; - //} - - //float Get_Time_Subpixel(void) { - // return tsubpixel_; - //} }; //************************************************************************************* From ab1ef08f0e2d3a5e3901b345e25ba9b7f589cd6b Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Tue, 29 Apr 2014 22:17:18 +0300 Subject: [PATCH 36/52] Rename file config.h to KAZEConfig.h --- .../features2d/src/kaze/{config.h => KAZEConfig.h} | 11 +---------- modules/features2d/src/kaze/KAZEFeatures.h | 2 +- 2 files changed, 2 insertions(+), 11 deletions(-) rename modules/features2d/src/kaze/{config.h => KAZEConfig.h} (95%) diff --git a/modules/features2d/src/kaze/config.h b/modules/features2d/src/kaze/KAZEConfig.h similarity index 95% rename from modules/features2d/src/kaze/config.h rename to modules/features2d/src/kaze/KAZEConfig.h index d8a9ca1237..94c3aaa4d3 100644 --- a/modules/features2d/src/kaze/config.h +++ b/modules/features2d/src/kaze/KAZEConfig.h @@ -1,5 +1,5 @@ /** - * @file config.h + * @file KAZEConfig.h * @brief Configuration file * @date Dec 27, 2011 * @author Pablo F. 
Alcantarilla @@ -11,15 +11,6 @@ //****************************************************************************** //****************************************************************************** -// System Includes -#include -#include -#include -#include -#include -#include -#include - // OpenCV Includes #include "precomp.hpp" diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index 21bfe6d537..3f845e193e 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -14,7 +14,7 @@ //************************************************************************************* // Includes -#include "config.h" +#include "KAZEConfig.h" #include "nldiffusion_functions.h" #include "fed.h" From 3e51da38fb5f0f6acd9980894066ee94586d3abc Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 1 May 2014 10:58:34 +0300 Subject: [PATCH 37/52] Removed Feature_Suppression_Distance function that is not used anywhere. --- .../features2d/src/akaze/AKAZEFeatures.cpp | 50 ----------------- modules/features2d/src/akaze/AKAZEFeatures.h | 1 - modules/features2d/src/kaze/KAZEFeatures.cpp | 56 ------------------- modules/features2d/src/kaze/KAZEFeatures.h | 1 - 4 files changed, 108 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index 0b201519bf..27d5692d3d 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -492,56 +492,6 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { //timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency(); } -/* ************************************************************************* */ -/** - * @brief This method performs feature suppression based on 2D distance - * @param kpts Vector of keypoints - * @param mdist Maximum distance in pixels - */ -void AKAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, float mdist) const { - - vector aux; - vector to_delete; - float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; - bool found = false; - - for (size_t i = 0; i < kpts.size(); i++) { - x1 = kpts[i].pt.x; - y1 = kpts[i].pt.y; - for (size_t j = i + 1; j < kpts.size(); j++) { - x2 = kpts[j].pt.x; - y2 = kpts[j].pt.y; - dist = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)); - if (dist < mdist) { - if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { - to_delete.push_back(j); - } - else { - to_delete.push_back(i); - break; - } - } - } - } - - for (size_t i = 0; i < kpts.size(); i++) { - found = false; - for (size_t j = 0; j < to_delete.size(); j++) { - if (i == to_delete[j]) { - found = true; - break; - } - } - if (found == false) { - aux.push_back(kpts[i]); - } - } - - kpts.clear(); - kpts = aux; - aux.clear(); -} - /* ************************************************************************* */ class SURF_Descriptor_Upright_64_Invoker : public cv::ParallelLoopBody diff --git a/modules/features2d/src/akaze/AKAZEFeatures.h b/modules/features2d/src/akaze/AKAZEFeatures.h index 389848c9bf..4bebc16730 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.h +++ b/modules/features2d/src/akaze/AKAZEFeatures.h @@ -46,7 +46,6 @@ public: void Compute_Multiscale_Derivatives(void); void Find_Scale_Space_Extrema(std::vector& kpts); void Do_Subpixel_Refinement(std::vector& kpts); - void Feature_Suppression_Distance(std::vector& kpts, float mdist) const; // Feature description methods void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); diff 
--git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index a3582af3f5..78348f833d 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -590,62 +590,6 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { //************************************************************************************* //************************************************************************************* -/** - * @brief This method performs feature suppression based on 2D distance - * @param kpts Vector of keypoints - * @param mdist Maximum distance in pixels - */ -void KAZEFeatures::Feature_Suppression_Distance(std::vector& kpts, const float& mdist) { - - vector aux; - vector to_delete; - float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0; - bool found = false; - - for (size_t i = 0; i < kpts.size(); i++) { - x1 = kpts[i].pt.x; - y1 = kpts[i].pt.y; - - for (size_t j = i + 1; j < kpts.size(); j++) { - x2 = kpts[j].pt.x; - y2 = kpts[j].pt.y; - dist = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)); - - if (dist < mdist) { - if (fabs(kpts[i].response) >= fabs(kpts[j].response)) { - to_delete.push_back(j); - } - else { - to_delete.push_back(i); - break; - } - } - } - } - - for (size_t i = 0; i < kpts.size(); i++) { - found = false; - - for (size_t j = 0; j < to_delete.size(); j++) { - if (i == to_delete[j]) { - found = true; - break; - } - } - - if (found == false) { - aux.push_back(kpts[i]); - } - } - - kpts.clear(); - kpts = aux; - aux.clear(); -} - -//************************************************************************************* -//************************************************************************************* - /** * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of keypoints diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index 3f845e193e..c901561240 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -84,7 +84,6 @@ private: void Determinant_Hessian_Parallel(std::vector& kpts); void Find_Extremum_Threading(const int& level); void Do_Subpixel_Refinement(std::vector& kpts); - void Feature_Suppression_Distance(std::vector& kpts, const float& mdist); // AOS Methods void AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); From 30f73623ce3294212625027db405b86977e74218 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 1 May 2014 18:24:13 +0300 Subject: [PATCH 38/52] Replace runtime checks with assertions --- modules/features2d/src/akaze/AKAZEFeatures.cpp | 14 +++++++------- modules/features2d/src/kaze/KAZEFeatures.cpp | 11 ++++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index 27d5692d3d..4f33508fc2 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -96,12 +96,12 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { //double t1 = 0.0, t2 = 0.0; - - if (evolution_.size() == 0) { - cerr << "Error generating the nonlinear scale space!!" 
<< endl; - cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl; - return -1; - } + CV_Assert(evolution_.size() > 0); + //if (evolution_.size() == 0) { + // cerr << "Error generating the nonlinear scale space!!" << endl; + // cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl; + // return -1; + //} //t1 = cv::getTickCount(); @@ -148,7 +148,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; default: - cerr << "Diffusivity: " << static_cast(options_.diffusivity) << " is not supported" << endl; + CV_Error(options_.diffusivity, "Diffusivity is not supported"); break; } diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 78348f833d..8d1b726636 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -139,11 +139,12 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { //double t2 = 0.0, t1 = 0.0; - if (evolution_.size() == 0) { - cout << "Error generating the nonlinear scale space!!" << endl; - cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl; - return -1; - } + CV_Assert(evolution_.size() > 0); + //if (evolution_.size() == 0) { + // cout << "Error generating the nonlinear scale space!!" << endl; + // cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl; + // return -1; + //} //t1 = getTickCount(); From 2df7242646e6da7ad86f56eb2f81d239526f461e Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 1 May 2014 18:27:24 +0300 Subject: [PATCH 39/52] Prepare to merge KAZE and AKAZE nldiffusion_functions source files (work in progress). 
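
The intended end state is that AKAZE consumes the single shared implementation under
modules/features2d/src/kaze instead of keeping its own copies. A minimal sketch of the
expected call pattern (the include paths, namespace and function names below already
exist in kaze/nldiffusion_functions.h and kaze/fed.h; the exact call sites are finalized
in a follow-up commit):

    #include "../kaze/fed.h"
    #include "../kaze/nldiffusion_functions.h"

    using namespace cv::details::kaze;

    // one FED inner step of the nonlinear diffusion, shared by KAZE and AKAZE
    nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]);
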
--- .../features2d/src/akaze/AKAZEFeatures.cpp | 2 +- modules/features2d/src/akaze/fed.h | 26 ---- .../src/akaze/nldiffusion_functions.cpp | 124 ++++++++---------- .../src/akaze/nldiffusion_functions.h | 1 - modules/features2d/src/kaze/fed.h | 5 - .../src/kaze/nldiffusion_functions.cpp | 50 +++---- 6 files changed, 68 insertions(+), 140 deletions(-) delete mode 100644 modules/features2d/src/akaze/fed.h diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index 4f33508fc2..dd7876de04 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -7,7 +7,7 @@ */ #include "AKAZEFeatures.h" -#include "fed.h" +#include "../kaze/fed.h" #include "nldiffusion_functions.h" using namespace std; diff --git a/modules/features2d/src/akaze/fed.h b/modules/features2d/src/akaze/fed.h deleted file mode 100644 index 4ac82f68e3..0000000000 --- a/modules/features2d/src/akaze/fed.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef FED_H -#define FED_H - -//****************************************************************************** -//****************************************************************************** - -// Includes -#include -#include - -//************************************************************************************* -//************************************************************************************* - -// Declaration of functions -int fed_tau_by_process_time(const float& T, const int& M, const float& tau_max, - const bool& reordering, std::vector& tau); -int fed_tau_by_cycle_time(const float& t, const float& tau_max, - const bool& reordering, std::vector &tau) ; -int fed_tau_internal(const int& n, const float& scale, const float& tau_max, - const bool& reordering, std::vector &tau); -bool fed_is_prime_internal(const int& number); - -//************************************************************************************* -//************************************************************************************* - -#endif // FED_H diff --git a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp index e0e2990d29..f64e50460d 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/akaze/nldiffusion_functions.cpp @@ -235,12 +235,63 @@ namespace cv { * @param scale Scale factor for the derivative size */ void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { - Mat kx, ky; compute_derivative_kernels(kx, ky, xorder, yorder, scale); sepFilter2D(src, dst, CV_32F, kx, ky); } + /* ************************************************************************* */ + /** + * @brief Compute Scharr derivative kernels for sizes different than 3 + * @param kx_ The derivative kernel in x-direction + * @param ky_ The derivative kernel in y-direction + * @param dx The derivative order in x-direction + * @param dy The derivative order in y-direction + * @param scale The kernel size + */ + void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale) { + + const int ksize = 3 + 2 * (scale - 1); + + // The usual Scharr kernel + if (scale == 1) { + getDerivKernels(kx_, ky_, dx, dy, 0, true, CV_32F); + return; + } + + kx_.create(ksize, 1, CV_32F, -1, true); + ky_.create(ksize, 1, CV_32F, -1, true); + Mat kx = kx_.getMat(); + Mat ky = ky_.getMat(); + + float w = 10.0f / 3.0f; + float norm = 1.0f / (2.0f*scale*(w + 2.0f)); + + for (int k = 0; 
k < 2; k++) { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? dx : dy; + float kerI[1000]; + + for (int t = 0; t < ksize; t++) { + kerI[t] = 0; + } + + if (order == 0) { + kerI[0] = norm; + kerI[ksize / 2] = w*norm; + kerI[ksize - 1] = norm; + } + else if (order == 1) { + kerI[0] = -1; + kerI[ksize / 2] = 0; + kerI[ksize - 1] = 1; + } + + Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); + temp.copyTo(*kernel); + } + } + /* ************************************************************************* */ /** * @brief This function performs a scalar non-linear diffusion step @@ -300,27 +351,6 @@ namespace cv { Ld = Ld + Lstep; } - /* ************************************************************************* */ - /** - * @brief This function downsamples the input image with the kernel [1/4,1/2,1/4] - * @param img Input image to be downsampled - * @param dst Output image with half of the resolution of the input image - */ - void downsample_image(const cv::Mat& src, cv::Mat& dst) { - - int i1 = 0, j1 = 0, i2 = 0, j2 = 0; - - for (i1 = 1; i1 < src.rows; i1 += 2) { - j2 = 0; - for (j1 = 1; j1 < src.cols; j1 += 2) { - *(dst.ptr(i2)+j2) = 0.5f*(*(src.ptr(i1)+j1)) + 0.25f*(*(src.ptr(i1)+j1 - 1) + *(src.ptr(i1)+j1 + 1)); - j2++; - } - - i2++; - } - } - /* ************************************************************************* */ /** * @brief This function downsamples the input image using OpenCV resize @@ -335,57 +365,7 @@ namespace cv { resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); } - /* ************************************************************************* */ - /** - * @brief Compute Scharr derivative kernels for sizes different than 3 - * @param kx_ The derivative kernel in x-direction - * @param ky_ The derivative kernel in y-direction - * @param dx The derivative order in x-direction - * @param dy The derivative order in y-direction - * @param scale The kernel size - */ - void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale) { - const int ksize = 3 + 2 * (scale - 1); - - // The usual Scharr kernel - if (scale == 1) { - getDerivKernels(kx_, ky_, dx, dy, 0, true, CV_32F); - return; - } - - kx_.create(ksize, 1, CV_32F, -1, true); - ky_.create(ksize, 1, CV_32F, -1, true); - Mat kx = kx_.getMat(); - Mat ky = ky_.getMat(); - - float w = 10.0f / 3.0f; - float norm = 1.0f / (2.0f*scale*(w + 2.0f)); - - for (int k = 0; k < 2; k++) { - Mat* kernel = k == 0 ? &kx : &ky; - int order = k == 0 ? 
dx : dy; - float kerI[1000]; - - for (int t = 0; t < ksize; t++) { - kerI[t] = 0; - } - - if (order == 0) { - kerI[0] = norm; - kerI[ksize / 2] = w*norm; - kerI[ksize - 1] = norm; - } - else if (order == 1) { - kerI[0] = -1; - kerI[ksize / 2] = 0; - kerI[ksize - 1] = 1; - } - - Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); - temp.copyTo(*kernel); - } - } } } } \ No newline at end of file diff --git a/modules/features2d/src/akaze/nldiffusion_functions.h b/modules/features2d/src/akaze/nldiffusion_functions.h index 0fab6c59a7..b6dd2e8bab 100644 --- a/modules/features2d/src/akaze/nldiffusion_functions.h +++ b/modules/features2d/src/akaze/nldiffusion_functions.h @@ -28,7 +28,6 @@ namespace cv { float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int, int scale); void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); - void downsample_image(const cv::Mat& src, cv::Mat& dst); void halfsample_image(const cv::Mat& src, cv::Mat& dst); void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale); bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); diff --git a/modules/features2d/src/kaze/fed.h b/modules/features2d/src/kaze/fed.h index d9e8c49924..c313b8134d 100644 --- a/modules/features2d/src/kaze/fed.h +++ b/modules/features2d/src/kaze/fed.h @@ -5,11 +5,6 @@ //****************************************************************************** // Includes -#include -#include -#include -#include -#include #include //************************************************************************************* diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index 23ffaf1f34..a24a1a51d5 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -28,14 +28,14 @@ // Namespaces using namespace std; using namespace cv; -using namespace cv::details::kaze; -//************************************************************************************* -//************************************************************************************* +/* ************************************************************************* */ namespace cv { namespace details { namespace kaze { + + /* ************************************************************************* */ /** * @brief This function smoothes an image with a Gaussian kernel * @param src Input image @@ -44,8 +44,7 @@ namespace cv { * @param ksize_y Kernel size in Y-direction (vertical) * @param sigma Kernel standard deviation */ - void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, - int ksize_x, int ksize_y, float sigma) { + void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) { int ksize_x_ = 0, ksize_y_ = 0; @@ -68,9 +67,7 @@ namespace cv { GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, cv::BORDER_REPLICATE); } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function computes the Perona and Malik conductivity coefficient g1 * g1 = exp(-|dL|^2/k^2) @@ -83,9 
+80,7 @@ namespace cv { cv::exp(-(Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), dst); } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function computes the Perona and Malik conductivity coefficient g2 * g2 = 1 / (1 + dL^2 / k^2) @@ -98,9 +93,7 @@ namespace cv { dst = 1. / (1. + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function computes Weickert conductivity coefficient g3 * @param Lx First order image derivative in X-direction (horizontal) @@ -118,9 +111,7 @@ namespace cv { dst = 1.0f - dst; } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function computes a good empirical value for the k contrast factor * given an input image, the percentile (0-1), the gradient scale and the number of @@ -208,9 +199,7 @@ namespace cv { return kperc; } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function computes Scharr image derivatives * @param src Input image @@ -219,16 +208,13 @@ namespace cv { * @param yorder Derivative order in Y-direction (vertical) * @param scale Scale factor or derivative size */ - void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, - int xorder, int yorder, int scale) { + void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { Mat kx, ky; compute_derivative_kernels(kx, ky, xorder, yorder, scale); sepFilter2D(src, dst, CV_32F, kx, ky); } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief Compute derivative kernels for sizes different than 3 * @param _kx Horizontal kernel values @@ -237,8 +223,7 @@ namespace cv { * @param dy Derivative order in Y-direction (vertical) * @param scale_ Scale factor or derivative size */ - void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, - int dx, int dy, int scale) { + void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) { int ksize = 3 + 2 * (scale - 1); @@ -273,9 +258,7 @@ namespace cv { } } - //************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function performs a scalar non-linear diffusion step * @param Ld2 Output image in the evolution @@ -336,9 +319,7 @@ namespace cv { Ld = Ld + Lstep; } - 
//************************************************************************************* - //************************************************************************************* - + /* ************************************************************************* */ /** * @brief This function checks if a given pixel is a maximum in a local neighbourhood * @param img Input image where we will perform the maximum search @@ -349,8 +330,7 @@ namespace cv { * @param same_img Flag to indicate if the image value at (x,y) is in the input image * @return 1->is maximum, 0->otherwise */ - bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, - int row, int col, bool same_img) { + bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img) { bool response = true; From 9fc90f4069710a49badb1139326fb6e0467dcd33 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Thu, 1 May 2014 22:24:15 +0300 Subject: [PATCH 40/52] Merged nldiffusion functions into one module with removal of duplicate functions --- .../features2d/src/akaze/AKAZEFeatures.cpp | 6 +- .../src/akaze/nldiffusion_functions.cpp | 371 ------------------ .../src/akaze/nldiffusion_functions.h | 39 -- .../src/kaze/nldiffusion_functions.cpp | 86 +++- .../src/kaze/nldiffusion_functions.h | 10 +- modules/features2d/test/test_keypoints.cpp | 2 +- 6 files changed, 77 insertions(+), 437 deletions(-) delete mode 100644 modules/features2d/src/akaze/nldiffusion_functions.cpp delete mode 100644 modules/features2d/src/akaze/nldiffusion_functions.h diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index dd7876de04..7400b2accc 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -8,11 +8,11 @@ #include "AKAZEFeatures.h" #include "../kaze/fed.h" -#include "nldiffusion_functions.h" +#include "../kaze/nldiffusion_functions.h" using namespace std; using namespace cv; -using namespace cv::details::akaze; +using namespace cv::details::kaze; /* ************************************************************************* */ /** @@ -154,7 +154,7 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { // Perform FED n inner steps for (int j = 0; j < nsteps_[i - 1]; j++) { - nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); + cv::details::kaze::nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); } } diff --git a/modules/features2d/src/akaze/nldiffusion_functions.cpp b/modules/features2d/src/akaze/nldiffusion_functions.cpp deleted file mode 100644 index f64e50460d..0000000000 --- a/modules/features2d/src/akaze/nldiffusion_functions.cpp +++ /dev/null @@ -1,371 +0,0 @@ -//============================================================================= -// -// nldiffusion_functions.cpp -// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2) -// Institutions: Georgia Institute of Technology (1) -// TrueVision Solutions (2) -// Date: 15/09/2013 -// Email: pablofdezalc@gmail.com -// -// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo -// All Rights Reserved -// See LICENSE for the license information -//============================================================================= - -/** - * @file nldiffusion_functions.cpp - * @brief Functions for nonlinear diffusion filtering applications - * @date Sep 15, 2013 - * @author Pablo F. 
Alcantarilla, Jesus Nuevo - */ - -#include "akaze/nldiffusion_functions.h" - -using namespace std; -using namespace cv; - -namespace cv { - namespace details { - namespace akaze { - - /* ************************************************************************* */ - /** - * @brief This function smoothes an image with a Gaussian kernel - * @param src Input image - * @param dst Output image - * @param ksize_x Kernel size in X-direction (horizontal) - * @param ksize_y Kernel size in Y-direction (vertical) - * @param sigma Kernel standard deviation - */ - void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) { - - int ksize_x_ = 0, ksize_y_ = 0; - - // Compute an appropriate kernel size according to the specified sigma - if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) { - ksize_x_ = (int)ceil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f))); - ksize_y_ = ksize_x_; - } - - // The kernel size must be and odd number - if ((ksize_x_ % 2) == 0) { - ksize_x_ += 1; - } - - if ((ksize_y_ % 2) == 0) { - ksize_y_ += 1; - } - - // Perform the Gaussian Smoothing with border replication - GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); - } - - /* ************************************************************************* */ - /** - * @brief This function computes image derivatives with Scharr kernel - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @note Scharr operator approximates better rotation invariance than - * other stencils such as Sobel. See Weickert and Scharr, - * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, - * Journal of Visual Communication and Image Representation 2002 - */ - void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { - Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); - } - - /* ************************************************************************* */ - /** - * @brief This function computes the Perona and Malik conductivity coefficient g1 - * g1 = exp(-|dL|^2/k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ - void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - exp(-(Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), dst); - } - - /* ************************************************************************* */ - /** - * @brief This function computes the Perona and Malik conductivity coefficient g2 - * g2 = 1 / (1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - */ - void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - dst = 1.0 / (1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); - } - - /* ************************************************************************* */ - /** - * @brief This function computes Weickert conductivity coefficient gw - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the 
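// For illustration: the kernel-size heuristic that gaussian_2D_convolution applies
// when no explicit aperture is given. A minimal sketch; the function name is
// illustrative, only the formula comes from the code above.
#include <cmath>

static int gaussian_ksize_for_sigma(float sigma)
{
    int ksize = (int)std::ceil(2.0f * (1.0f + (sigma - 0.8f) / 0.3f));
    if ((ksize % 2) == 0)
        ksize += 1;              // GaussianBlur needs an odd aperture
    return ksize;                // e.g. sigma = 1.6f  ->  9
}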
following paper: J. Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ - void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - Mat modg; - pow((Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), 4, modg); - cv::exp(-3.315 / modg, dst); - dst = 1.0 - dst; - } - - /* ************************************************************************* */ - /** - * @brief This function computes Charbonnier conductivity coefficient gc - * gc = 1 / sqrt(1 + dL^2 / k^2) - * @param Lx First order image derivative in X-direction (horizontal) - * @param Ly First order image derivative in Y-direction (vertical) - * @param dst Output image - * @param k Contrast factor parameter - * @note For more information check the following paper: J. Weickert - * Applications of nonlinear diffusion in image processing and computer vision, - * Proceedings of Algorithmy 2000 - */ - void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) { - Mat den; - cv::sqrt(1.0 + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), den); - dst = 1.0 / den; - } - - /* ************************************************************************* */ - /** - * @brief This function computes a good empirical value for the k contrast factor - * given an input image, the percentile (0-1), the gradient scale and the number of - * bins in the histogram - * @param img Input image - * @param perc Percentile of the image gradient histogram (0-1) - * @param gscale Scale for computing the image gradient histogram - * @param nbins Number of histogram bins - * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel - * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel - * @return k contrast factor - */ - float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) { - - int nbin = 0, nelements = 0, nthreshold = 0, k = 0; - float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; - float npoints = 0.0; - float hmax = 0.0; - - // Create the array for the histogram - std::vector hist(nbins, 0); - - // Create the matrices - cv::Mat gaussian = cv::Mat::zeros(img.rows, img.cols, CV_32F); - cv::Mat Lx = cv::Mat::zeros(img.rows, img.cols, CV_32F); - cv::Mat Ly = cv::Mat::zeros(img.rows, img.cols, CV_32F); - - // Perform the Gaussian convolution - gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale); - - // Compute the Gaussian derivatives Lx and Ly - image_derivatives_scharr(gaussian, Lx, 1, 0); - image_derivatives_scharr(gaussian, Ly, 0, 1); - - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows - 1; i++) { - for (int j = 1; j < gaussian.cols - 1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); - - // Get the maximum - if (modg > hmax) { - hmax = modg; - } - } - } - - // Skip the borders for computing the histogram - for (int i = 1; i < gaussian.rows - 1; i++) { - for (int j = 1; j < gaussian.cols - 1; j++) { - lx = *(Lx.ptr(i)+j); - ly = *(Ly.ptr(i)+j); - modg = sqrt(lx*lx + ly*ly); - - // Find the correspondent bin - if (modg != 0.0) { - nbin = (int)floor(nbins*(modg / hmax)); - - if (nbin == nbins) { - nbin--; - } - - hist[nbin]++; - npoints++; - } - } - } - - // Now find the perc of the histogram percentile - nthreshold = (int)(npoints*perc); - - for (k = 0; nelements < nthreshold && k < nbins; k++) { - nelements = nelements + hist[k]; - } - - if 
(nelements < nthreshold) { - kperc = 0.03f; - } - else { - kperc = hmax*((float)(k) / (float)nbins); - } - - return kperc; - } - - /* ************************************************************************* */ - /** - * @brief This function computes Scharr image derivatives - * @param src Input image - * @param dst Output image - * @param xorder Derivative order in X-direction (horizontal) - * @param yorder Derivative order in Y-direction (vertical) - * @param scale Scale factor for the derivative size - */ - void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { - Mat kx, ky; - compute_derivative_kernels(kx, ky, xorder, yorder, scale); - sepFilter2D(src, dst, CV_32F, kx, ky); - } - - /* ************************************************************************* */ - /** - * @brief Compute Scharr derivative kernels for sizes different than 3 - * @param kx_ The derivative kernel in x-direction - * @param ky_ The derivative kernel in y-direction - * @param dx The derivative order in x-direction - * @param dy The derivative order in y-direction - * @param scale The kernel size - */ - void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale) { - - const int ksize = 3 + 2 * (scale - 1); - - // The usual Scharr kernel - if (scale == 1) { - getDerivKernels(kx_, ky_, dx, dy, 0, true, CV_32F); - return; - } - - kx_.create(ksize, 1, CV_32F, -1, true); - ky_.create(ksize, 1, CV_32F, -1, true); - Mat kx = kx_.getMat(); - Mat ky = ky_.getMat(); - - float w = 10.0f / 3.0f; - float norm = 1.0f / (2.0f*scale*(w + 2.0f)); - - for (int k = 0; k < 2; k++) { - Mat* kernel = k == 0 ? &kx : &ky; - int order = k == 0 ? dx : dy; - float kerI[1000]; - - for (int t = 0; t < ksize; t++) { - kerI[t] = 0; - } - - if (order == 0) { - kerI[0] = norm; - kerI[ksize / 2] = w*norm; - kerI[ksize - 1] = norm; - } - else if (order == 1) { - kerI[0] = -1; - kerI[ksize / 2] = 0; - kerI[ksize - 1] = 1; - } - - Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]); - temp.copyTo(*kernel); - } - } - - /* ************************************************************************* */ - /** - * @brief This function performs a scalar non-linear diffusion step - * @param Ld2 Output image in the evolution - * @param c Conductivity image - * @param Lstep Previous image in the evolution - * @param stepsize The step size in time units - * @note Forward Euler Scheme 3x3 stencil - * The function c is a scalar value that depends on the gradient norm - * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy - */ - void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize) { - -#ifdef _OPENMP -#pragma omp parallel for schedule(dynamic) -#endif - for (int i = 1; i < Lstep.rows - 1; i++) { - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); - float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); - float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); - float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); - *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); - } - } - - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); - float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - 
(*(Ld.ptr(0) + j - 1))); - float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); - *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos); - } - - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j + 1)))*((*(Ld.ptr(Lstep.rows - 1) + j + 1)) - (*(Ld.ptr(Lstep.rows - 1) + j))); - float xneg = ((*(c.ptr(Lstep.rows - 1) + j - 1)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j - 1))); - float ypos = ((*(c.ptr(Lstep.rows - 1) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 1) + j))); - float yneg = ((*(c.ptr(Lstep.rows - 2) + j)) + (*(c.ptr(Lstep.rows - 1) + j)))*((*(Ld.ptr(Lstep.rows - 1) + j)) - (*(Ld.ptr(Lstep.rows - 2) + j))); - *(Lstep.ptr(Lstep.rows - 1) + j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); - } - - for (int i = 1; i < Lstep.rows - 1; i++) { - float xpos = ((*(c.ptr(i))) + (*(c.ptr(i)+1)))*((*(Ld.ptr(i)+1)) - (*(Ld.ptr(i)))); - float xneg = ((*(c.ptr(i))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i)))); - float ypos = ((*(c.ptr(i))) + (*(c.ptr(i + 1))))*((*(Ld.ptr(i + 1))) - (*(Ld.ptr(i)))); - float yneg = ((*(c.ptr(i - 1))) + (*(c.ptr(i))))*((*(Ld.ptr(i))) - (*(Ld.ptr(i - 1)))); - *(Lstep.ptr(i)) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); - } - - for (int i = 1; i < Lstep.rows - 1; i++) { - float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); - float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); - float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); - *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(-xneg + ypos - yneg); - } - - Ld = Ld + Lstep; - } - - /* ************************************************************************* */ - /** - * @brief This function downsamples the input image using OpenCV resize - * @param img Input image to be downsampled - * @param dst Output image with half of the resolution of the input image - */ - void halfsample_image(const cv::Mat& src, cv::Mat& dst) { - - // Make sure the destination image is of the right size - CV_Assert(src.cols / 2 == dst.cols); - CV_Assert(src.rows / 2 == dst.rows); - resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); - } - - - } - } -} \ No newline at end of file diff --git a/modules/features2d/src/akaze/nldiffusion_functions.h b/modules/features2d/src/akaze/nldiffusion_functions.h deleted file mode 100644 index b6dd2e8bab..0000000000 --- a/modules/features2d/src/akaze/nldiffusion_functions.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * @file nldiffusion_functions.h - * @brief Functions for nonlinear diffusion filtering applications - * @date Sep 15, 2013 - * @author Pablo F. 
Alcantarilla, Jesus Nuevo - */ - -#ifndef AKAZE_NLDIFFUSION_FUNCTIONS_H -#define AKAZE_NLDIFFUSION_FUNCTIONS_H - -/* ************************************************************************* */ -// Includes -#include "precomp.hpp" - -/* ************************************************************************* */ -// Declaration of functions - -namespace cv { - namespace details { - namespace akaze { - - void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma); - void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder); - void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); - void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); - void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); - void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k); - float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); - void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int, int scale); - void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, const float& stepsize); - void halfsample_image(const cv::Mat& src, cv::Mat& dst); - void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_, int dx, int dy, int scale); - bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); - - } - } -} - -#endif diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index a24a1a51d5..d006cca53f 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -1,4 +1,3 @@ - //============================================================================= // // nldiffusion_functions.cpp @@ -64,7 +63,23 @@ namespace cv { } // Perform the Gaussian Smoothing with border replication - GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, cv::BORDER_REPLICATE); + GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE); + } + + /* ************************************************************************* */ + /** + * @brief This function computes image derivatives with Scharr kernel + * @param src Input image + * @param dst Output image + * @param xorder Derivative order in X-direction (horizontal) + * @param yorder Derivative order in Y-direction (vertical) + * @note Scharr operator approximates better rotation invariance than + * other stencils such as Sobel. See Weickert and Scharr, + * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, + * Journal of Visual Communication and Image Representation 2002 + */ + void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) { + Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT); } /* ************************************************************************* */ @@ -90,12 +105,12 @@ namespace cv { * @param k Contrast factor parameter */ void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { - dst = 1. / (1. 
+ (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); + dst = 1.0f / (1.0f + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k)); } /* ************************************************************************* */ /** - * @brief This function computes Weickert conductivity coefficient g3 + * @brief This function computes Weickert conductivity coefficient gw * @param Lx First order image derivative in X-direction (horizontal) * @param Ly First order image derivative in Y-direction (vertical) * @param dst Output image @@ -107,10 +122,28 @@ namespace cv { void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { Mat modg; cv::pow((Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), 4, modg); - cv::exp(-3.315 / modg, dst); + cv::exp(-3.315f / modg, dst); dst = 1.0f - dst; } + /* ************************************************************************* */ + /** + * @brief This function computes Charbonnier conductivity coefficient gc + * gc = 1 / sqrt(1 + dL^2 / k^2) + * @param Lx First order image derivative in X-direction (horizontal) + * @param Ly First order image derivative in Y-direction (vertical) + * @param dst Output image + * @param k Contrast factor parameter + * @note For more information check the following paper: J. Weickert + * Applications of nonlinear diffusion in image processing and computer vision, + * Proceedings of Algorithmy 2000 + */ + void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) { + Mat den; + cv::sqrt(1.0f + (Lx.mul(Lx) + Ly.mul(Ly)) / (k*k), den); + dst = 1.0f / den; + } + /* ************************************************************************* */ /** * @brief This function computes a good empirical value for the k contrast factor @@ -182,8 +215,7 @@ namespace cv { } // Now find the perc of the histogram percentile - nthreshold = (size_t)(npoints*perc); - + nthreshold = (int)(npoints*perc); for (k = 0; nelements < nthreshold && k < nbins; k++) { nelements = nelements + hist[k]; @@ -206,7 +238,7 @@ namespace cv { * @param dst Output image * @param xorder Derivative order in X-direction (horizontal) * @param yorder Derivative order in Y-direction (vertical) - * @param scale Scale factor or derivative size + * @param scale Scale factor for the derivative size */ void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) { Mat kx, ky; @@ -260,15 +292,15 @@ namespace cv { /* ************************************************************************* */ /** - * @brief This function performs a scalar non-linear diffusion step - * @param Ld2 Output image in the evolution - * @param c Conductivity image - * @param Lstep Previous image in the evolution - * @param stepsize The step size in time units - * @note Forward Euler Scheme 3x3 stencil - * The function c is a scalar value that depends on the gradient norm - * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy - */ + * @brief This function performs a scalar non-linear diffusion step + * @param Ld2 Output image in the evolution + * @param c Conductivity image + * @param Lstep Previous image in the evolution + * @param stepsize The step size in time units + * @note Forward Euler Scheme 3x3 stencil + * The function c is a scalar value that depends on the gradient norm + * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy + */ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { #ifdef _OPENMP @@ -288,8 +320,7 @@ namespace cv { float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + 
j))); float xneg = ((*(c.ptr(0) + j - 1)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j - 1))); float ypos = ((*(c.ptr(0) + j)) + (*(c.ptr(1) + j)))*((*(Ld.ptr(1) + j)) - (*(Ld.ptr(0) + j))); - float yneg = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j)))*((*(Ld.ptr(0) + j)) - (*(Ld.ptr(0) + j))); - *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + *(Lstep.ptr(0) + j) = 0.5f*stepsize*(xpos - xneg + ypos); } for (int j = 1; j < Lstep.cols - 1; j++) { @@ -309,16 +340,29 @@ namespace cv { } for (int i = 1; i < Lstep.rows - 1; i++) { - float xpos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); float xneg = ((*(c.ptr(i)+Lstep.cols - 2)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 2))); float ypos = ((*(c.ptr(i)+Lstep.cols - 1)) + (*(c.ptr(i + 1) + Lstep.cols - 1)))*((*(Ld.ptr(i + 1) + Lstep.cols - 1)) - (*(Ld.ptr(i)+Lstep.cols - 1))); float yneg = ((*(c.ptr(i - 1) + Lstep.cols - 1)) + (*(c.ptr(i)+Lstep.cols - 1)))*((*(Ld.ptr(i)+Lstep.cols - 1)) - (*(Ld.ptr(i - 1) + Lstep.cols - 1))); - *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + *(Lstep.ptr(i)+Lstep.cols - 1) = 0.5f*stepsize*(-xneg + ypos - yneg); } Ld = Ld + Lstep; } + /* ************************************************************************* */ + /** + * @brief This function downsamples the input image using OpenCV resize + * @param img Input image to be downsampled + * @param dst Output image with half of the resolution of the input image + */ + void halfsample_image(const cv::Mat& src, cv::Mat& dst) { + + // Make sure the destination image is of the right size + CV_Assert(src.cols / 2 == dst.cols); + CV_Assert(src.rows / 2 == dst.rows); + resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA); + } + /* ************************************************************************* */ /** * @brief This function checks if a given pixel is a maximum in a local neighbourhood diff --git a/modules/features2d/src/kaze/nldiffusion_functions.h b/modules/features2d/src/kaze/nldiffusion_functions.h index e9d5f03670..773f7e4619 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.h +++ b/modules/features2d/src/kaze/nldiffusion_functions.h @@ -11,11 +11,12 @@ #ifndef KAZE_NLDIFFUSION_FUNCTIONS_H #define KAZE_NLDIFFUSION_FUNCTIONS_H +/* ************************************************************************* */ // Includes #include "precomp.hpp" -//************************************************************************************* -//************************************************************************************* +/* ************************************************************************* */ +// Declaration of functions namespace cv { namespace details { @@ -28,11 +29,14 @@ namespace cv { void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); + void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k); + float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y); // Image derivatives void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale); void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale); + void 
image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder); // Nonlinear diffusion filtering scalar step void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize); @@ -40,6 +44,8 @@ namespace cv { // For non-maxima suppresion bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img); + // Image downsampling + void halfsample_image(const cv::Mat& src, cv::Mat& dst); } } } diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index f8163c1f36..3cbd3f6937 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -177,4 +177,4 @@ TEST(Features2d_Detector_Keypoints_AKAZE, validation) { CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.AKAZE")); test.safe_run(); -} \ No newline at end of file +} From e7e00201f1701ca8213f4ffbfe2973e61962769d Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 2 May 2014 14:07:30 +0300 Subject: [PATCH 41/52] Enabled parallel processing of the nld_step_scalar function --- .../src/kaze/nldiffusion_functions.cpp | 55 +++++++++++++++---- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/modules/features2d/src/kaze/nldiffusion_functions.cpp b/modules/features2d/src/kaze/nldiffusion_functions.cpp index d006cca53f..ea7cd8141a 100644 --- a/modules/features2d/src/kaze/nldiffusion_functions.cpp +++ b/modules/features2d/src/kaze/nldiffusion_functions.cpp @@ -290,6 +290,48 @@ namespace cv { } } + class Nld_Step_Scalar_Invoker : public cv::ParallelLoopBody + { + public: + Nld_Step_Scalar_Invoker(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float _stepsize) + : _Ld(&Ld) + , _c(&c) + , _Lstep(&Lstep) + , stepsize(_stepsize) + { + } + + virtual ~Nld_Step_Scalar_Invoker() + { + + } + + void operator()(const cv::Range& range) const + { + cv::Mat& Ld = *_Ld; + const cv::Mat& c = *_c; + cv::Mat& Lstep = *_Lstep; + + for (int i = range.start; i < range.end; i++) + { + for (int j = 1; j < Lstep.cols - 1; j++) + { + float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); + float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); + float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); + float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); + *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - yneg); + } + } + } + + private: + cv::Mat * _Ld; + const cv::Mat * _c; + cv::Mat * _Lstep; + float stepsize; + }; + /* ************************************************************************* */ /** * @brief This function performs a scalar non-linear diffusion step @@ -303,18 +345,7 @@ namespace cv { */ void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) { -#ifdef _OPENMP -#pragma omp parallel for schedule(dynamic) -#endif - for (int i = 1; i < Lstep.rows - 1; i++) { - for (int j = 1; j < Lstep.cols - 1; j++) { - float xpos = ((*(c.ptr(i)+j)) + (*(c.ptr(i)+j + 1)))*((*(Ld.ptr(i)+j + 1)) - (*(Ld.ptr(i)+j))); - float xneg = ((*(c.ptr(i)+j - 1)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i)+j - 1))); - float ypos = ((*(c.ptr(i)+j)) + (*(c.ptr(i + 1) + j)))*((*(Ld.ptr(i + 1) + j)) - (*(Ld.ptr(i)+j))); - float yneg = ((*(c.ptr(i - 1) + j)) + (*(c.ptr(i)+j)))*((*(Ld.ptr(i)+j)) - (*(Ld.ptr(i - 1) + j))); - *(Lstep.ptr(i)+j) = 0.5f*stepsize*(xpos - xneg + ypos - 
yneg); - } - } + cv::parallel_for_(cv::Range(1, Lstep.rows - 1), Nld_Step_Scalar_Invoker(Ld, c, Lstep, stepsize)); for (int j = 1; j < Lstep.cols - 1; j++) { float xpos = ((*(c.ptr(0) + j)) + (*(c.ptr(0) + j + 1)))*((*(Ld.ptr(0) + j + 1)) - (*(Ld.ptr(0) + j))); From 220de14077c6164891ff9e4f97719dc598ceb050 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Mon, 5 May 2014 21:48:54 +0300 Subject: [PATCH 42/52] Clean-up from the dead code --- .../features2d/src/akaze/AKAZEFeatures.cpp | 154 +++--------------- modules/features2d/src/akaze/AKAZEFeatures.h | 24 --- modules/features2d/src/kaze/KAZEFeatures.cpp | 123 +++----------- 3 files changed, 43 insertions(+), 258 deletions(-) diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index 7400b2accc..b1a4ba57d2 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -93,17 +93,9 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) { * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully, -1 otherwise */ -int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { - - //double t1 = 0.0, t2 = 0.0; +int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) +{ CV_Assert(evolution_.size() > 0); - //if (evolution_.size() == 0) { - // cerr << "Error generating the nonlinear scale space!!" << endl; - // cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl; - // return -1; - //} - - //t1 = cv::getTickCount(); // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); @@ -113,9 +105,6 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { // First compute the kcontrast factor options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0); - //t2 = cv::getTickCount(); - //timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency(); - // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { @@ -158,9 +147,6 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { } } - //t2 = cv::getTickCount(); - //timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency(); - return 0; } @@ -169,20 +155,13 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) { * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of detected keypoints */ -void AKAZEFeatures::Feature_Detection(std::vector& kpts) { - - //double t1 = 0.0, t2 = 0.0; - - //t1 = cv::getTickCount(); - +void AKAZEFeatures::Feature_Detection(std::vector& kpts) +{ kpts.clear(); Compute_Determinant_Hessian_Response(); Find_Scale_Space_Extrema(kpts); Do_Subpixel_Refinement(kpts); - - //t2 = cv::getTickCount(); - //timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -228,34 +207,10 @@ private: /** * @brief This method computes the multiscale derivatives for the nonlinear scale space */ -void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { - - //double t1 = 0.0, t2 = 0.0; - - //t1 = cv::getTickCount(); - - cv::parallel_for_(cv::Range(0, (int)evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_)); - /* - for (int i = 0; i < (int)(evolution_.size()); i++) { - - float ratio = pow(2.f, (float)evolution_[i].octave); - 
int sigma_size_ = fRound(evolution_[i].esigma*options_.derivative_factor / ratio); - - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_); - compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_); - compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_); - - evolution_[i].Lx = evolution_[i].Lx*((sigma_size_)); - evolution_[i].Ly = evolution_[i].Ly*((sigma_size_)); - evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_)); - evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_)); - evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_)); - } - */ - //t2 = cv::getTickCount(); - //timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency(); +void AKAZEFeatures::Compute_Multiscale_Derivatives(void) +{ + cv::parallel_for_(cv::Range(0, (int)evolution_.size()), + MultiscaleDerivativesInvoker(evolution_, options_)); } /* ************************************************************************* */ @@ -268,14 +223,12 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { // Firstly compute the multiscale derivatives Compute_Multiscale_Derivatives(); - for (size_t i = 0; i < evolution_.size(); i++) { - - //if (options_.verbosity == true) { - // cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; - //} - - for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) { - for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) { + for (size_t i = 0; i < evolution_.size(); i++) + { + for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) + { + for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) + { float lxx = *(evolution_[i].Lxx.ptr(ix)+jx); float lxy = *(evolution_[i].Lxy.ptr(ix)+jx); float lyy = *(evolution_[i].Lyy.ptr(ix)+jx); @@ -290,9 +243,9 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) { * @brief This method finds extrema in the nonlinear scale space * @param kpts Vector of detected keypoints */ -void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { +void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) +{ - //double t1 = 0.0, t2 = 0.0; float value = 0.0; float dist = 0.0, ratio = 0.0, smax = 0.0; int npoints = 0, id_repeated = 0; @@ -310,8 +263,6 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { smax = 12.0f*sqrtf(2.0f); } - //t1 = cv::getTickCount(); - for (size_t i = 0; i < evolution_.size(); i++) { for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) { for (int jx = 1; jx < evolution_[i].Ldet.cols - 1; jx++) { @@ -415,9 +366,6 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { if (is_repeated == false) kpts.push_back(pt); } - - //t2 = cv::getTickCount(); - //timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -425,9 +373,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) { * @brief This method performs subpixel refinement of the detected keypoints * @param kpts Vector of detected keypoints */ -void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { - - //double t1 = 0.0, t2 = 0.0; +void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) +{ float Dx = 0.0, Dy = 0.0, ratio = 0.0; float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0; int x = 
0, y = 0; @@ -435,8 +382,6 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { cv::Mat b = cv::Mat::zeros(2, 1, CV_32F); cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F); - //t1 = cv::getTickCount(); - for (size_t i = 0; i < kpts.size(); i++) { ratio = pow(2.f, kpts[i].octave); x = fRound(kpts[i].pt.x / ratio); @@ -487,9 +432,6 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector& kpts) { i--; } } - - //t2 = cv::getTickCount(); - //timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -739,12 +681,8 @@ private: * @param kpts Vector of detected keypoints * @param desc Matrix to store the descriptors */ -void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { - - //double t1 = 0.0, t2 = 0.0; - - //t1 = cv::getTickCount(); - +void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) +{ // Allocate memory for the matrix with the descriptors if (options_.descriptor < MLDB_UPRIGHT) { desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); @@ -766,39 +704,21 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation { cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_)); - - //for (int i = 0; i < (int)(kpts.size()); i++) { - // Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr(i)); - //} } break; case SURF: { cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_)); - - //for (int i = 0; i < (int)(kpts.size()); i++) { - // Compute_Main_Orientation(kpts[i]); - // Get_SURF_Descriptor_64(kpts[i], desc.ptr(i)); - //} } break; case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); - - //for (int i = 0; i < (int)(kpts.size()); i++) { - // Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr(i)); - //} } break; case MSURF: { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); - - //for (int i = 0; i < (int)(kpts.size()); i++) { - // Compute_Main_Orientation(kpts[i]); - // Get_MSURF_Descriptor_64(kpts[i], desc.ptr(i)); - //} } break; case MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation @@ -807,13 +727,6 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); else cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); - - //for (int i = 0; i < (int)(kpts.size()); i++) { - // if (options_.descriptor_size == 0) - // Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); - // else - // Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); - //} } break; case MLDB: @@ -822,20 +735,9 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); else cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); - - //for (int i = 0; i < (int)(kpts.size()); i++) { - // Compute_Main_Orientation(kpts[i]); - // if (options_.descriptor_size == 0) - // 
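// For illustration: the cv::ParallelLoopBody pattern used by the descriptor
// invokers above (and by Nld_Step_Scalar_Invoker). A minimal sketch with
// illustrative names, assuming the OpenCV 3.x layout where cv::parallel_for_
// is declared in opencv2/core/utility.hpp.
#include <opencv2/core/utility.hpp>
#include <vector>

class SquareInvoker : public cv::ParallelLoopBody
{
public:
    explicit SquareInvoker(std::vector<float>& data) : data_(&data) {}

    void operator()(const cv::Range& range) const
    {
        std::vector<float>& data = *data_;
        for (int i = range.start; i < range.end; i++)
            data[i] *= data[i];          // each worker handles a sub-range of indices
    }

private:
    std::vector<float>* data_;
};

// Usage: cv::parallel_for_(cv::Range(0, (int)values.size()), SquareInvoker(values));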
Get_MLDB_Full_Descriptor(kpts[i], desc.ptr(i)); - // else - // Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr(i)); - //} } break; } - - //t2 = cv::getTickCount(); - //timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency(); } /* ************************************************************************* */ @@ -2047,22 +1949,6 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset( } } - - -/* ************************************************************************* */ -/** - * @brief This method displays the computation times - */ -//void AKAZEFeatures::Show_Computation_Times() const { -// cout << "(*) Time Scale Space: " << timing_.scale << endl; -// cout << "(*) Time Detector: " << timing_.detector << endl; -// cout << " - Time Derivatives: " << timing_.derivatives << endl; -// cout << " - Time Extrema: " << timing_.extrema << endl; -// cout << " - Time Subpixel: " << timing_.subpixel << endl; -// cout << "(*) Time Descriptor: " << timing_.descriptor << endl; -// cout << endl; -//} - /* ************************************************************************* */ /** * @brief This function computes a (quasi-random) list of bits to be taken diff --git a/modules/features2d/src/akaze/AKAZEFeatures.h b/modules/features2d/src/akaze/AKAZEFeatures.h index 4bebc16730..302ef0d06d 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.h +++ b/modules/features2d/src/akaze/AKAZEFeatures.h @@ -51,30 +51,6 @@ public: void Compute_Descriptors(std::vector& kpts, cv::Mat& desc); static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_); - - // SURF Pattern Descriptor - //void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const; - //void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - - // M-SURF Pattern Descriptor - //void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - //void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - - // M-LDB Pattern Descriptor - //void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; - //void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const; - //void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); - //void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc); - - // Methods for saving some results and showing computation times - //void Save_Scale_Space(); - //void Save_Detector_Responses(); - //void Show_Computation_Times() const; - - /// Return the computation times - //AKAZETiming Get_Computation_Times() const { - // return timing_; - //} }; /* ************************************************************************* */ diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 8d1b726636..15c003e41e 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -135,18 +135,9 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { * @param img Input image for which the nonlinear scale space needs to be created * @return 0 if the nonlinear scale space was created successfully. -1 otherwise */ -int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { - - //double t2 = 0.0, t1 = 0.0; - +int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) +{ CV_Assert(evolution_.size() > 0); - //if (evolution_.size() == 0) { - // cout << "Error generating the nonlinear scale space!!" 
<< endl; - // cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl; - // return -1; - //} - - //t1 = getTickCount(); // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); @@ -156,14 +147,6 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { // Firstly compute the kcontrast factor Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE); - //t2 = getTickCount(); - //tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency(); - - //if (verbosity_ == true) { - // cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime << - // " Sigma: " << evolution_[0].esigma << endl; - //} - // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { @@ -196,16 +179,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { AOS_Step_Scalar(evolution_[i].Lt, evolution_[i - 1].Lt, evolution_[i].Lflow, evolution_[i].etime - evolution_[i - 1].etime); } - - //if (verbosity_ == true) { - // cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime << - // " Sigma: " << evolution_[i].esigma << endl; - //} } - //t2 = getTickCount(); - //tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency(); - return 0; } @@ -217,20 +192,9 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) { * @param img Input image * @param kpercentile Percentile of the gradient histogram */ -void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { - - //if (verbosity_ == true) { - // cout << "Computing Kcontrast factor." << endl; - //} - - //if (COMPUTE_KCONTRAST) { - kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); - //} - - //if (verbosity_ == true) { - // cout << "kcontrast = " << kcontrast_ << endl; - // cout << endl << "Now computing the nonlinear scale space!!" << endl; - //} +void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) +{ + kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); } //************************************************************************************* @@ -241,19 +205,9 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil */ void KAZEFeatures::Compute_Multiscale_Derivatives(void) { - //double t2 = 0.0, t1 = 0.0; - //t1 = getTickCount(); - -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < evolution_.size(); i++) { - - //if (verbosity_ == true) { - // cout << "Computing multiscale derivatives. 
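// For illustration: a condensed version of what compute_k_percentile (called by
// Compute_KContrast above) does - take k as the given percentile of the
// gradient-magnitude histogram of the smoothed image. A simplified sketch with
// illustrative names; border handling and the exact bin edge cases of the real
// implementation are omitted.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <algorithm>
#include <vector>

static float k_from_gradient_percentile(const cv::Mat& img32f, float perc, float gscale, int nbins)
{
    cv::Mat smooth, Lx, Ly, mag;
    cv::GaussianBlur(img32f, smooth, cv::Size(0, 0), gscale, gscale, cv::BORDER_REPLICATE);
    cv::Scharr(smooth, Lx, CV_32F, 1, 0);
    cv::Scharr(smooth, Ly, CV_32F, 0, 1);
    cv::magnitude(Lx, Ly, mag);

    double hmax = 0.0;
    cv::minMaxLoc(mag, 0, &hmax);        // histogram range is [0, hmax]

    std::vector<int> hist(nbins, 0);
    int npoints = 0;
    for (int i = 0; i < mag.rows; i++) {
        for (int j = 0; j < mag.cols; j++) {
            float m = mag.at<float>(i, j);
            if (m > 0.0f) {
                hist[std::min((int)(nbins * (m / hmax)), nbins - 1)]++;
                npoints++;
            }
        }
    }

    int nthreshold = (int)(npoints * perc), nelements = 0, k = 0;
    for (; k < nbins && nelements < nthreshold; k++)
        nelements += hist[k];

    return (nelements < nthreshold) ? 0.03f : (float)(hmax * k / nbins);
}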
Evolution time: " << evolution_[i].etime - // << " Step (pixels): " << evolution_[i].sigma_size << endl; - //} - + // TODO: use cv::parallel_for_ + for (size_t i = 0; i < evolution_.size(); i++) + { // Compute multiscale derivatives for the detector compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size); compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, evolution_[i].sigma_size); @@ -267,9 +221,6 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) evolution_[i].Lxy = evolution_[i].Lxy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size)); } - - //t2 = getTickCount(); - //tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -279,25 +230,19 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void) * @brief This method computes the feature detector response for the nonlinear scale space * @note We use the Hessian determinant as feature detector */ -void KAZEFeatures::Compute_Detector_Response(void) { - - //double t2 = 0.0, t1 = 0.0; +void KAZEFeatures::Compute_Detector_Response(void) +{ float lxx = 0.0, lxy = 0.0, lyy = 0.0; - //t1 = getTickCount(); - // Firstly compute the multiscale derivatives Compute_Multiscale_Derivatives(); - for (size_t i = 0; i < evolution_.size(); i++) { - - // Determinant of the Hessian - //if (verbosity_ == true) { - // cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl; - //} - - for (int ix = 0; ix < img_height_; ix++) { - for (int jx = 0; jx < img_width_; jx++) { + for (size_t i = 0; i < evolution_.size(); i++) + { + for (int ix = 0; ix < img_height_; ix++) + { + for (int jx = 0; jx < img_width_; jx++) + { lxx = *(evolution_[i].Lxx.ptr(ix)+jx); lxy = *(evolution_[i].Lxy.ptr(ix)+jx); lyy = *(evolution_[i].Lyy.ptr(ix)+jx); @@ -305,9 +250,6 @@ void KAZEFeatures::Compute_Detector_Response(void) { } } } - - //t2 = getTickCount(); - //tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -317,11 +259,8 @@ void KAZEFeatures::Compute_Detector_Response(void) { * @brief This method selects interesting keypoints through the nonlinear scale space * @param kpts Vector of keypoints */ -void KAZEFeatures::Feature_Detection(std::vector& kpts) { - - //double t2 = 0.0, t1 = 0.0; - //t1 = getTickCount(); - +void KAZEFeatures::Feature_Detection(std::vector& kpts) +{ kpts.clear(); // Firstly compute the detector response for each pixel and scale level @@ -332,9 +271,6 @@ void KAZEFeatures::Feature_Detection(std::vector& kpts) { // Perform some subpixel refinement Do_Subpixel_Refinement(kpts); - - //t2 = getTickCount(); - //tdetector_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -346,8 +282,8 @@ void KAZEFeatures::Feature_Detection(std::vector& kpts) { * @param kpts Vector of keypoints * @note We compute features for each of the nonlinear scale space level in a different processing thread */ -void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) { - +void KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) +{ int level = 0; float dist = 0.0, smax = 3.0; int npoints = 0, id_repeated = 0; @@ -367,9 +303,7 @@ void 
KAZEFeatures::Determinant_Hessian_Parallel(std::vector& kpts) kpts_par_.push_back(aux); } -#ifdef _OPENMP -#pragma omp parallel for -#endif + // TODO: Use cv::parallel_for_ for (int i = 1; i < (int)evolution_.size() - 1; i++) { Find_Extremum_Threading(i); } @@ -499,9 +433,7 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { Mat A = Mat::zeros(3, 3, CV_32F); Mat b = Mat::zeros(3, 1, CV_32F); Mat dst = Mat::zeros(3, 1, CV_32F); - //double t2 = 0.0, t1 = 0.0; - //t1 = cv::getTickCount(); vector kpts_(kpts); for (size_t i = 0; i < kpts_.size(); i++) { @@ -583,9 +515,6 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { kpts.push_back(kpts_[i]); } } - - //t2 = getTickCount(); - //tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* @@ -596,11 +525,8 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { * @param kpts Vector of keypoints * @param desc Matrix with the feature descriptors */ -void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) { - - //double t2 = 0.0, t1 = 0.0; - //t1 = getTickCount(); - +void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) +{ // Allocate memory for the matrix of descriptors if (use_extended_ == true) { desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1); @@ -730,9 +656,6 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat } } } - - //t2 = getTickCount(); - //tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency(); } //************************************************************************************* From 1a5fcd715d6649dfe8cc8bd065c106a572414331 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 9 May 2014 18:46:00 +0300 Subject: [PATCH 43/52] Refactor of KAZE and AKAZE: 1) Clean-up from the unused code 2) Remove of SURF extraction method 3) Enabled threading for KAZE extraction 4) Exposed new properties for runtime configuration --- .../features2d/include/opencv2/features2d.hpp | 25 +- modules/features2d/src/akaze.cpp | 68 +- modules/features2d/src/akaze/AKAZEConfig.h | 33 +- .../features2d/src/akaze/AKAZEFeatures.cpp | 285 +----- modules/features2d/src/features2d_init.cpp | 2 + modules/features2d/src/kaze.cpp | 19 +- modules/features2d/src/kaze/KAZEConfig.h | 141 ++- modules/features2d/src/kaze/KAZEFeatures.cpp | 928 +++++------------- modules/features2d/src/kaze/KAZEFeatures.h | 111 +-- modules/features2d/test/test_keypoints.cpp | 14 +- 10 files changed, 481 insertions(+), 1145 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index b6f9e44490..f3ff13aaad 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -893,7 +893,15 @@ KAZE implementation class CV_EXPORTS_W KAZE : public Feature2D { public: - CV_WRAP explicit KAZE(bool _extended = false); + + /// AKAZE Descriptor Type + enum DESCRIPTOR_TYPE { + DESCRIPTOR_MSURF = 1, + DESCRIPTOR_GSURF = 2 + }; + + CV_WRAP KAZE(); + CV_WRAP explicit KAZE(DESCRIPTOR_TYPE type, bool _extended, bool _upright); virtual ~KAZE(); @@ -917,7 +925,9 @@ protected: void detectImpl(InputArray image, std::vector& keypoints, InputArray mask) const; void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; + CV_PROP int descriptor; CV_PROP bool extended; + CV_PROP bool upright; }; /*! 
@@ -926,7 +936,16 @@ AKAZE implementation class CV_EXPORTS_W AKAZE : public Feature2D { public: - CV_WRAP explicit AKAZE(int _descriptor = 5, int _descriptor_size = 0, int _descriptor_channels = 3); + /// AKAZE Descriptor Type + enum DESCRIPTOR_TYPE { + DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_KAZE = 3, + DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_MLDB = 5 + }; + + CV_WRAP AKAZE(); + CV_WRAP explicit AKAZE(DESCRIPTOR_TYPE _descriptor, int _descriptor_size = 0, int _descriptor_channels = 3); virtual ~AKAZE(); @@ -951,8 +970,8 @@ protected: void computeImpl(InputArray image, std::vector& keypoints, OutputArray descriptors) const; void detectImpl(InputArray image, std::vector& keypoints, InputArray mask = noArray()) const; - CV_PROP int descriptor_channels; CV_PROP int descriptor; + CV_PROP int descriptor_channels; CV_PROP int descriptor_size; }; diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index c5e2134dff..0c0df7c1d9 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -53,10 +53,16 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pd namespace cv { + AKAZE::AKAZE() + : descriptor(DESCRIPTOR_MLDB) + , descriptor_channels(3) + , descriptor_size(0) + { + } - AKAZE::AKAZE(int _descriptor, int _descriptor_size, int _descriptor_channels) - : descriptor_channels(_descriptor_channels) - , descriptor(_descriptor) + AKAZE::AKAZE(DESCRIPTOR_TYPE _descriptor, int _descriptor_size, int _descriptor_channels) + : descriptor(_descriptor) + , descriptor_channels(_descriptor_channels) , descriptor_size(_descriptor_size) { @@ -70,12 +76,14 @@ namespace cv // returns the descriptor size in bytes int AKAZE::descriptorSize() const { - if (descriptor < MLDB_UPRIGHT) + switch (descriptor) { + case cv::AKAZE::DESCRIPTOR_KAZE: + case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: return 64; - } - else - { + + case cv::AKAZE::DESCRIPTOR_MLDB: + case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // We use the full length binary descriptor -> 486 bits if (descriptor_size == 0) { @@ -87,32 +95,45 @@ namespace cv // We use the random bit selection length binary descriptor return (int)ceil(descriptor_size / 8.); } + + default: + return -1; } } // returns the descriptor type int AKAZE::descriptorType() const { - if (descriptor < MLDB_UPRIGHT) + switch (descriptor) { - return CV_32F; - } - else - { - return CV_8U; + case cv::AKAZE::DESCRIPTOR_KAZE: + case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: + return CV_32F; + + case cv::AKAZE::DESCRIPTOR_MLDB: + case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: + return CV_8U; + + default: + return -1; } } // returns the default norm type int AKAZE::defaultNorm() const { - if (descriptor < MLDB_UPRIGHT) + switch (descriptor) { - return NORM_L2; - } - else - { - return NORM_HAMMING; + case cv::AKAZE::DESCRIPTOR_KAZE: + case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: + return cv::NORM_L2; + + case cv::AKAZE::DESCRIPTOR_MLDB: + case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: + return cv::NORM_HAMMING; + + default: + return -1; } } @@ -132,6 +153,9 @@ namespace cv cv::Mat& desc = descriptors.getMatRef(); AKAZEOptions options; + options.descriptor = static_cast(descriptor); + options.descriptor_channels = descriptor_channels; + options.descriptor_size = descriptor_size; options.img_width = img.cols; options.img_height = img.rows; @@ -164,6 +188,9 @@ namespace cv img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0); AKAZEOptions options; + 
options.descriptor = static_cast(descriptor); + options.descriptor_channels = descriptor_channels; + options.descriptor_size = descriptor_size; options.img_width = img.cols; options.img_height = img.rows; @@ -189,6 +216,9 @@ namespace cv cv::Mat& desc = descriptors.getMatRef(); AKAZEOptions options; + options.descriptor = static_cast(descriptor); + options.descriptor_channels = descriptor_channels; + options.descriptor_size = descriptor_size; options.img_width = img.cols; options.img_height = img.rows; diff --git a/modules/features2d/src/akaze/AKAZEConfig.h b/modules/features2d/src/akaze/AKAZEConfig.h index acf165bf98..1c1203f574 100644 --- a/modules/features2d/src/akaze/AKAZEConfig.h +++ b/modules/features2d/src/akaze/AKAZEConfig.h @@ -10,6 +10,7 @@ /* ************************************************************************* */ // OpenCV #include "precomp.hpp" +#include /* ************************************************************************* */ /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right @@ -23,30 +24,18 @@ const float gauss25[7][7] = { { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f } }; -/* ************************************************************************* */ -/// AKAZE Descriptor Type -enum DESCRIPTOR_TYPE { - SURF_UPRIGHT = 0, ///< Upright descriptors, not invariant to rotation - SURF = 1, - MSURF_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation - MSURF = 3, - MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation - MLDB = 5 -}; - -/* ************************************************************************* */ -/// AKAZE Diffusivities -enum DIFFUSIVITY_TYPE { - PM_G1 = 0, - PM_G2 = 1, - WEICKERT = 2, - CHARBONNIER = 3 -}; - /* ************************************************************************* */ /// AKAZE configuration options structure struct AKAZEOptions { + /// AKAZE Diffusivities + enum DIFFUSIVITY_TYPE { + PM_G1 = 0, + PM_G2 = 1, + WEICKERT = 2, + CHARBONNIER = 3 + }; + AKAZEOptions() : omax(4) , nsublevels(4) @@ -60,7 +49,7 @@ struct AKAZEOptions { , dthreshold(0.001f) , min_dthreshold(0.00001f) - , descriptor(MLDB) + , descriptor(cv::AKAZE::DESCRIPTOR_MLDB) , descriptor_size(0) , descriptor_channels(3) , descriptor_pattern_size(10) @@ -83,7 +72,7 @@ struct AKAZEOptions { float dthreshold; ///< Detector response threshold to accept point float min_dthreshold; ///< Minimum detector threshold to accept a point - DESCRIPTOR_TYPE descriptor; ///< Type of descriptor + cv::AKAZE::DESCRIPTOR_TYPE descriptor; ///< Type of descriptor int descriptor_size; ///< Size of the descriptor in bits. 
0->Full size int descriptor_channels; ///< Number of channels in the descriptor (1, 2, 3) int descriptor_pattern_size; ///< Actual patch size is 2*pattern_size*point.scale diff --git a/modules/features2d/src/akaze/AKAZEFeatures.cpp b/modules/features2d/src/akaze/AKAZEFeatures.cpp index b1a4ba57d2..e5955b21c2 100644 --- a/modules/features2d/src/akaze/AKAZEFeatures.cpp +++ b/modules/features2d/src/akaze/AKAZEFeatures.cpp @@ -25,7 +25,8 @@ AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) { ncycles_ = 0; reordering_ = true; - if (options_.descriptor_size > 0 && options_.descriptor >= MLDB_UPRIGHT) { + if (options_.descriptor_size > 0 && options_.descriptor >= cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT) + { generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size, options_.descriptor_pattern_size, options_.descriptor_channels); } @@ -124,16 +125,16 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) // Compute the conductivity equation switch (options_.diffusivity) { - case PM_G1: + case AKAZEOptions::PM_G1: pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; - case PM_G2: + case AKAZEOptions::PM_G2: pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; - case WEICKERT: + case AKAZEOptions::WEICKERT: weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; - case CHARBONNIER: + case AKAZEOptions::CHARBONNIER: charbonnier_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options_.kcontrast); break; default: @@ -170,8 +171,8 @@ class MultiscaleDerivativesInvoker : public cv::ParallelLoopBody { public: explicit MultiscaleDerivativesInvoker(std::vector& ev, const AKAZEOptions& opt) - : evolution_(&ev) - , options_(opt) + : evolution_(&ev) + , options_(opt) { } @@ -210,7 +211,7 @@ private: void AKAZEFeatures::Compute_Multiscale_Derivatives(void) { cv::parallel_for_(cv::Range(0, (int)evolution_.size()), - MultiscaleDerivativesInvoker(evolution_, options_)); + MultiscaleDerivativesInvoker(evolution_, options_)); } /* ************************************************************************* */ @@ -255,11 +256,10 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector& kpts) vector kpts_aux; // Set maximum size - if (options_.descriptor == SURF_UPRIGHT || options_.descriptor == SURF || - options_.descriptor == MLDB_UPRIGHT || options_.descriptor == MLDB) { + if (options_.descriptor == cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == cv::AKAZE::DESCRIPTOR_MLDB) { smax = 10.0f*sqrtf(2.0f); } - else if (options_.descriptor == MSURF_UPRIGHT || options_.descriptor == MSURF) { + else if (options_.descriptor == cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == cv::AKAZE::DESCRIPTOR_KAZE) { smax = 12.0f*sqrtf(2.0f); } @@ -574,15 +574,15 @@ class Upright_MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody { public: Upright_MLDB_Descriptor_Subset_Invoker(std::vector& kpts, - cv::Mat& desc, - std::vector& evolution, - AKAZEOptions& options, - cv::Mat descriptorSamples, - cv::Mat descriptorBits) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - , options_(&options) + cv::Mat& desc, + std::vector& evolution, + AKAZEOptions& options, + cv::Mat descriptorSamples, + cv::Mat descriptorBits) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) , descriptorSamples_(descriptorSamples) , 
descriptorBits_(descriptorBits) { @@ -641,15 +641,15 @@ class MLDB_Descriptor_Subset_Invoker : public cv::ParallelLoopBody { public: MLDB_Descriptor_Subset_Invoker(std::vector& kpts, - cv::Mat& desc, - std::vector& evolution, - AKAZEOptions& options, - cv::Mat descriptorSamples, - cv::Mat descriptorBits) - : keypoints_(&kpts) - , descriptors_(&desc) - , evolution_(&evolution) - , options_(&options) + cv::Mat& desc, + std::vector& evolution, + AKAZEOptions& options, + cv::Mat descriptorSamples, + cv::Mat descriptorBits) + : keypoints_(&kpts) + , descriptors_(&desc) + , evolution_(&evolution) + , options_(&options) , descriptorSamples_(descriptorSamples) , descriptorBits_(descriptorBits) { @@ -684,7 +684,7 @@ private: void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat& desc) { // Allocate memory for the matrix with the descriptors - if (options_.descriptor < MLDB_UPRIGHT) { + if (options_.descriptor < cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT) { desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1); } else { @@ -699,29 +699,19 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat } } - switch (options_.descriptor) { - - case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation + switch (options_.descriptor) { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_)); - } - break; - case SURF: - { - cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_)); - } - break; - case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation + case cv::AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; - case MSURF: + case cv::AKAZE::DESCRIPTOR_KAZE: { cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_)); } break; - case MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation + case cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation { if (options_.descriptor_size == 0) cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); @@ -729,7 +719,7 @@ void AKAZEFeatures::Compute_Descriptors(std::vector& kpts, cv::Mat cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_)); } break; - case MLDB: + case cv::AKAZE::DESCRIPTOR_MLDB: { if (options_.descriptor_size == 0) cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_)); @@ -783,7 +773,7 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto // Loop slides pi/3 window around feature point for (ang1 = 0; ang1 < (float)(2.0 * CV_PI); ang1 += 0.15f) { - ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0)); + ang2 = (ang1 + (float)(CV_PI / 3.0) >(float)(2.0*CV_PI) ? 
ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0)); sumX = sumY = 0.f; for (size_t k = 0; k < Ang.size(); ++k) { @@ -812,195 +802,6 @@ void AKAZEFeatures::Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vecto } } -/* ************************************************************************* */ -/** - * @brief This method computes the upright descriptor of the provided keypoint - * @param kpt Input keypoint - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional - * Gaussian weighting is performed. The descriptor is inspired from Bay et al., - * Speeded Up Robust Features, ECCV, 2006 - */ -void SURF_Descriptor_Upright_64_Invoker::Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float *desc) const { - - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int scale = 0, dsize = 0, level = 0; - - const std::vector& evolution = *evolution_; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - ratio = (float)(1 << kpt.octave); - scale = fRound(0.5f*kpt.size / ratio); - level = kpt.class_id; - yf = kpt.pt.y / ratio; - xf = kpt.pt.x / ratio; - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dx = dy = mdx = mdy = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution[level].Lx.ptr(y1)+x1); - res2 = *(evolution[level].Lx.ptr(y1)+x2); - res3 = *(evolution[level].Lx.ptr(y2)+x1); - res4 = *(evolution[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution[level].Ly.ptr(y1)+x1); - res2 = *(evolution[level].Ly.ptr(y1)+x2); - res3 = *(evolution[level].Ly.ptr(y2)+x1); - res4 = *(evolution[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } -} - -/* ************************************************************************* */ -/** - * @brief This method computes the descriptor of the provided keypoint given the - * main orientation - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional - * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., - * Speeded Up Robust Features, ECCV, 2006 - */ -void SURF_Descriptor_64_Invoker::Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const { - - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int scale = 0, dsize = 0, level = 0; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; - - const std::vector& evolution = *evolution_; - - // Get the information from the keypoint - ratio = (float)(1 << kpt.octave); - scale = fRound(0.5f*kpt.size / ratio); - angle = kpt.angle; - level = kpt.class_id; - yf = kpt.pt.y / ratio; - xf = kpt.pt.x / ratio; - co = cos(angle); - si = sin(angle); - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dx = dy = mdx = mdy = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution[level].Lx.ptr(y1)+x1); - res2 = *(evolution[level].Lx.ptr(y1)+x2); - res3 = *(evolution[level].Lx.ptr(y2)+x1); - res4 = *(evolution[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution[level].Ly.ptr(y1)+x1); - res2 = *(evolution[level].Ly.ptr(y1)+x2); - res3 = *(evolution[level].Ly.ptr(y2)+x1); - res4 = *(evolution[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } -} - /* ************************************************************************* */ /** * @brief This method computes the upright descriptor (not rotation invariant) of @@ -1271,8 +1072,8 @@ void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(cons const std::vector& evolution = *evolution_; // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); + cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); // 
Get the information from the keypoint @@ -1484,12 +1285,12 @@ void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const cv::KeyPoint& int level = 0, nsamples = 0, scale = 0; int dcount1 = 0, dcount2 = 0; - const AKAZEOptions & options = *options_; + const AKAZEOptions & options = *options_; const std::vector& evolution = *evolution_; // Matrices for the M-LDB descriptor - cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); - cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); + cv::Mat values_1 = cv::Mat::zeros(4, options.descriptor_channels, CV_32FC1); + cv::Mat values_2 = cv::Mat::zeros(9, options.descriptor_channels, CV_32FC1); cv::Mat values_3 = cv::Mat::zeros(16, options.descriptor_channels, CV_32FC1); // Get the information from the keypoint @@ -2077,11 +1878,11 @@ inline float get_angle(float x, float y) { } if (x < 0 && y >= 0) { - return static_cast(CV_PI) - atanf(-y / x); + return static_cast(CV_PI)-atanf(-y / x); } if (x < 0 && y < 0) { - return static_cast(CV_PI) + atanf(y / x); + return static_cast(CV_PI)+atanf(y / x); } if (x >= 0 && y < 0) { diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp index e3a3b3c363..c0365274de 100644 --- a/modules/features2d/src/features2d_init.cpp +++ b/modules/features2d/src/features2d_init.cpp @@ -126,6 +126,8 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT", /////////////////////////////////////////////////////////////////////////////////////////////////////////// CV_INIT_ALGORITHM(KAZE, "Feature2D.KAZE", + obj.info()->addParam(obj, "descriptor", obj.descriptor); + obj.info()->addParam(obj, "upright", obj.upright); obj.info()->addParam(obj, "extended", obj.extended)) /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 85835d8a18..dbb09a75e1 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -52,11 +52,20 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd namespace cv { - KAZE::KAZE(bool _extended /* = false */) - : extended(_extended) + KAZE::KAZE() + : descriptor(DESCRIPTOR_MSURF) + , extended(false) + , upright(false) { } + KAZE::KAZE(DESCRIPTOR_TYPE type, bool _extended, bool _upright) + : descriptor(type) + , extended(_extended) + , upright(_upright) + { + + } KAZE::~KAZE() { @@ -102,7 +111,9 @@ namespace cv KAZEOptions options; options.img_width = img.cols; options.img_height = img.rows; + options.descriptor = static_cast(descriptor); options.extended = extended; + options.upright = upright; KAZEFeatures impl(options); impl.Create_Nonlinear_Scale_Space(img1_32); @@ -135,7 +146,9 @@ namespace cv KAZEOptions options; options.img_width = img.cols; options.img_height = img.rows; + options.descriptor = static_cast(descriptor); options.extended = extended; + options.upright = upright; KAZEFeatures impl(options); impl.Create_Nonlinear_Scale_Space(img1_32); @@ -161,7 +174,9 @@ namespace cv KAZEOptions options; options.img_width = img.cols; options.img_height = img.rows; + options.descriptor = static_cast(descriptor); options.extended = extended; + options.upright = upright; KAZEFeatures impl(options); impl.Create_Nonlinear_Scale_Space(img1_32); diff --git a/modules/features2d/src/kaze/KAZEConfig.h b/modules/features2d/src/kaze/KAZEConfig.h index 94c3aaa4d3..b0e397d538 100644 --- a/modules/features2d/src/kaze/KAZEConfig.h +++ 
b/modules/features2d/src/kaze/KAZEConfig.h @@ -5,92 +5,81 @@ * @author Pablo F. Alcantarilla */ -#ifndef __OPENCV_FEATURES_2D_KAZE_CONFIG_HPP__ -#define __OPENCV_FEATURES_2D_KAZE_CONFIG_HPP__ - -//****************************************************************************** -//****************************************************************************** +#pragma once // OpenCV Includes #include "precomp.hpp" +#include -//************************************************************************************* -//************************************************************************************* - -// Some defines -#define NMAX_CHAR 400 - -// Some default options -static const float DEFAULT_SCALE_OFFSET = 1.60f; // Base scale offset (sigma units) -static const float DEFAULT_OCTAVE_MAX = 4.0f; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) -static const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level -static const float DEFAULT_DETECTOR_THRESHOLD = 0.001f; // Detector response threshold to accept point -static const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001f; // Minimum Detector response threshold to accept point -static const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-SURF -static const bool DEFAULT_USE_FED = true; // 0->AOS, 1->FED -static const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation -static const bool DEFAULT_EXTENDED = false; // Extended descriptor, dimension 128 - -// Some important configuration variables -static const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0f; -static const float DEFAULT_KCONTRAST = 0.01f; -static const float KCONTRAST_PERCENTILE = 0.7f; -static const int KCONTRAST_NBINS = 300; -static const bool COMPUTE_KCONTRAST = true; -static const int DEFAULT_DIFFUSIVITY_TYPE = 1; // 0 -> PM G1, 1 -> PM G2, 2 -> Weickert -static const bool USE_CLIPPING_NORMALIZATION = false; -static const float CLIPPING_NORMALIZATION_RATIO = 1.6f; -static const int CLIPPING_NORMALIZATION_NITER = 5; - -//************************************************************************************* //************************************************************************************* struct KAZEOptions { - KAZEOptions() { - // Load the default options - soffset = DEFAULT_SCALE_OFFSET; - omax = static_cast(DEFAULT_OCTAVE_MAX); - nsublevels = DEFAULT_NSUBLEVELS; - dthreshold = DEFAULT_DETECTOR_THRESHOLD; - use_fed = DEFAULT_USE_FED; - upright = DEFAULT_UPRIGHT; - extended = DEFAULT_EXTENDED; - descriptor = DEFAULT_DESCRIPTOR_MODE; - diffusivity = DEFAULT_DIFFUSIVITY_TYPE; - sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; - } + enum DIFFUSIVITY_TYPE { + PM_G1 = 0, + PM_G2 = 1, + WEICKERT = 2 + }; - float soffset; - int omax; - int nsublevels; - int img_width; - int img_height; - int diffusivity; - float sderivatives; - float dthreshold; - bool use_fed; - bool upright; - bool extended; - int descriptor; + KAZEOptions() + : descriptor(cv::KAZE::DESCRIPTOR_MSURF) + , diffusivity(PM_G2) + + , soffset(1.60f) + , omax(4) + , nsublevels(4) + , img_width(0) + , img_height(0) + , sderivatives(1.0f) + , dthreshold(0.001f) + , kcontrast(0.01f) + , kcontrast_percentille(0.7f) + , kcontrast_bins(300) + + , use_fed(true) + , upright(false) + , extended(false) + + , use_clipping_normalilzation(false) + , clipping_normalization_ratio(1.6f) + , clipping_normalization_niter(5) + { + } + + cv::KAZE::DESCRIPTOR_TYPE descriptor; + DIFFUSIVITY_TYPE diffusivity; + + float soffset; + int omax; + 
int nsublevels; + int img_width; + int img_height; + float sderivatives; + float dthreshold; + float kcontrast; + float kcontrast_percentille; + int kcontrast_bins; + + bool use_fed; + bool upright; + bool extended; + + bool use_clipping_normalilzation; + float clipping_normalization_ratio; + int clipping_normalization_niter; }; struct TEvolution { - cv::Mat Lx, Ly; // First order spatial derivatives - cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives - cv::Mat Lflow; // Diffusivity image - cv::Mat Lt; // Evolution image - cv::Mat Lsmooth; // Smoothed image - cv::Mat Lstep; // Evolution step update - cv::Mat Ldet; // Detector response - float etime; // Evolution time - float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 - float octave; // Image octave - float sublevel; // Image sublevel in each octave - int sigma_size; // Integer esigma. For computing the feature detector responses + cv::Mat Lx, Ly; // First order spatial derivatives + cv::Mat Lxx, Lxy, Lyy; // Second order spatial derivatives + cv::Mat Lflow; // Diffusivity image + cv::Mat Lt; // Evolution image + cv::Mat Lsmooth; // Smoothed image + cv::Mat Lstep; // Evolution step update + cv::Mat Ldet; // Detector response + float etime; // Evolution time + float esigma; // Evolution sigma. For linear diffusion t = sigma^2 / 2 + float octave; // Image octave + float sublevel; // Image sublevel in each octave + int sigma_size; // Integer esigma. For computing the feature detector responses }; - -//************************************************************************************* -//************************************************************************************* - -#endif \ No newline at end of file diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 15c003e41e..51e3a930f7 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -36,31 +36,11 @@ using namespace cv::details::kaze; * @param options KAZE configuration options * @note The constructor allocates memory for the nonlinear scale space */ -KAZEFeatures::KAZEFeatures(KAZEOptions& options) { - - soffset_ = options.soffset; - sderivatives_ = options.sderivatives; - omax_ = options.omax; - nsublevels_ = options.nsublevels; - img_width_ = options.img_width; - img_height_ = options.img_height; - dthreshold_ = options.dthreshold; - diffusivity_ = options.diffusivity; - descriptor_mode_ = options.descriptor; - use_fed_ = options.use_fed; - use_upright_ = options.upright; - use_extended_ = options.extended; - use_normalization = USE_CLIPPING_NORMALIZATION; - - kcontrast_ = DEFAULT_KCONTRAST; +KAZEFeatures::KAZEFeatures(KAZEOptions& _options) + : options(_options) +{ ncycles_ = 0; reordering_ = true; - //tkcontrast_ = 0.0; - //tnlscale_ = 0.0; - //tdetector_ = 0.0; - //tmderivatives_ = 0.0; - //tdresponse_ = 0.0; - //tdescriptor_ = 0.0; // Now allocate memory for the evolution Allocate_Memory_Evolution(); @@ -75,21 +55,21 @@ KAZEFeatures::KAZEFeatures(KAZEOptions& options) { void KAZEFeatures::Allocate_Memory_Evolution(void) { // Allocate the dimension of the matrices for the evolution - for (int i = 0; i <= omax_ - 1; i++) { - for (int j = 0; j <= nsublevels_ - 1; j++) { + for (int i = 0; i <= options.omax - 1; i++) { + for (int j = 0; j <= options.nsublevels - 1; j++) { TEvolution aux; - aux.Lx = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Ly = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lxx = 
cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lxy = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lyy = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lflow = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lt = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lsmooth = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Lstep = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.Ldet = cv::Mat::zeros(img_height_, img_width_, CV_32F); - aux.esigma = soffset_*pow((float)2.0f, (float)(j) / (float)(nsublevels_)+i); + aux.Lx = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Ly = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lxx = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lxy = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lyy = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lflow = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lt = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lsmooth = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Lstep = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.Ldet = cv::Mat::zeros(options.img_height, options.img_width, CV_32F); + aux.esigma = options.soffset*pow((float)2.0f, (float)(j) / (float)(options.nsublevels)+i); aux.etime = 0.5f*(aux.esigma*aux.esigma); aux.sigma_size = fRound(aux.esigma); aux.octave = (float)i; @@ -99,7 +79,7 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { } // Allocate memory for the FED number of cycles and time steps - if (use_fed_) { + if (options.use_fed) { for (size_t i = 1; i < evolution_.size(); i++) { int naux = 0; vector tau; @@ -113,16 +93,16 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) { } else { // Allocate memory for the auxiliary variables that are used in the AOS scheme - Ltx_ = Mat::zeros(img_width_, img_height_, CV_32F); - Lty_ = Mat::zeros(img_height_, img_width_, CV_32F); - px_ = Mat::zeros(img_height_, img_width_, CV_32F); - py_ = Mat::zeros(img_height_, img_width_, CV_32F); - ax_ = Mat::zeros(img_height_, img_width_, CV_32F); - ay_ = Mat::zeros(img_height_, img_width_, CV_32F); - bx_ = Mat::zeros(img_height_ - 1, img_width_, CV_32F); - by_ = Mat::zeros(img_height_ - 1, img_width_, CV_32F); - qr_ = Mat::zeros(img_height_ - 1, img_width_, CV_32F); - qc_ = Mat::zeros(img_height_, img_width_ - 1, CV_32F); + Ltx_ = Mat::zeros(options.img_width, options.img_height, CV_32F); // TODO? IS IT A BUG??? 
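        // Note on the line above: cv::Mat::zeros() takes (rows, cols, type), so Ltx_ is
        // allocated with the image width passed as rows, i.e. transposed with respect to
        // the other AOS buffers below. The ordering is carried over unchanged from the
        // pre-refactor code (the removed line also uses img_width_, img_height_), and it
        // may be intentional, Ltx_ holding the transposed image for the column-wise AOS
        // pass, rather than a bug; worth verifying before resolving the TODO.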
+ Lty_ = Mat::zeros(options.img_height, options.img_width, CV_32F); + px_ = Mat::zeros(options.img_height, options.img_width, CV_32F); + py_ = Mat::zeros(options.img_height, options.img_width, CV_32F); + ax_ = Mat::zeros(options.img_height, options.img_width, CV_32F); + ay_ = Mat::zeros(options.img_height, options.img_width, CV_32F); + bx_ = Mat::zeros(options.img_height - 1, options.img_width, CV_32F); + by_ = Mat::zeros(options.img_height - 1, options.img_width, CV_32F); + qr_ = Mat::zeros(options.img_height - 1, options.img_width, CV_32F); + qc_ = Mat::zeros(options.img_height, options.img_width - 1, CV_32F); } } @@ -141,35 +121,35 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) // Copy the original image to the first level of the evolution img.copyTo(evolution_[0].Lt); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, soffset_); - gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, sderivatives_); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options.soffset); + gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options.sderivatives); // Firstly compute the kcontrast factor - Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE); + Compute_KContrast(evolution_[0].Lt, options.kcontrast_percentille); // Now generate the rest of evolution levels for (size_t i = 1; i < evolution_.size(); i++) { evolution_[i - 1].Lt.copyTo(evolution_[i].Lt); - gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, sderivatives_); + gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options.sderivatives); // Compute the Gaussian derivatives Lx and Ly Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT); Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT); // Compute the conductivity equation - if (diffusivity_ == 0) { - pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, kcontrast_); + if (options.diffusivity == KAZEOptions::PM_G1) { + pm_g1(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options.kcontrast); } - else if (diffusivity_ == 1) { - pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, kcontrast_); + else if (options.diffusivity == KAZEOptions::PM_G2) { + pm_g2(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options.kcontrast); } - else if (diffusivity_ == 2) { - weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, kcontrast_); + else if (options.diffusivity == KAZEOptions::WEICKERT) { + weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, evolution_[i].Lflow, options.kcontrast); } // Perform FED n inner steps - if (use_fed_) { + if (options.use_fed) { for (int j = 0; j < nsteps_[i - 1]; j++) { nld_step_scalar(evolution_[i].Lt, evolution_[i].Lflow, evolution_[i].Lstep, tsteps_[i - 1][j]); } @@ -194,7 +174,7 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) */ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) { - kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0); + options.kcontrast = compute_k_percentile(img, kpercentile, options.sderivatives, options.kcontrast_bins, 0, 0); } //************************************************************************************* @@ -239,9 +219,9 @@ void KAZEFeatures::Compute_Detector_Response(void) for (size_t i = 0; i < evolution_.size(); i++) { - for (int ix = 0; ix < img_height_; ix++) + for (int 
ix = 0; ix < options.img_height; ix++) { - for (int jx = 0; jx < img_width_; jx++) + for (int jx = 0; jx < options.img_width; jx++) { lxx = *(evolution_[i].Lxx.ptr(ix)+jx); lxy = *(evolution_[i].Lxy.ptr(ix)+jx); @@ -376,14 +356,14 @@ void KAZEFeatures::Find_Extremum_Threading(const int& level) { float value = 0.0; bool is_extremum = false; - for (int ix = 1; ix < img_height_ - 1; ix++) { - for (int jx = 1; jx < img_width_ - 1; jx++) { + for (int ix = 1; ix < options.img_height - 1; ix++) { + for (int jx = 1; jx < options.img_width - 1; jx++) { is_extremum = false; value = *(evolution_[level].Ldet.ptr(ix)+jx); // Filter the points with the detector threshold - if (value > dthreshold_ && value >= DEFAULT_MIN_DETECTOR_THRESHOLD) { + if (value > options.dthreshold) { if (value >= *(evolution_[level].Ldet.ptr(ix)+jx - 1)) { // First check on the same scale if (check_maximum_neighbourhood(evolution_[level].Ldet, 1, value, ix, jx, 1)) { @@ -495,10 +475,10 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { if (fabs(*(dst.ptr(0))) <= 1.0f && fabs(*(dst.ptr(1))) <= 1.0f && fabs(*(dst.ptr(2))) <= 1.0f) { kpts_[i].pt.x += *(dst.ptr(0)); kpts_[i].pt.y += *(dst.ptr(1)); - dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr(2))) / ((float)(nsublevels_)); + dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr(2))) / ((float)(options.nsublevels)); // In OpenCV the size of a keypoint is the diameter!! - kpts_[i].size = 2.0f*soffset_*pow((float)2.0f, dsc); + kpts_[i].size = 2.0f*options.soffset*pow((float)2.0f, dsc); kpts_[i].angle = 0.0; } // Set the points to be deleted after the for loop @@ -520,6 +500,117 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { //************************************************************************************* //************************************************************************************* +class MSURF_Descriptor_Invoker : public cv::ParallelLoopBody +{ +public: + MSURF_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& _options) + : _kpts(&kpts) + , _desc(&desc) + , _evolution(&evolution) + , options(_options) + { + } + + virtual ~MSURF_Descriptor_Invoker() + { + } + + void operator() (const cv::Range& range) const + { + std::vector &kpts = *_kpts; + cv::Mat &desc = *_desc; + std::vector &evolution = *_evolution; + + for (int i = range.start; i < range.end; i++) + { + kpts[i].angle = 0.0; + if (options.upright) + { + kpts[i].angle = 0.0; + if (options.extended) + Get_MSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); + else + Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); + } + else + { + KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options); + + if (options.extended) + Get_MSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); + else + Get_MSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); + } + } + } +private: + void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; + void Get_MSURF_Descriptor_128(const cv::KeyPoint& kpt, float *desc) const; + + std::vector * _kpts; + cv::Mat * _desc; + std::vector * _evolution; + KAZEOptions options; +}; + +class GSURF_Descriptor_Invoker : public cv::ParallelLoopBody +{ +public: + GSURF_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& _options) + : _kpts(&kpts) + , _desc(&desc) + , 
_evolution(&evolution) + , options(_options) + { + } + + virtual ~GSURF_Descriptor_Invoker() + { + } + + void operator() (const cv::Range& range) const + { + std::vector &kpts = *_kpts; + cv::Mat &desc = *_desc; + std::vector &evolution = *_evolution; + + for (int i = range.start; i < range.end; i++) + { + kpts[i].angle = 0.0; + if (options.upright) + { + kpts[i].angle = 0.0; + if (options.extended) + Get_GSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); + else + Get_GSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); + } + else + { + KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options); + + if (options.extended) + Get_GSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); + else + Get_GSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); + } + } + } + +private: + void Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_GSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const; + void Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; + void Get_GSURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; + + std::vector * _kpts; + cv::Mat * _desc; + std::vector * _evolution; + KAZEOptions options; +}; + /** * @brief This method computes the set of descriptors through the nonlinear scale space * @param kpts Vector of keypoints @@ -528,134 +619,23 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat &desc) { // Allocate memory for the matrix of descriptors - if (use_extended_ == true) { + if (options.extended == true) { desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1); } else { desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1); } - if (use_upright_ == true) { - if (use_extended_ == false) { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_SURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_GSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - } - else - { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_SURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_MSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - kpts[i].angle = 0.0; - Get_GSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); - } - } - } - } - else { - if (use_extended_ == false) { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_SURF_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - 
Compute_Main_Orientation_SURF(kpts[i]); - Get_MSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_GSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - } - else { - if (descriptor_mode_ == 0) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_SURF_Descriptor_128(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 1) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_MSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); - } - } - else if (descriptor_mode_ == 2) { -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t i = 0; i < kpts.size(); i++) { - Compute_Main_Orientation_SURF(kpts[i]); - Get_GSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); - } - } - } - } + switch (options.descriptor) + { + case cv::KAZE::DESCRIPTOR_MSURF: + cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_Invoker(kpts, desc, evolution_, options)); + break; + + case cv::KAZE::DESCRIPTOR_GSURF: + cv::parallel_for_(cv::Range(0, (int)kpts.size()), GSURF_Descriptor_Invoker(kpts, desc, evolution_, options)); + break; + }; } //************************************************************************************* @@ -667,7 +647,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat * @note The orientation is computed using a similar approach as described in the * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 */ -void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) +void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector& evolution_, const KAZEOptions& options) { int ix = 0, iy = 0, idx = 0, s = 0, level = 0; float xf = 0.0, yf = 0.0, gweight = 0.0; @@ -689,7 +669,7 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) iy = fRound(yf + j*s); ix = fRound(xf + i*s); - if (iy >= 0 && iy < img_height_ && ix >= 0 && ix < img_width_) { + if (iy >= 0 && iy < options.img_height && ix >= 0 && ix < options.img_width) { gweight = gaussian(iy - yf, ix - xf, 2.5f*s); resX[idx] = gweight*(*(evolution_[level].Lx.ptr(iy)+ix)); resY[idx] = gweight*(*(evolution_[level].Ly.ptr(iy)+ix)); @@ -739,212 +719,6 @@ void KAZEFeatures::Compute_Main_Orientation_SURF(cv::KeyPoint &kpt) //************************************************************************************* //************************************************************************************* -/** - * @brief This method computes the upright descriptor (no rotation invariant) - * of the provided keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional - * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., - * Speeded Up Robust Features, ECCV, 2006 - */ -void KAZEFeatures::Get_SURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) -{ - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - level = kpt.class_id; - scale = fRound(kpt.size / 2.0f); - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dx = dy = mdx = mdy = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - sample_y = k*scale + yf; - sample_x = l*scale + xf; - - y1 = (int)(sample_y - .5f); - x1 = (int)(sample_x - .5f); - - checkDescriptorLimits(x1, y1, img_width_, img_height_); - - y2 = (int)(sample_y + .5f); - x2 = (int)(sample_x + .5f); - - checkDescriptorLimits(x2, y2, img_width_, img_height_); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Sum the derivatives to the cumulative descriptor - dx += rx; - dy += ry; - mdx += fabs(rx); - mdy += fabs(ry); - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); - } -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method computes the descriptor of the provided keypoint given the - * main orientation - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional - * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., - * Speeded Up Robust Features, ECCV, 2006 - */ -void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) { - - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - dx = dy = mdx = mdy = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = (int)(sample_y - .5f); - x1 = (int)(sample_x - .5f); - - checkDescriptorLimits(x1, y1, img_width_, img_height_); - - y2 = (int)(sample_y + .5f); - x2 = (int)(sample_x + .5f); - - checkDescriptorLimits(x2, y2, img_width_, img_height_); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - - // Sum the derivatives to the cumulative descriptor - dx += rrx; - dy += rry; - mdx += fabs(rrx); - mdy += fabs(rry); - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); - } -} - -//************************************************************************************* -//************************************************************************************* - /** * @brief This method computes the upright descriptor (not rotation invariant) of * the provided keypoint @@ -954,7 +728,7 @@ void KAZEFeatures::Get_SURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void 
MSURF_Descriptor_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -964,6 +738,8 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; @@ -1013,12 +789,12 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -1069,8 +845,8 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -1086,7 +862,7 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void MSURF_Descriptor_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1096,6 +872,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; @@ -1149,12 +927,12 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) y1 = fRound(sample_y - 0.5f); x1 = fRound(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -1202,8 +980,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -1219,7 +997,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void GSURF_Descriptor_Invoker::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -1229,6 +1007,8 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Set the descriptor size and the sample and pattern sizes dsize = 64; sample_step = 5; @@ -1256,12 +1036,12 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -1337,8 +1117,8 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -1354,7 +1134,7 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, floa * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) +void GSURF_Descriptor_Invoker::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; @@ -1364,6 +1144,8 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Set the descriptor size and the sample and pattern sizes dsize = 64; sample_step = 5; @@ -1394,12 +1176,12 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -1475,8 +1257,8 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -1484,253 +1266,6 @@ void KAZEFeatures::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) //************************************************************************************* //************************************************************************************* -/** - * @brief This method computes the upright extended descriptor (no rotation invariant) - * of the provided keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional - * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., - * Speeded Up Robust Features, ECCV, 2006 - */ -void KAZEFeatures::Get_SURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) -{ - float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - level = kpt.class_id; - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dxp = dxn = mdxp = mdxn = 0.0; - dyp = dyn = mdyp = mdyn = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - sample_y = k*scale + yf; - sample_x = l*scale + xf; - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - checkDescriptorLimits(x1, y1, img_width_, img_height_); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - checkDescriptorLimits(x2, y2, img_width_, img_height_); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Sum the derivatives to the cumulative descriptor - if (ry >= 0.0) { - dxp += rx; - mdxp += fabs(rx); - } - else { - dxn += rx; - mdxn += fabs(rx); - } - - if (rx >= 0.0) { - dyp += ry; - mdyp += fabs(ry); - } - else { - dyn += ry; - mdyn += fabs(ry); - } - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); - } -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method computes the extended descriptor of the provided keypoint given the - * main orientation - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional - * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., - * Speeded Up Robust Features, ECCV, 2006 - */ -void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) -{ - float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dxp = dxn = mdxp = mdxn = 0.0; - dyp = dyn = mdyp = mdyn = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - checkDescriptorLimits(x1, y1, img_width_, img_height_); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - checkDescriptorLimits(x2, y2, img_width_, img_height_); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Get the x and y derivatives on the rotated axis - rry = rx*co + ry*si; - rrx = -rx*si + ry*co; - - // Sum the derivatives to the cumulative descriptor - if (rry >= 0.0) { - dxp += rrx; - mdxp += fabs(rrx); - } - else { - dxn += rrx; - mdxn += fabs(rrx); - } - - if (rrx >= 0.0) { - dyp += rry; - mdyp += fabs(rry); - } - else { - dyn += rry; - mdyn += fabs(rry); - } - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); - } -} - -//************************************************************************************* -//************************************************************************************* - /** * @brief This method computes 
the extended upright descriptor (not rotation invariant) of * the provided keypoint @@ -1740,8 +1275,8 @@ void KAZEFeatures::Get_SURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - +void MSURF_Descriptor_Invoker::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +{ float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; float sample_x = 0.0, sample_y = 0.0; @@ -1755,6 +1290,8 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; + std::vector& evolution_ = *_evolution; + // Set the descriptor size and the sample and pattern sizes dsize = 128; sample_step = 5; @@ -1804,12 +1341,12 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -1879,8 +1416,8 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -1896,8 +1433,8 @@ void KAZEFeatures::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { - +void MSURF_Descriptor_Invoker::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +{ float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; @@ -1908,6 +1445,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Subregion centers for the 4x4 gaussian weighting float cx = -0.5f, cy = 0.5f; @@ -1964,12 +1503,12 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc y1 = fRound(sample_y - 0.5f); x1 = fRound(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -2040,8 +1579,8 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, 
CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -2057,7 +1596,7 @@ void KAZEFeatures::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) +void GSURF_Descriptor_Invoker::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const { float len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; @@ -2067,6 +1606,8 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Set the descriptor size and the sample and pattern sizes dsize = 128; sample_step = 5; @@ -2094,12 +1635,12 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -2193,8 +1734,8 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -2210,7 +1751,8 @@ void KAZEFeatures::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, flo * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 */ -void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) { +void GSURF_Descriptor_Invoker::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +{ float len = 0.0, xf = 0.0, yf = 0.0; float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; @@ -2222,6 +1764,8 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; int dsize = 0, scale = 0, level = 0; + std::vector& evolution_ = *_evolution; + // Set the descriptor size and the sample and pattern sizes dsize = 128; sample_step = 5; @@ -2253,12 +1797,12 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc y1 = (int)(sample_y - 0.5f); x1 = (int)(sample_x - 0.5f); - checkDescriptorLimits(x1, y1, img_width_, img_height_); + checkDescriptorLimits(x1, y1, options.img_width, options.img_height); y2 = (int)(sample_y + 0.5f); x2 = (int)(sample_x + 0.5f); - checkDescriptorLimits(x2, y2, img_width_, img_height_); + checkDescriptorLimits(x2, y2, options.img_width, options.img_height); fx = sample_x - x1; fy = sample_y - y1; @@ -2351,8 +1895,8 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc desc[i] /= len; } - if (use_normalization == true) { - clippingDescriptor(desc, dsize, CLIPPING_NORMALIZATION_NITER, CLIPPING_NORMALIZATION_RATIO); + if (options.use_clipping_normalilzation) { + clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); } } @@ -2371,22 +1915,8 @@ void KAZEFeatures::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc */ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { -#ifdef _OPENMP -#pragma omp sections - { -#pragma omp section - { - AOS_Rows(Ldprev,c,stepsize); - } -#pragma omp section - { - AOS_Columns(Ldprev,c,stepsize); - } - } -#else AOS_Rows(Ldprev, c, stepsize); AOS_Columns(Ldprev, c, stepsize); -#endif Ld = 0.5f*(Lty_ + Ltx_.t()); } diff --git a/modules/features2d/src/kaze/KAZEFeatures.h b/modules/features2d/src/kaze/KAZEFeatures.h index c901561240..81509c47d9 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.h +++ b/modules/features2d/src/kaze/KAZEFeatures.h @@ -26,97 +26,52 @@ class KAZEFeatures { private: - // Parameters of the Nonlinear diffusion class - float soffset_; // Base scale offset - float sderivatives_; // Standard deviation of the Gaussian for the nonlinear diff. derivatives - int omax_; // Maximum octave level - int nsublevels_; // Number of sublevels per octave level - int img_width_; // Width of the original image - int img_height_; // Height of the original image - std::vector evolution_; // Vector of nonlinear diffusion evolution - float kcontrast_; // The contrast parameter for the scalar nonlinear diffusion - float dthreshold_; // Feature detector threshold response - int diffusivity_; // Diffusivity type, 0->PM G1, 1->PM G2, 2-> Weickert - int descriptor_mode_; // Descriptor mode - bool use_fed_; // Set to true in case we want to use FED for the nonlinear diffusion filtering. 
Set false for using AOS - bool use_upright_; // Set to true in case we want to use the upright version of the descriptors - bool use_extended_; // Set to true in case we want to use the extended version of the descriptors - bool use_normalization; + KAZEOptions options; - // Vector of keypoint vectors for finding extrema in multiple threads - std::vector > kpts_par_; + // Parameters of the Nonlinear diffusion class + std::vector evolution_; // Vector of nonlinear diffusion evolution - // FED parameters - int ncycles_; // Number of cycles - bool reordering_; // Flag for reordering time steps - std::vector > tsteps_; // Vector of FED dynamic time steps - std::vector nsteps_; // Vector of number of steps per cycle + // Vector of keypoint vectors for finding extrema in multiple threads + std::vector > kpts_par_; - // Computation times variables in ms - //double tkcontrast_; // Kcontrast factor computation - //double tnlscale_; // Nonlinear Scale space generation - //double tdetector_; // Feature detector - //double tmderivatives_; // Multiscale derivatives computation - //double tdresponse_; // Detector response computation - //double tdescriptor_; // Feature descriptor - //double tsubpixel_; // Subpixel refinement + // FED parameters + int ncycles_; // Number of cycles + bool reordering_; // Flag for reordering time steps + std::vector > tsteps_; // Vector of FED dynamic time steps + std::vector nsteps_; // Vector of number of steps per cycle - // Some auxiliary variables used in the AOS step - cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_; + // Some auxiliary variables used in the AOS step + cv::Mat Ltx_, Lty_, px_, py_, ax_, ay_, bx_, by_, qr_, qc_; public: - // Constructor - KAZEFeatures(KAZEOptions& options); + // Constructor + KAZEFeatures(KAZEOptions& options); - // Public methods for KAZE interface - void Allocate_Memory_Evolution(void); - int Create_Nonlinear_Scale_Space(const cv::Mat& img); - void Feature_Detection(std::vector& kpts); - void Feature_Description(std::vector& kpts, cv::Mat& desc); + // Public methods for KAZE interface + void Allocate_Memory_Evolution(void); + int Create_Nonlinear_Scale_Space(const cv::Mat& img); + void Feature_Detection(std::vector& kpts); + void Feature_Description(std::vector& kpts, cv::Mat& desc); + + static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector& evolution_, const KAZEOptions& options); private: - // Feature Detection Methods - void Compute_KContrast(const cv::Mat& img, const float& kper); - void Compute_Multiscale_Derivatives(void); - void Compute_Detector_Response(void); - void Determinant_Hessian_Parallel(std::vector& kpts); - void Find_Extremum_Threading(const int& level); - void Do_Subpixel_Refinement(std::vector& kpts); + // Feature Detection Methods + void Compute_KContrast(const cv::Mat& img, const float& kper); + void Compute_Multiscale_Derivatives(void); + void Compute_Detector_Response(void); + void Determinant_Hessian_Parallel(std::vector& kpts); + void Find_Extremum_Threading(const int& level); + void Do_Subpixel_Refinement(std::vector& kpts); - // AOS Methods - void AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); - void AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); - void AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); - void Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x); + // AOS Methods + void AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, 
const float& stepsize); + void AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); + void AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize); + void Thomas(const cv::Mat &a, const cv::Mat &b, const cv::Mat &Ld, cv::Mat &x); - // Feature Description methods - void Compute_Main_Orientation_SURF(cv::KeyPoint& kpt); - - // Descriptor Mode -> 0 SURF 64 - void Get_SURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc); - void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc); - - // Descriptor Mode -> 0 SURF 128 - void Get_SURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); - void Get_SURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc); - - // Descriptor Mode -> 1 M-SURF 64 - void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc); - void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc); - - // Descriptor Mode -> 1 M-SURF 128 - void Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); - void Get_MSURF_Descriptor_128(const cv::KeyPoint& kpt, float *desc); - - // Descriptor Mode -> 2 G-SURF 64 - void Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc); - void Get_GSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc); - - // Descriptor Mode -> 2 G-SURF 128 - void Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc); - void Get_GSURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc); }; //************************************************************************************* diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index 3cbd3f6937..a14a9dd72c 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -169,12 +169,18 @@ TEST(Features2d_Detector_Keypoints_Dense, validation) TEST(Features2d_Detector_Keypoints_KAZE, validation) { - CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.KAZE")); - test.safe_run(); + CV_FeatureDetectorKeypointsTest test_gsurf(cv::Ptr(new cv::KAZE(cv::KAZE::DESCRIPTOR_GSURF, false, false))); + test_gsurf.safe_run(); + + CV_FeatureDetectorKeypointsTest test_msurf(cv::Ptr(new cv::KAZE(cv::KAZE::DESCRIPTOR_MSURF, false, false))); + test_msurf.safe_run(); } TEST(Features2d_Detector_Keypoints_AKAZE, validation) { - CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.AKAZE")); - test.safe_run(); + CV_FeatureDetectorKeypointsTest test_kaze(cv::Ptr(new cv::AKAZE(cv::AKAZE::DESCRIPTOR_KAZE))); + test_kaze.safe_run(); + + CV_FeatureDetectorKeypointsTest test_mldb(cv::Ptr(new cv::AKAZE(cv::AKAZE::DESCRIPTOR_MLDB))); + test_mldb.safe_run(); } From b42c26816431dc449ea5354eb15d4c82604b3ac6 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 9 May 2014 19:34:54 +0300 Subject: [PATCH 44/52] Temporary remove of CV_WRAP --- modules/features2d/include/opencv2/features2d.hpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index f3ff13aaad..87d7caff92 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -893,15 +893,14 @@ KAZE implementation class CV_EXPORTS_W KAZE : public Feature2D { public: - - /// AKAZE Descriptor Type + /// AKAZE Descriptor Type enum DESCRIPTOR_TYPE { DESCRIPTOR_MSURF = 1, DESCRIPTOR_GSURF = 2 }; CV_WRAP KAZE(); - CV_WRAP explicit KAZE(DESCRIPTOR_TYPE type, 
bool _extended, bool _upright); + explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool _extended, bool _upright); virtual ~KAZE(); @@ -945,7 +944,7 @@ public: }; CV_WRAP AKAZE(); - CV_WRAP explicit AKAZE(DESCRIPTOR_TYPE _descriptor, int _descriptor_size = 0, int _descriptor_channels = 3); + explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int _descriptor_size = 0, int _descriptor_channels = 3); virtual ~AKAZE(); From c4e49463a93a1d3e25f1823ace55d38d547c3955 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 9 May 2014 19:35:41 +0300 Subject: [PATCH 45/52] Initial commit of the KAZE & AKAZE docs --- .../doc/feature_detection_and_description.rst | 73 +++++++++++++++++++ modules/features2d/src/akaze.cpp | 4 +- modules/features2d/src/kaze.cpp | 4 +- 3 files changed, 77 insertions(+), 4 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index c0f611713e..76245c76be 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -249,3 +249,76 @@ We notice that for keypoint matching applications, image content has little effe :param keypoints: Set of detected keypoints :param corrThresh: Correlation threshold. :param verbose: Prints pair selection informations. + +KAZE +----- +.. ocv:class:: KAZE : public Feature2D + +Class implementing the KAZE keypoint detector and descriptor extractor, described in [ABD12]_. :: + + class CV_EXPORTS_W KAZE : public Feature2D + { + public: + + /// KAZE Descriptor Type + enum DESCRIPTOR_TYPE { + DESCRIPTOR_MSURF = 1, + DESCRIPTOR_GSURF = 2 + }; + + CV_WRAP KAZE(); + explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool _extended, bool _upright); + + .... + }; + +The KAZE constructor + +.. ocv:function:: KAZE::KAZE() + +.. ocv:function:: KAZE::KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright) + + :param descriptor_type: Type of the extracted descriptor. + :param extended: Set to enable extraction of extended (128-byte) descriptor. + :param upright: Set to enable use of upright descriptors (non rotation-invariant). + + +.. [ABD12] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision (ECCV), Fiorenze, Italy, October 2012. + + +AKAZE +----- +.. ocv:class:: AKAZE : public Feature2D + +Class implementing the AKAZE keypoint detector and descriptor extractor, described in [ANB13]_. :: + + class CV_EXPORTS_W AKAZE : public Feature2D + { + public: + /// AKAZE Descriptor Type + enum DESCRIPTOR_TYPE { + DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_KAZE = 3, + DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_MLDB = 5 + }; + + CV_WRAP AKAZE(); + explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int _descriptor_size = 0, int _descriptor_channels = 3); + + ... + }; + +The AKAZE constructor + +.. ocv:function:: AKAZE::AKAZE() + +.. ocv:function:: AKAZE::AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3) + + :param descriptor_type: Type of the extracted descriptor. + :param descriptor_size: Size of the descriptor in bits. 0 -> Full size + :param descriptor_channels: Number of channels in the descriptor (1, 2, 3). + + + +.. [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. 
In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 0c0df7c1d9..1ffde9ebc2 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -60,8 +60,8 @@ namespace cv { } - AKAZE::AKAZE(DESCRIPTOR_TYPE _descriptor, int _descriptor_size, int _descriptor_channels) - : descriptor(_descriptor) + AKAZE::AKAZE(DESCRIPTOR_TYPE descriptor_type, int _descriptor_size, int _descriptor_channels) + : descriptor(descriptor_type) , descriptor_channels(_descriptor_channels) , descriptor_size(_descriptor_size) { diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index dbb09a75e1..3fc98e5863 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -59,8 +59,8 @@ namespace cv { } - KAZE::KAZE(DESCRIPTOR_TYPE type, bool _extended, bool _upright) - : descriptor(type) + KAZE::KAZE(DESCRIPTOR_TYPE descriptor_type, bool _extended, bool _upright) + : descriptor(descriptor_type) , extended(_extended) , upright(_upright) { From 3a8e15fad9b72531a8ba8511694fe76ac68b9aeb Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 9 May 2014 22:21:26 +0300 Subject: [PATCH 46/52] Fix documentation warnings --- modules/features2d/include/opencv2/features2d.hpp | 4 ++-- modules/features2d/src/akaze.cpp | 4 ++-- modules/features2d/src/kaze.cpp | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 87d7caff92..90a047719a 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -900,7 +900,7 @@ public: }; CV_WRAP KAZE(); - explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool _extended, bool _upright); + explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright); virtual ~KAZE(); @@ -944,7 +944,7 @@ public: }; CV_WRAP AKAZE(); - explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int _descriptor_size = 0, int _descriptor_channels = 3); + explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3); virtual ~AKAZE(); diff --git a/modules/features2d/src/akaze.cpp b/modules/features2d/src/akaze.cpp index 1ffde9ebc2..4b1eb196a2 100644 --- a/modules/features2d/src/akaze.cpp +++ b/modules/features2d/src/akaze.cpp @@ -60,8 +60,8 @@ namespace cv { } - AKAZE::AKAZE(DESCRIPTOR_TYPE descriptor_type, int _descriptor_size, int _descriptor_channels) - : descriptor(descriptor_type) + AKAZE::AKAZE(DESCRIPTOR_TYPE _descriptor_type, int _descriptor_size, int _descriptor_channels) + : descriptor(_descriptor_type) , descriptor_channels(_descriptor_channels) , descriptor_size(_descriptor_size) { diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 3fc98e5863..646ee4bdbb 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -59,8 +59,8 @@ namespace cv { } - KAZE::KAZE(DESCRIPTOR_TYPE descriptor_type, bool _extended, bool _upright) - : descriptor(descriptor_type) + KAZE::KAZE(DESCRIPTOR_TYPE _descriptor_type, bool _extended, bool _upright) + : descriptor(_descriptor_type) , extended(_extended) , upright(_upright) { From 616c348536e869b6c19d7389e563943f9fdca9f9 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 9 May 2014 22:31:20 +0300 Subject: [PATCH 47/52] Fix documentation warnings --- .../doc/feature_detection_and_description.rst | 12 ++++++------ 
1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index 76245c76be..1ea2aae424 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -267,15 +267,15 @@ Class implementing the KAZE keypoint detector and descriptor extractor, describe }; CV_WRAP KAZE(); - explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool _extended, bool _upright); + explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright); .... }; +KAZE::KAZE +-------- The KAZE constructor -.. ocv:function:: KAZE::KAZE() - .. ocv:function:: KAZE::KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright) :param descriptor_type: Type of the extracted descriptor. @@ -304,15 +304,15 @@ Class implementing the AKAZE keypoint detector and descriptor extractor, describ }; CV_WRAP AKAZE(); - explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int _descriptor_size = 0, int _descriptor_channels = 3); + explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3); ... }; +AKAZE::AKAZE +-------- The AKAZE constructor -.. ocv:function:: AKAZE::AKAZE() - .. ocv:function:: AKAZE::AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3) :param descriptor_type: Type of the extracted descriptor. From a068ccbf51a64c0c45cac99706ccb4c8699c86c6 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Fri, 9 May 2014 22:53:28 +0300 Subject: [PATCH 48/52] Fix "Title underline too short" warning --- modules/features2d/doc/feature_detection_and_description.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index 1ea2aae424..c3029a34c2 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -273,7 +273,7 @@ Class implementing the KAZE keypoint detector and descriptor extractor, describe }; KAZE::KAZE --------- +---------- The KAZE constructor .. ocv:function:: KAZE::KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright) @@ -310,7 +310,7 @@ Class implementing the AKAZE keypoint detector and descriptor extractor, describ }; AKAZE::AKAZE --------- +------------ The AKAZE constructor .. 
ocv:function:: AKAZE::AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3) From 029a8c443a761fde35d1e6c246e7177e3de193f2 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 10 May 2014 20:57:37 +0300 Subject: [PATCH 49/52] Remove GSURF descriptor from KAZE algorithm --- .../doc/feature_detection_and_description.rst | 19 +- .../features2d/include/opencv2/features2d.hpp | 8 +- modules/features2d/src/features2d_init.cpp | 1 - modules/features2d/src/kaze.cpp | 11 +- modules/features2d/src/kaze/KAZEConfig.h | 4 +- modules/features2d/src/kaze/KAZEFeatures.cpp | 693 +----------------- modules/features2d/test/test_keypoints.cpp | 7 +- 7 files changed, 25 insertions(+), 718 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index c3029a34c2..fa18d4e3bc 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -256,29 +256,12 @@ KAZE Class implementing the KAZE keypoint detector and descriptor extractor, described in [ABD12]_. :: - class CV_EXPORTS_W KAZE : public Feature2D - { - public: - - /// KAZE Descriptor Type - enum DESCRIPTOR_TYPE { - DESCRIPTOR_MSURF = 1, - DESCRIPTOR_GSURF = 2 - }; - - CV_WRAP KAZE(); - explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright); - - .... - }; - KAZE::KAZE ---------- The KAZE constructor -.. ocv:function:: KAZE::KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright) +.. ocv:function:: KAZE::KAZE(bool extended, bool upright) - :param descriptor_type: Type of the extracted descriptor. :param extended: Set to enable extraction of extended (128-byte) descriptor. :param upright: Set to enable use of upright descriptors (non rotation-invariant). 
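[Editor's note, not part of the patch] A minimal usage sketch of the constructors documented above, assuming the FeatureDetector/DescriptorExtractor interface that Feature2D inherits in this branch; the input image is synthetic so the snippet stays self-contained.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

int main()
{
    // Synthetic 8-bit grayscale input, so no image I/O module is required
    cv::Mat img(480, 640, CV_8UC1);
    cv::randu(img, cv::Scalar(0), cv::Scalar(255));

    cv::KAZE kaze(false /*extended: 64-float descriptor*/, false /*upright: keep rotation invariance*/);
    cv::AKAZE akaze(cv::AKAZE::DESCRIPTOR_MLDB, 0 /*full-size descriptor*/, 3 /*channels*/);

    std::vector<cv::KeyPoint> kpts;
    cv::Mat desc;

    kaze.detect(img, kpts);          // nonlinear scale space + Hessian extrema
    kaze.compute(img, kpts, desc);   // CV_32F rows, 64 (or 128 if extended) columns

    akaze.detect(img, kpts);
    akaze.compute(img, kpts, desc);  // binary MLDB descriptor rows

    return 0;
}
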
diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 90a047719a..73bc460447 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -893,14 +893,8 @@ KAZE implementation class CV_EXPORTS_W KAZE : public Feature2D { public: - /// AKAZE Descriptor Type - enum DESCRIPTOR_TYPE { - DESCRIPTOR_MSURF = 1, - DESCRIPTOR_GSURF = 2 - }; - CV_WRAP KAZE(); - explicit KAZE(DESCRIPTOR_TYPE descriptor_type, bool extended, bool upright); + CV_WRAP explicit KAZE(bool extended, bool upright); virtual ~KAZE(); diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp index c0365274de..eb7145697b 100644 --- a/modules/features2d/src/features2d_init.cpp +++ b/modules/features2d/src/features2d_init.cpp @@ -126,7 +126,6 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT", /////////////////////////////////////////////////////////////////////////////////////////////////////////// CV_INIT_ALGORITHM(KAZE, "Feature2D.KAZE", - obj.info()->addParam(obj, "descriptor", obj.descriptor); obj.info()->addParam(obj, "upright", obj.upright); obj.info()->addParam(obj, "extended", obj.extended)) diff --git a/modules/features2d/src/kaze.cpp b/modules/features2d/src/kaze.cpp index 646ee4bdbb..88fb999d5f 100644 --- a/modules/features2d/src/kaze.cpp +++ b/modules/features2d/src/kaze.cpp @@ -53,15 +53,13 @@ http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pd namespace cv { KAZE::KAZE() - : descriptor(DESCRIPTOR_MSURF) - , extended(false) + : extended(false) , upright(false) { } - KAZE::KAZE(DESCRIPTOR_TYPE _descriptor_type, bool _extended, bool _upright) - : descriptor(_descriptor_type) - , extended(_extended) + KAZE::KAZE(bool _extended, bool _upright) + : extended(_extended) , upright(_upright) { @@ -111,7 +109,6 @@ namespace cv KAZEOptions options; options.img_width = img.cols; options.img_height = img.rows; - options.descriptor = static_cast(descriptor); options.extended = extended; options.upright = upright; @@ -146,7 +143,6 @@ namespace cv KAZEOptions options; options.img_width = img.cols; options.img_height = img.rows; - options.descriptor = static_cast(descriptor); options.extended = extended; options.upright = upright; @@ -174,7 +170,6 @@ namespace cv KAZEOptions options; options.img_width = img.cols; options.img_height = img.rows; - options.descriptor = static_cast(descriptor); options.extended = extended; options.upright = upright; diff --git a/modules/features2d/src/kaze/KAZEConfig.h b/modules/features2d/src/kaze/KAZEConfig.h index b0e397d538..988e247372 100644 --- a/modules/features2d/src/kaze/KAZEConfig.h +++ b/modules/features2d/src/kaze/KAZEConfig.h @@ -22,8 +22,7 @@ struct KAZEOptions { }; KAZEOptions() - : descriptor(cv::KAZE::DESCRIPTOR_MSURF) - , diffusivity(PM_G2) + : diffusivity(PM_G2) , soffset(1.60f) , omax(4) @@ -46,7 +45,6 @@ struct KAZEOptions { { } - cv::KAZE::DESCRIPTOR_TYPE descriptor; DIFFUSIVITY_TYPE diffusivity; float soffset; diff --git a/modules/features2d/src/kaze/KAZEFeatures.cpp b/modules/features2d/src/kaze/KAZEFeatures.cpp index 51e3a930f7..634f68da80 100644 --- a/modules/features2d/src/kaze/KAZEFeatures.cpp +++ b/modules/features2d/src/kaze/KAZEFeatures.cpp @@ -500,10 +500,10 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector &kpts) { //************************************************************************************* 
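[Editor's note, not part of the patch] The invoker classes changed in the hunk below follow OpenCV's cv::ParallelLoopBody idiom: a functor receives disjoint index ranges from cv::parallel_for_ and computes one descriptor row per keypoint index. A self-contained sketch of that idiom under stated assumptions (fillRow is a hypothetical placeholder, not code from this patch):

#include <opencv2/core.hpp>

class RowFillInvoker : public cv::ParallelLoopBody
{
public:
    RowFillInvoker(cv::Mat& desc) : desc_(&desc) {}

    // Invoked concurrently with disjoint sub-ranges of the row index range
    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; i++)
            fillRow(i, desc_->ptr<float>(i));
    }

private:
    static void fillRow(int i, float* row) { row[0] = (float)i; }  // placeholder per-row work

    cv::Mat* desc_;
};

int main()
{
    cv::Mat desc = cv::Mat::zeros(100, 64, CV_32FC1);
    cv::parallel_for_(cv::Range(0, desc.rows), RowFillInvoker(desc));
    return 0;
}
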
//************************************************************************************* -class MSURF_Descriptor_Invoker : public cv::ParallelLoopBody +class KAZE_Descriptor_Invoker : public cv::ParallelLoopBody { public: - MSURF_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& _options) + KAZE_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& _options) : _kpts(&kpts) , _desc(&desc) , _evolution(&evolution) @@ -511,7 +511,7 @@ public: { } - virtual ~MSURF_Descriptor_Invoker() + virtual ~KAZE_Descriptor_Invoker() { } @@ -528,82 +528,26 @@ public: { kpts[i].angle = 0.0; if (options.extended) - Get_MSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); + Get_KAZE_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); else - Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); + Get_KAZE_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); } else { KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options); if (options.extended) - Get_MSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); + Get_KAZE_Descriptor_128(kpts[i], desc.ptr((int)i)); else - Get_MSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); + Get_KAZE_Descriptor_64(kpts[i], desc.ptr((int)i)); } } } private: - void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; - void Get_MSURF_Descriptor_128(const cv::KeyPoint& kpt, float *desc) const; - - std::vector * _kpts; - cv::Mat * _desc; - std::vector * _evolution; - KAZEOptions options; -}; - -class GSURF_Descriptor_Invoker : public cv::ParallelLoopBody -{ -public: - GSURF_Descriptor_Invoker(std::vector &kpts, cv::Mat &desc, std::vector& evolution, const KAZEOptions& _options) - : _kpts(&kpts) - , _desc(&desc) - , _evolution(&evolution) - , options(_options) - { - } - - virtual ~GSURF_Descriptor_Invoker() - { - } - - void operator() (const cv::Range& range) const - { - std::vector &kpts = *_kpts; - cv::Mat &desc = *_desc; - std::vector &evolution = *_evolution; - - for (int i = range.start; i < range.end; i++) - { - kpts[i].angle = 0.0; - if (options.upright) - { - kpts[i].angle = 0.0; - if (options.extended) - Get_GSURF_Upright_Descriptor_128(kpts[i], desc.ptr((int)i)); - else - Get_GSURF_Upright_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - else - { - KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options); - - if (options.extended) - Get_GSURF_Descriptor_128(kpts[i], desc.ptr((int)i)); - else - Get_GSURF_Descriptor_64(kpts[i], desc.ptr((int)i)); - } - } - } - -private: - void Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; - void Get_GSURF_Descriptor_64(const cv::KeyPoint& kpt, float *desc) const; - void Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; - void Get_GSURF_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; + void Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_KAZE_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const; + void Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint& kpt, float* desc) const; + void Get_KAZE_Descriptor_128(const cv::KeyPoint& kpt, float *desc) const; std::vector * _kpts; cv::Mat * _desc; @@ -626,16 +570,7 @@ void KAZEFeatures::Feature_Description(std::vector &kpts, cv::Mat desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1); } - switch 
(options.descriptor) - { - case cv::KAZE::DESCRIPTOR_MSURF: - cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_Invoker(kpts, desc, evolution_, options)); - break; - - case cv::KAZE::DESCRIPTOR_GSURF: - cv::parallel_for_(cv::Range(0, (int)kpts.size()), GSURF_Descriptor_Invoker(kpts, desc, evolution_, options)); - break; - }; + cv::parallel_for_(cv::Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options)); } //************************************************************************************* @@ -728,7 +663,7 @@ void KAZEFeatures::Compute_Main_Orientation(cv::KeyPoint &kpt, const std::vector * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void MSURF_Descriptor_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -862,7 +797,7 @@ void MSURF_Descriptor_Invoker::Get_MSURF_Upright_Descriptor_64(const cv::KeyPoin * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void MSURF_Descriptor_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const { float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -988,284 +923,6 @@ void MSURF_Descriptor_Invoker::Get_MSURF_Descriptor_64(const cv::KeyPoint &kpt, //************************************************************************************* //************************************************************************************* -/** - * @brief This method computes the upright G-SURF descriptor of the provided keypoint - * given the main orientation - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional - * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and - * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 - */ -void GSURF_Descriptor_Invoker::Get_GSURF_Upright_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const -{ - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float lvv = 0.0, lww = 0.0, modg = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - std::vector& evolution_ = *_evolution; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - level = kpt.class_id; - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dx = dy = mdx = mdy = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + l*scale; - sample_x = xf + k*scale; - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - modg = pow(rx, 2) + pow(ry, 2); - - if (modg != 0.0) { - - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); - } - else { - lww = 0.0; - lvv = 0.0; - } - - // Sum the derivatives to the cumulative descriptor - dx += lww; - dy += lvv; - mdx += fabs(lww); - mdy += 
fabs(lvv); - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method computes the G-SURF descriptor of the provided keypoint given the - * main orientation - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional - * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and - * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 - */ -void GSURF_Descriptor_Invoker::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, float *desc) const -{ - float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float lvv = 0.0, lww = 0.0, modg = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - std::vector& evolution_ = *_evolution; - - // Set the descriptor size and the sample and pattern sizes - dsize = 64; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dx = dy = mdx = mdy = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - modg = pow(rx, 2) + pow(ry, 2); - - if (modg != 0.0) { - - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = 
*(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); - } - else { - lww = 0.0; - lvv = 0.0; - } - - // Sum the derivatives to the cumulative descriptor - dx += lww; - dy += lvv; - mdx += fabs(lww); - mdy += fabs(lvv); - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dx; - desc[dcount++] = dy; - desc[dcount++] = mdx; - desc[dcount++] = mdy; - - // Store the current length^2 of the vector - len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } - -} - -//************************************************************************************* -//************************************************************************************* - /** * @brief This method computes the extended upright descriptor (not rotation invariant) of * the provided keypoint @@ -1275,7 +932,7 @@ void GSURF_Descriptor_Invoker::Get_GSURF_Descriptor_64(const cv::KeyPoint &kpt, * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void MSURF_Descriptor_Invoker::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const { float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1433,7 +1090,7 @@ void MSURF_Descriptor_Invoker::Get_MSURF_Upright_Descriptor_128(const cv::KeyPoi * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, * ECCV 2008 */ -void MSURF_Descriptor_Invoker::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const +void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const { float gauss_s1 = 0.0, gauss_s2 = 0.0; float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; @@ -1587,322 +1244,6 @@ void MSURF_Descriptor_Invoker::Get_MSURF_Descriptor_128(const cv::KeyPoint &kpt, //************************************************************************************* //************************************************************************************* -/** - * @brief This method computes the G-SURF upright extended descriptor - * (no rotation invariant) of the 
provided keypoint - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional - * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and - * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 - */ -void GSURF_Descriptor_Invoker::Get_GSURF_Upright_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const -{ - float len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0, lvv = 0.0, lww = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - std::vector& evolution_ = *_evolution; - - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - level = kpt.class_id; - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dxp = dxn = mdxp = mdxn = 0.0; - dyp = dyn = mdyp = mdyn = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - sample_y = k*scale + yf; - sample_x = l*scale + xf; - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); - - fx = sample_x - x1; - fy = sample_y - y1; - - res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - modg = pow(rx, 2) + pow(ry, 2); - - if (modg != 0.0) { - - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - - // Lvv = 
(-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); - } - else { - lww = 0.0; - lvv = 0.0; - } - - // Sum the derivatives to the cumulative descriptor - if (lww >= 0.0) { - dxp += lvv; - mdxp += fabs(lvv); - } - else { - dxn += lvv; - mdxn += fabs(lvv); - } - - if (lvv >= 0.0) { - dyp += lww; - mdyp += fabs(lww); - } - else { - dyn += lww; - mdyn += fabs(lww); - } - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } -} - -//************************************************************************************* -//************************************************************************************* - -/** - * @brief This method computes the extended descriptor of the provided keypoint given the - * main orientation - * @param kpt Input keypoint - * @param desc Descriptor vector - * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional - * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and - * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 - */ -void GSURF_Descriptor_Invoker::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, float *desc) const -{ - - float len = 0.0, xf = 0.0, yf = 0.0; - float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; - float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; - float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; - float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; - float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; - float lvv = 0.0, lww = 0.0, modg = 0.0; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; - int dsize = 0, scale = 0, level = 0; - - std::vector& evolution_ = *_evolution; - - // Set the descriptor size and the sample and pattern sizes - dsize = 128; - sample_step = 5; - pattern_size = 10; - - // Get the information from the keypoint - yf = kpt.pt.y; - xf = kpt.pt.x; - scale = fRound(kpt.size / 2.0f); - angle = kpt.angle; - level = kpt.class_id; - co = cos(angle); - si = sin(angle); - - // Calculate descriptor for this interest point - for (int i = -pattern_size; i < pattern_size; i += sample_step) { - for (int j = -pattern_size; j < pattern_size; j += sample_step) { - - dxp = dxn = mdxp = mdxn = 0.0; - dyp = dyn = mdyp = mdyn = 0.0; - - for (int k = i; k < i + sample_step; k++) { - for (int l = j; l < j + sample_step; l++) { - - // Get the coordinates of the sample point on the rotated axis - sample_y = yf + (l*scale*co + k*scale*si); - sample_x = xf + (-l*scale*si + k*scale*co); - - y1 = (int)(sample_y - 0.5f); - x1 = (int)(sample_x - 0.5f); - - checkDescriptorLimits(x1, y1, options.img_width, options.img_height); - - y2 = (int)(sample_y + 0.5f); - x2 = (int)(sample_x + 0.5f); - - checkDescriptorLimits(x2, y2, options.img_width, options.img_height); - - fx = sample_x - x1; - fy = sample_y - y1; - - 
res1 = *(evolution_[level].Lx.ptr(y1)+x1); - res2 = *(evolution_[level].Lx.ptr(y1)+x2); - res3 = *(evolution_[level].Lx.ptr(y2)+x1); - res4 = *(evolution_[level].Lx.ptr(y2)+x2); - rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Ly.ptr(y1)+x1); - res2 = *(evolution_[level].Ly.ptr(y1)+x2); - res3 = *(evolution_[level].Ly.ptr(y2)+x1); - res4 = *(evolution_[level].Ly.ptr(y2)+x2); - ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - modg = pow(rx, 2) + pow(ry, 2); - - if (modg != 0.0) { - res1 = *(evolution_[level].Lxx.ptr(y1)+x1); - res2 = *(evolution_[level].Lxx.ptr(y1)+x2); - res3 = *(evolution_[level].Lxx.ptr(y2)+x1); - res4 = *(evolution_[level].Lxx.ptr(y2)+x2); - rxx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lxy.ptr(y1)+x1); - res2 = *(evolution_[level].Lxy.ptr(y1)+x2); - res3 = *(evolution_[level].Lxy.ptr(y2)+x1); - res4 = *(evolution_[level].Lxy.ptr(y2)+x2); - rxy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - res1 = *(evolution_[level].Lyy.ptr(y1)+x1); - res2 = *(evolution_[level].Lyy.ptr(y1)+x2); - res3 = *(evolution_[level].Lyy.ptr(y2)+x1); - res4 = *(evolution_[level].Lyy.ptr(y2)+x2); - ryy = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4; - - // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) - lww = (pow(rx, 2)*rxx + 2.0f*rx*rxy*ry + pow(ry, 2)*ryy) / (modg); - - // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) - lvv = (-2.0f*rx*rxy*ry + rxx*pow(ry, 2) + pow(rx, 2)*ryy) / (modg); - } - else { - lww = 0.0; - lvv = 0.0; - } - - // Sum the derivatives to the cumulative descriptor - if (lww >= 0.0) { - dxp += lvv; - mdxp += fabs(lvv); - } - else { - dxn += lvv; - mdxn += fabs(lvv); - } - - if (lvv >= 0.0) { - dyp += lww; - mdyp += fabs(lww); - } - else { - dyn += lww; - mdyn += fabs(lww); - } - } - } - - // Add the values to the descriptor vector - desc[dcount++] = dxp; - desc[dcount++] = dxn; - desc[dcount++] = mdxp; - desc[dcount++] = mdxn; - desc[dcount++] = dyp; - desc[dcount++] = dyn; - desc[dcount++] = mdyp; - desc[dcount++] = mdyn; - - // Store the current length^2 of the vector - len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + - dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; - } - } - - // convert to unit vector - len = sqrt(len); - - for (int i = 0; i < dsize; i++) { - desc[i] /= len; - } - - if (options.use_clipping_normalilzation) { - clippingDescriptor(desc, dsize, options.clipping_normalization_niter, options.clipping_normalization_ratio); - } -} - -//************************************************************************************* -//************************************************************************************* - /** * @brief This method performs a scalar non-linear diffusion step using AOS schemes * @param Ld Image at a given evolution step @@ -1911,7 +1252,7 @@ void GSURF_Descriptor_Invoker::Get_GSURF_Descriptor_128(const cv::KeyPoint &kpt, * @param stepsize Stepsize for the nonlinear diffusion evolution * @note If c is constant, the diffusion will be linear * If c is a matrix of the same size as Ld, the diffusion will be nonlinear - * The stepsize can be arbitrarilly large + * The stepsize can be arbitrarily large */ void KAZEFeatures::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float& stepsize) { diff --git 
a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index a14a9dd72c..2a7f24ed49 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -169,11 +169,8 @@ TEST(Features2d_Detector_Keypoints_Dense, validation) TEST(Features2d_Detector_Keypoints_KAZE, validation) { - CV_FeatureDetectorKeypointsTest test_gsurf(cv::Ptr(new cv::KAZE(cv::KAZE::DESCRIPTOR_GSURF, false, false))); - test_gsurf.safe_run(); - - CV_FeatureDetectorKeypointsTest test_msurf(cv::Ptr(new cv::KAZE(cv::KAZE::DESCRIPTOR_MSURF, false, false))); - test_msurf.safe_run(); + CV_FeatureDetectorKeypointsTest test(Algorithm::create("Feature2D.KAZE")); + test.safe_run(); } TEST(Features2d_Detector_Keypoints_AKAZE, validation) From 03db61b33bed021ad227f079a84e79922f49644b Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Sat, 10 May 2014 23:06:23 +0300 Subject: [PATCH 50/52] FixFix documentation warnings --- .../doc/feature_detection_and_description.rst | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index fa18d4e3bc..f321194883 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -251,10 +251,12 @@ We notice that for keypoint matching applications, image content has little effe :param verbose: Prints pair selection informations. KAZE ------ +---- .. ocv:class:: KAZE : public Feature2D -Class implementing the KAZE keypoint detector and descriptor extractor, described in [ABD12]_. :: +Class implementing the KAZE keypoint detector and descriptor extractor, described in [ABD12]_. + +.. [ABD12] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision (ECCV), Fiorenze, Italy, October 2012. KAZE::KAZE ---------- @@ -266,14 +268,15 @@ The KAZE constructor :param upright: Set to enable use of upright descriptors (non rotation-invariant). -.. [ABD12] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision (ECCV), Fiorenze, Italy, October 2012. AKAZE ----- .. ocv:class:: AKAZE : public Feature2D -Class implementing the AKAZE keypoint detector and descriptor extractor, described in [ANB13]_. :: +Class implementing the AKAZE keypoint detector and descriptor extractor, described in [ANB13]_. + +.. [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. class CV_EXPORTS_W AKAZE : public Feature2D { @@ -301,7 +304,3 @@ The AKAZE constructor :param descriptor_type: Type of the extracted descriptor. :param descriptor_size: Size of the descriptor in bits. 0 -> Full size :param descriptor_channels: Number of channels in the descriptor (1, 2, 3). - - - -.. [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. 
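Editor's note: the KAZE.cpp hunks earlier in this series drop the G-SURF descriptor variants, whose per-sample work is the accumulation of second-order gauge derivatives Lww (along the gradient direction) and Lvv (along the level-line direction), computed from bilinearly interpolated derivatives of the evolution image. For readers of the removed code, a minimal standalone sketch of that per-sample step is given below; the helper name and signature are illustrative only and are not part of the patch series. ::

    // Gauge derivatives at one sample point, given bilinearly interpolated
    // first-order (lx, ly) and second-order (lxx, lxy, lyy) derivatives of
    // the nonlinear scale-space image at the keypoint's evolution level.
    // Returns false and zeroes the outputs when the gradient vanishes,
    // mirroring the guard in the removed G-SURF code.
    static bool gaugeDerivatives(float lx, float ly,
                                 float lxx, float lxy, float lyy,
                                 float& lww, float& lvv)
    {
        const float modg = lx*lx + ly*ly;   // squared gradient magnitude
        if (modg == 0.0f) {
            lww = 0.0f;
            lvv = 0.0f;
            return false;
        }
        // Lww = (Lx^2*Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
        lww = (lx*lx*lxx + 2.0f*lx*lxy*ly + ly*ly*lyy) / modg;
        // Lvv = (Lxx*Ly^2 - 2*Lx*Lxy*Ly + Lx^2*Lyy) / (Lx^2 + Ly^2)
        lvv = (lxx*ly*ly - 2.0f*lx*lxy*ly + lx*lx*lyy) / modg;
        return true;
    }

In the removed descriptors these values are summed (with their absolute values) over each grid cell, and the resulting vector is normalized to unit length, optionally followed by the iterative clipping normalization.
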
From 87972d0d7c8b663963d13398b1b0e4dd600ef55d Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Tue, 13 May 2014 13:15:24 +0300 Subject: [PATCH 51/52] Fix "WARNING: Block quote ends without a blank line" --- modules/features2d/doc/feature_detection_and_description.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index f321194883..5be31306ee 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -288,11 +288,8 @@ Class implementing the AKAZE keypoint detector and descriptor extractor, describ DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation DESCRIPTOR_MLDB = 5 }; - CV_WRAP AKAZE(); explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3); - - ... }; AKAZE::AKAZE From 12e1d7fc2b2166b4a296410f499d66f1681d87f8 Mon Sep 17 00:00:00 2001 From: Ievgen Khvedchenia Date: Tue, 13 May 2014 21:07:30 +0300 Subject: [PATCH 52/52] Fix "WARNING: Block quote ends without a blank line" --- .../features2d/doc/feature_detection_and_description.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/modules/features2d/doc/feature_detection_and_description.rst b/modules/features2d/doc/feature_detection_and_description.rst index 5be31306ee..409fe54b7a 100644 --- a/modules/features2d/doc/feature_detection_and_description.rst +++ b/modules/features2d/doc/feature_detection_and_description.rst @@ -269,14 +269,11 @@ The KAZE constructor - AKAZE ----- .. ocv:class:: AKAZE : public Feature2D -Class implementing the AKAZE keypoint detector and descriptor extractor, described in [ANB13]_. - -.. [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. +Class implementing the AKAZE keypoint detector and descriptor extractor, described in [ANB13]_. :: class CV_EXPORTS_W AKAZE : public Feature2D { @@ -292,6 +289,8 @@ Class implementing the AKAZE keypoint detector and descriptor extractor, describ explicit AKAZE(DESCRIPTOR_TYPE descriptor_type, int descriptor_size = 0, int descriptor_channels = 3); }; +.. [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013. + AKAZE::AKAZE ------------ The AKAZE constructor
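
Editor's note: as a companion to the documentation fixed in these last patches, a minimal usage sketch of the new classes is given below. It assumes the generic Feature2D detect/compute interface and the default constructors shown in the class listing above; the image paths and the matcher choice are illustrative and not part of the patch series. ::

    #include <vector>
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/features2d/features2d.hpp>

    int main()
    {
        cv::Mat img1 = cv::imread("img1.png", cv::IMREAD_GRAYSCALE);
        cv::Mat img2 = cv::imread("img2.png", cv::IMREAD_GRAYSCALE);

        // AKAZE with default options (full-size MLDB descriptor, 3 channels)
        cv::AKAZE akaze;
        std::vector<cv::KeyPoint> kpts1, kpts2;
        cv::Mat desc1, desc2;

        // Detect keypoints in the nonlinear scale space, then describe them
        akaze.detect(img1, kpts1);
        akaze.compute(img1, kpts1, desc1);
        akaze.detect(img2, kpts2);
        akaze.compute(img2, kpts2, desc2);

        // MLDB descriptors are binary, so match them with the Hamming norm
        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<cv::DMatch> matches;
        matcher.match(desc1, desc2, matches);

        return 0;
    }

The floating-point KAZE descriptors documented above would instead be matched with NORM_L2.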