Merge pull request #8869 from hrnr:akaze_part1

[GSOC] Speeding-up AKAZE, part #1 (#8869)

* ts: expand arguments before stringifications in CV_ENUM and CV_FLAGS

added protective macros to always force macro expansion of arguments. This allows using CV_ENUM and CV_FLAGS with macro arguments.
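
A minimal sketch of the trick (illustrative names, not the actual ts code): one extra level of macro indirection makes the preprocessor expand an argument before it is stringified.

#define STRINGIFY_IMPL(x) #x
#define STRINGIFY(x) STRINGIFY_IMPL(x)  // extra level expands x first

#define MY_FLAG 0x01
// STRINGIFY_IMPL(MY_FLAG) yields "MY_FLAG"
// STRINGIFY(MY_FLAG)      yields "0x01"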

* feature2d: unify perf test

use the same test for all detectors/descriptors we have.

* added AKAZE tests

* features2d: extend perf tests

* add BRISK, KAZE, MSER
* run all extract tests on AKAZE keypoints, so that extraction speed is comparable across descriptors

* feature2d: rework opencl perf tests

use the same configuration as cpu tests

* feature2d: fix descriptors allocation for AKAZE and KAZE

fix crash when descriptors are UMat
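
The shape of the fix (see the AKAZE and KAZE hunks below): compute into a temporary Mat and copy it into the OutputArray, since getMatRef() is only valid for Mat-backed outputs.

Mat desc;                                  // temporary CPU matrix
impl.Compute_Descriptors(keypoints, desc);
desc.copyTo(descriptors);                  // copies into either Mat or UMat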

* feature2d: name enum to fix build with older gcc

* Revert "ts: expand arguments before stringifications in CV_ENUM and CV_FLAGS"

This reverts commit 19538cac1e.

This wasn't a great idea after all. There are a lot of flags implemented as #define that we don't want to expand.
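
Illustrative example of the problem: if a flag is itself a #define, forced expansion destroys the readable name.

#define FLAG_FOO 0x01   // hypothetical flag implemented as #define
// with forced expansion, stringification yields "0x01" instead of "FLAG_FOO"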

* feature2d: fix expansion problems with CV_ENUM in perf

* expand arguments before passing them to CV_ENUM. This does not require modifying CV_ENUM.
* added include guards to `perf_feature2d.hpp`
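
The expansion now happens at the call site; the relevant lines from perf_feature2d.hpp (shown in full below):

#define CV_ENUM_EXPAND(name, ...) CV_ENUM(name, __VA_ARGS__)
enum Feature2DVals { DETECTORS_ONLY, DETECTORS_EXTRACTORS };
CV_ENUM_EXPAND(Feature2DType, DETECTORS_ONLY, DETECTORS_EXTRACTORS)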

* feature2d: fix crash in AKAZE when using KAZE descriptors

* out-of-bound access in Get_MSURF_Descriptor_64
* this happened reliably when running on provided keypoints (not computed by the same instance)
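
The guard added in Get_MSURF_Descriptor_64 (full hunk below) clamps the interpolation sample coordinates to the image bounds:

const MatSize& size = evolution[level].Lx.size;
y1 = min(max(0, y1), size[0] - 1);   // clamp rows
x1 = min(max(0, x1), size[1] - 1);   // clamp columns
y2 = min(max(0, y2), size[0] - 1);
x2 = min(max(0, x2), size[1] - 1);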

* feature2d: added regression tests for AKAZE

* test with both MLDB and KAZE keypoints

* feature2d: do not compute keypoints orientation twice

* always compute keypoint orientation when computing keypoints
* do not recompute keypoint orientation when computing descriptors

this allows testing detection and extraction separately
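
After the change (AKAZE wrapper hunk below), the detection path reads:

if (!useProvidedKeypoints)
{
    impl.Feature_Detection(keypoints);
    impl.Compute_Keypoints_Orientation(keypoints); // orientation assigned exactly once
}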

* features2d: fix crash in AKAZE

* out-of-bound reads near the image edge
* same as the bug in KAZE descriptors

* feature2d: refactor invariance testing

* split detectors and descriptors tests
* rewrite to google test to simplify debugging
* add tests for AKAZE and one test for ORB

* stitching: add tests with AKAZE feature finder

* added basic stitching cpu and ocl tests
* fix bug in AKAZE wrapper for stitching pipeline causing lots of
! OPENCV warning: getUMat()/getMat() call chain possible problem.
!                 Base object is dead, while nested/derived object is still alive or processed.
!                 Please check lifetime of UMat/Mat objects!
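
A hypothetical minimal reproducer of that warning (not the actual stitching code): a Mat header obtained from a UMat must not outlive its parent.

cv::Mat bad()
{
    cv::UMat tmp(100, 100, CV_8U, cv::Scalar(0));
    return tmp.getMat(cv::ACCESS_READ); // header still references `tmp`, which dies here
}
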
This commit is contained in:
Jiri Horner 2017-06-21 13:33:09 +02:00 committed by Alexander Alekhin
parent 437ca0b62a
commit 5f20e802d2
21 changed files with 849 additions and 1085 deletions

View File

@@ -1,47 +0,0 @@
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace cvtest {
namespace ocl {
enum { TYPE_5_8 =FastFeatureDetector::TYPE_5_8, TYPE_7_12 = FastFeatureDetector::TYPE_7_12, TYPE_9_16 = FastFeatureDetector::TYPE_9_16 };
CV_ENUM(FastType, TYPE_5_8, TYPE_7_12)
typedef std::tr1::tuple<string, FastType> File_Type_t;
typedef TestBaseWithParam<File_Type_t> FASTFixture;
#define FAST_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
OCL_PERF_TEST_P(FASTFixture, FastDetect, testing::Combine(
testing::Values(FAST_IMAGES),
FastType::all()
))
{
string filename = getDataPath(get<0>(GetParam()));
int type = get<1>(GetParam());
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
if (mframe.empty())
FAIL() << "Unable to load source image " << filename;
UMat frame;
mframe.copyTo(frame);
declare.in(frame);
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, true, type);
ASSERT_FALSE( fd.empty() );
vector<KeyPoint> points;
OCL_TEST_CYCLE() fd->detect(frame, points);
SANITY_CHECK_KEYPOINTS(points);
}
} // ocl
} // cvtest
#endif // HAVE_OPENCL

View File

@@ -0,0 +1,81 @@
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#include "../perf_feature2d.hpp"
#ifdef HAVE_OPENCL
namespace cvtest {
namespace ocl {
OCL_PERF_TEST_P(feature2d, detect, testing::Combine(Feature2DType::all(), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat mimg = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(mimg.empty());
ASSERT_TRUE(detector);
UMat img, mask;
mimg.copyTo(img);
declare.in(img);
vector<KeyPoint> points;
OCL_TEST_CYCLE() detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(feature2d, extract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = AKAZE::create();
Ptr<Feature2D> extractor = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat mimg = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(mimg.empty());
ASSERT_TRUE(extractor);
UMat img, mask;
mimg.copyTo(img);
declare.in(img);
vector<KeyPoint> points;
detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
UMat descriptors;
OCL_TEST_CYCLE() extractor->compute(img, points, descriptors);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(feature2d, detectAndExtract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat mimg = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(mimg.empty());
ASSERT_TRUE(detector);
UMat img, mask;
mimg.copyTo(img);
declare.in(img);
vector<KeyPoint> points;
UMat descriptors;
OCL_TEST_CYCLE() detector->detectAndCompute(img, mask, points, descriptors, false);
EXPECT_GT(points.size(), 20u);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
} // ocl
} // cvtest
#endif // HAVE_OPENCL

View File

@@ -1,87 +0,0 @@
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace cvtest {
namespace ocl {
typedef ::perf::TestBaseWithParam<std::string> ORBFixture;
#define ORB_IMAGES OCL_PERF_ENUM("cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png", "stitching/a3.png")
OCL_PERF_TEST_P(ORBFixture, ORB_Detect, ORB_IMAGES)
{
string filename = getDataPath(GetParam());
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
if (mframe.empty())
FAIL() << "Unable to load source image " << filename;
UMat frame, mask;
mframe.copyTo(frame);
declare.in(frame);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
OCL_TEST_CYCLE() detector->detect(frame, points, mask);
EXPECT_GT(points.size(), 20u);
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(ORBFixture, ORB_Extract, ORB_IMAGES)
{
string filename = getDataPath(GetParam());
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
if (mframe.empty())
FAIL() << "Unable to load source image " << filename;
UMat mask, frame;
mframe.copyTo(frame);
declare.in(frame);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector->detect(frame, points, mask);
EXPECT_GT(points.size(), 20u);
UMat descriptors;
OCL_TEST_CYCLE() detector->compute(frame, points, descriptors);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(ORBFixture, ORB_Full, ORB_IMAGES)
{
string filename = getDataPath(GetParam());
Mat mframe = imread(filename, IMREAD_GRAYSCALE);
if (mframe.empty())
FAIL() << "Unable to load source image " << filename;
UMat mask, frame;
mframe.copyTo(frame);
declare.in(frame);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
UMat descriptors;
OCL_TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
EXPECT_GT(points.size(), 20u);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
} // ocl
} // cvtest
#endif // HAVE_OPENCL

View File

@@ -1,42 +0,0 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
enum { AGAST_5_8 = AgastFeatureDetector::AGAST_5_8, AGAST_7_12d = AgastFeatureDetector::AGAST_7_12d,
AGAST_7_12s = AgastFeatureDetector::AGAST_7_12s, OAST_9_16 = AgastFeatureDetector::OAST_9_16 };
CV_ENUM(AgastType, AGAST_5_8, AGAST_7_12d,
AGAST_7_12s, OAST_9_16)
typedef std::tr1::tuple<string, AgastType> File_Type_t;
typedef perf::TestBaseWithParam<File_Type_t> agast;
#define AGAST_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(agast, detect, testing::Combine(
testing::Values(AGAST_IMAGES),
AgastType::all()
))
{
string filename = getDataPath(get<0>(GetParam()));
int type = get<1>(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
if (frame.empty())
FAIL() << "Unable to load source image " << filename;
declare.in(frame);
Ptr<FeatureDetector> fd = AgastFeatureDetector::create(70, true, type);
ASSERT_FALSE( fd.empty() );
vector<KeyPoint> points;
TEST_CYCLE() fd->detect(frame, points);
SANITY_CHECK_KEYPOINTS(points);
}

View File

@@ -1,63 +0,0 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
enum { TYPE_5_8 =FastFeatureDetector::TYPE_5_8, TYPE_7_12 = FastFeatureDetector::TYPE_7_12, TYPE_9_16 = FastFeatureDetector::TYPE_9_16 };
CV_ENUM(FastType, TYPE_5_8, TYPE_7_12, TYPE_9_16)
typedef std::tr1::tuple<string, FastType> File_Type_t;
typedef perf::TestBaseWithParam<File_Type_t> fast;
#define FAST_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(fast, detect, testing::Combine(
testing::Values(FAST_IMAGES),
FastType::all()
))
{
string filename = getDataPath(get<0>(GetParam()));
int type = get<1>(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
if (frame.empty())
FAIL() << "Unable to load source image " << filename;
declare.in(frame);
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, true, type);
ASSERT_FALSE( fd.empty() );
vector<KeyPoint> points;
TEST_CYCLE() fd->detect(frame, points);
SANITY_CHECK_KEYPOINTS(points);
}
PERF_TEST_P(fast, detect_ovx, testing::Combine(
testing::Values(FAST_IMAGES),
FastType::all()
))
{
string filename = getDataPath(get<0>(GetParam()));
int type = get<1>(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
if (frame.empty())
FAIL() << "Unable to load source image " << filename;
declare.in(frame);
Ptr<FeatureDetector> fd = FastFeatureDetector::create(20, false, type);
ASSERT_FALSE(fd.empty());
vector<KeyPoint> points;
TEST_CYCLE() fd->detect(frame, points);
SANITY_CHECK_KEYPOINTS(points);
}

View File

@@ -0,0 +1,66 @@
#include "perf_feature2d.hpp"
PERF_TEST_P(feature2d, detect, testing::Combine(Feature2DType::all(), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(detector);
declare.in(img);
Mat mask;
vector<KeyPoint> points;
TEST_CYCLE() detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(feature2d, extract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = AKAZE::create();
Ptr<Feature2D> extractor = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(extractor);
declare.in(img);
Mat mask;
vector<KeyPoint> points;
detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
Mat descriptors;
TEST_CYCLE() extractor->compute(img, points, descriptors);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(feature2d, detectAndExtract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(detector);
declare.in(img);
Mat mask;
vector<KeyPoint> points;
Mat descriptors;
TEST_CYCLE() detector->detectAndCompute(img, mask, points, descriptors, false);
EXPECT_GT(points.size(), 20u);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}

View File

@@ -0,0 +1,86 @@
#ifndef __OPENCV_PERF_FEATURE2D_HPP__
#define __OPENCV_PERF_FEATURE2D_HPP__
#include "perf_precomp.hpp"
/* configuration for tests of detectors/descriptors. shared between ocl and cpu tests. */
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
// detectors/descriptors configurations to test
#define DETECTORS_ONLY \
FAST_DEFAULT, FAST_20_TRUE_TYPE5_8, FAST_20_TRUE_TYPE7_12, FAST_20_TRUE_TYPE9_16, \
FAST_20_FALSE_TYPE5_8, FAST_20_FALSE_TYPE7_12, FAST_20_FALSE_TYPE9_16, \
\
AGAST_DEFAULT, AGAST_5_8, AGAST_7_12d, AGAST_7_12s, AGAST_OAST_9_16, \
\
MSER_DEFAULT
#define DETECTORS_EXTRACTORS \
ORB_DEFAULT, ORB_1500_13_1, \
AKAZE_DEFAULT, AKAZE_DESCRIPTOR_KAZE, \
BRISK_DEFAULT, \
KAZE_DEFAULT
#define CV_ENUM_EXPAND(name, ...) CV_ENUM(name, __VA_ARGS__)
enum Feature2DVals { DETECTORS_ONLY, DETECTORS_EXTRACTORS };
CV_ENUM_EXPAND(Feature2DType, DETECTORS_ONLY, DETECTORS_EXTRACTORS)
typedef std::tr1::tuple<Feature2DType, string> Feature2DType_String_t;
typedef perf::TestBaseWithParam<Feature2DType_String_t> feature2d;
#define TEST_IMAGES testing::Values(\
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png")
static inline Ptr<Feature2D> getFeature2D(Feature2DType type)
{
switch(type) {
case ORB_DEFAULT:
return ORB::create();
case ORB_1500_13_1:
return ORB::create(1500, 1.3f, 1);
case FAST_DEFAULT:
return FastFeatureDetector::create();
case FAST_20_TRUE_TYPE5_8:
return FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_5_8);
case FAST_20_TRUE_TYPE7_12:
return FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_7_12);
case FAST_20_TRUE_TYPE9_16:
return FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_9_16);
case FAST_20_FALSE_TYPE5_8:
return FastFeatureDetector::create(20, false, FastFeatureDetector::TYPE_5_8);
case FAST_20_FALSE_TYPE7_12:
return FastFeatureDetector::create(20, false, FastFeatureDetector::TYPE_7_12);
case FAST_20_FALSE_TYPE9_16:
return FastFeatureDetector::create(20, false, FastFeatureDetector::TYPE_9_16);
case AGAST_DEFAULT:
return AgastFeatureDetector::create();
case AGAST_5_8:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::AGAST_5_8);
case AGAST_7_12d:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::AGAST_7_12d);
case AGAST_7_12s:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::AGAST_7_12s);
case AGAST_OAST_9_16:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::OAST_9_16);
case AKAZE_DEFAULT:
return AKAZE::create();
case AKAZE_DESCRIPTOR_KAZE:
return AKAZE::create(AKAZE::DESCRIPTOR_KAZE);
case BRISK_DEFAULT:
return BRISK::create();
case KAZE_DEFAULT:
return KAZE::create();
case MSER_DEFAULT:
return MSER::create();
default:
return Ptr<Feature2D>();
}
}
#endif // __OPENCV_PERF_FEATURE2D_HPP__

View File

@@ -1,80 +0,0 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
typedef perf::TestBaseWithParam<std::string> orb;
#define ORB_IMAGES \
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png"
PERF_TEST_P(orb, detect, testing::Values(ORB_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
if (frame.empty())
FAIL() << "Unable to load source image " << filename;
Mat mask;
declare.in(frame);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
TEST_CYCLE() detector->detect(frame, points, mask);
EXPECT_GT(points.size(), 20u);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(orb, extract, testing::Values(ORB_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
if (frame.empty())
FAIL() << "Unable to load source image " << filename;
Mat mask;
declare.in(frame);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
detector->detect(frame, points, mask);
EXPECT_GT(points.size(), 20u);
Mat descriptors;
TEST_CYCLE() detector->compute(frame, points, descriptors);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(orb, full, testing::Values(ORB_IMAGES))
{
string filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
if (frame.empty())
FAIL() << "Unable to load source image " << filename;
Mat mask;
declare.in(frame);
Ptr<ORB> detector = ORB::create(1500, 1.3f, 1);
vector<KeyPoint> points;
Mat descriptors;
TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false);
EXPECT_GT(points.size(), 20u);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}

View File

@@ -200,8 +200,7 @@ namespace cv
if (!useProvidedKeypoints)
{
impl.Feature_Detection(keypoints);
- if( !descriptors.needed() )
- impl.Compute_Keypoints_Orientation(keypoints);
+ impl.Compute_Keypoints_Orientation(keypoints);
}
if (!mask.empty())
@@ -211,8 +210,10 @@ namespace cv
if( descriptors.needed() )
{
- Mat& desc = descriptors.getMatRef();
+ Mat desc;
impl.Compute_Descriptors(keypoints, desc);
+ // TODO optimize this copy
+ desc.copyTo(descriptors);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));

View File

@@ -151,8 +151,9 @@ namespace cv
if( descriptors.needed() )
{
- Mat& desc = descriptors.getMatRef();
+ Mat desc;
impl.Feature_Description(keypoints, desc);
+ desc.copyTo(descriptors);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));

View File

@@ -549,7 +549,6 @@ public:
{
for (int i = range.start; i < range.end; i++)
{
- AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_);
Get_MSURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i));
}
}
@@ -643,7 +642,6 @@ public:
{
for (int i = range.start; i < range.end; i++)
{
- AKAZEFeatures::Compute_Main_Orientation((*keypoints_)[i], *evolution_);
Get_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr<unsigned char>(i));
}
}
@@ -1055,19 +1053,28 @@ void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const KeyPoint& kpt, f
y2 = fRound(sample_y + 0.5f);
x2 = fRound(sample_x + 0.5f);
+ // fix crash: indexing with out-of-bounds index, this might happen near the edges of image
+ // clip values so they fit into the image
+ const MatSize& size = evolution[level].Lx.size;
+ y1 = min(max(0, y1), size[0] - 1);
+ x1 = min(max(0, x1), size[1] - 1);
+ y2 = min(max(0, y2), size[0] - 1);
+ x2 = min(max(0, x2), size[1] - 1);
+ CV_DbgAssert(evolution[level].Lx.size == evolution[level].Ly.size);
fx = sample_x - x1;
fy = sample_y - y1;
- res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
- res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
- res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
- res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
+ res1 = *(evolution[level].Lx.ptr<float>(y1, x1));
+ res2 = *(evolution[level].Lx.ptr<float>(y1, x2));
+ res3 = *(evolution[level].Lx.ptr<float>(y2, x1));
+ res4 = *(evolution[level].Lx.ptr<float>(y2, x2));
rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
- res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
- res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
- res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
- res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
+ res1 = *(evolution[level].Ly.ptr<float>(y1, x1));
+ res2 = *(evolution[level].Ly.ptr<float>(y1, x2));
+ res3 = *(evolution[level].Ly.ptr<float>(y2, x1));
+ res4 = *(evolution[level].Ly.ptr<float>(y2, x2));
ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
// Get the x and y derivatives on the rotated axis
@@ -1228,12 +1235,20 @@ void MLDB_Full_Descriptor_Invoker::MLDB_Fill_Values(float* values, int sample_st
int y1 = fRound(sample_y);
int x1 = fRound(sample_x);
- float ri = *(evolution[level].Lt.ptr<float>(y1)+x1);
+ // fix crash: indexing with out-of-bounds index, this might happen near the edges of image
+ // clip values so they fit into the image
+ const MatSize& size = evolution[level].Lt.size;
+ CV_DbgAssert(size == evolution[level].Lx.size &&
+ size == evolution[level].Ly.size);
+ y1 = min(max(0, y1), size[0] - 1);
+ x1 = min(max(0, x1), size[1] - 1);
+ float ri = *(evolution[level].Lt.ptr<float>(y1, x1));
di += ri;
if(chan > 1) {
- float rx = *(evolution[level].Lx.ptr<float>(y1)+x1);
- float ry = *(evolution[level].Ly.ptr<float>(y1)+x1);
+ float rx = *(evolution[level].Lx.ptr<float>(y1, x1));
+ float ry = *(evolution[level].Ly.ptr<float>(y1, x1));
if (chan == 2) {
dx += sqrtf(rx*rx + ry*ry);
}

View File

@@ -0,0 +1,192 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
#include "test_invariance_utils.hpp"
using namespace std;
using namespace cv;
using std::tr1::make_tuple;
using std::tr1::get;
using namespace testing;
#define SHOW_DEBUG_LOG 0
typedef std::tr1::tuple<std::string, Ptr<FeatureDetector>, Ptr<DescriptorExtractor>, float>
String_FeatureDetector_DescriptorExtractor_Float_t;
const static std::string IMAGE_TSUKUBA = "features2d/tsukuba.png";
const static std::string IMAGE_BIKES = "detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
#define Value(...) Values(String_FeatureDetector_DescriptorExtractor_Float_t(__VA_ARGS__))
static
void rotateKeyPoints(const vector<KeyPoint>& src, const Mat& H, float angle, vector<KeyPoint>& dst)
{
// assumes H is the rotation returned by rotateImage() and angle is the value passed to rotateImage()
vector<Point2f> srcCenters, dstCenters;
KeyPoint::convert(src, srcCenters);
perspectiveTransform(srcCenters, dstCenters, H);
dst = src;
for(size_t i = 0; i < dst.size(); i++)
{
dst[i].pt = dstCenters[i];
float dstAngle = src[i].angle + angle;
if(dstAngle >= 360.f)
dstAngle -= 360.f;
dst[i].angle = dstAngle;
}
}
class DescriptorInvariance : public TestWithParam<String_FeatureDetector_DescriptorExtractor_Float_t>
{
protected:
virtual void SetUp() {
// Read test data
const std::string filename = cvtest::TS::ptr()->get_data_path() + get<0>(GetParam());
image0 = imread(filename);
ASSERT_FALSE(image0.empty()) << "couldn't read input image";
featureDetector = get<1>(GetParam());
descriptorExtractor = get<2>(GetParam());
minInliersRatio = get<3>(GetParam());
}
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
float minInliersRatio;
Mat image0;
};
typedef DescriptorInvariance DescriptorScaleInvariance;
typedef DescriptorInvariance DescriptorRotationInvariance;
TEST_P(DescriptorRotationInvariance, rotation)
{
Mat image1, mask1;
const int borderSize = 16;
Mat mask0(image0.size(), CV_8UC1, Scalar(0));
mask0(Rect(borderSize, borderSize, mask0.cols - 2*borderSize, mask0.rows - 2*borderSize)).setTo(Scalar(255));
vector<KeyPoint> keypoints0;
Mat descriptors0;
featureDetector->detect(image0, keypoints0, mask0);
std::cout << "Intial keypoints: " << keypoints0.size() << std::endl;
EXPECT_GE(keypoints0.size(), 15u);
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(descriptorExtractor->defaultNorm());
const float minIntersectRatio = 0.5f;
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, mask0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
rotateKeyPoints(keypoints0, H, static_cast<float>(angle), keypoints1);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints1[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints1[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
EXPECT_GE(descInliersRatio, minInliersRatio);
#if SHOW_DEBUG_LOG
std::cout
<< "angle = " << angle
<< ", keypoints = " << keypoints1.size()
<< ", descInliersRatio = " << static_cast<float>(descInliersCount) / keypoints0.size()
<< std::endl;
#endif
}
}
TEST_P(DescriptorScaleInvariance, scale)
{
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
EXPECT_GE(keypoints0.size(), 15u);
Mat descriptors0;
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(descriptorExtractor->defaultNorm());
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale);
vector<KeyPoint> keypoints1;
scaleKeyPoints(keypoints0, keypoints1, 1.0f/scale);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
const float minIntersectRatio = 0.5f;
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints0[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints0[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
EXPECT_GE(descInliersRatio, minInliersRatio);
#if SHOW_DEBUG_LOG
std::cout
<< "scale = " << scale
<< ", descInliersRatio = " << static_cast<float>(descInliersCount) / keypoints0.size()
<< std::endl;
#endif
}
}
/*
* Descriptors's rotation invariance check
*/
INSTANTIATE_TEST_CASE_P(BRISK, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, BRISK::create(), BRISK::create(), 0.99f));
INSTANTIATE_TEST_CASE_P(ORB, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, ORB::create(), ORB::create(), 0.99f));
INSTANTIATE_TEST_CASE_P(AKAZE, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, AKAZE::create(), AKAZE::create(), 0.99f));
INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, AKAZE::create(AKAZE::DESCRIPTOR_KAZE), AKAZE::create(AKAZE::DESCRIPTOR_KAZE), 0.002f));
/*
* Descriptor's scale invariance check
*/
INSTANTIATE_TEST_CASE_P(AKAZE, DescriptorScaleInvariance,
Value(IMAGE_BIKES, AKAZE::create(), AKAZE::create(), 0.6f));
INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DescriptorScaleInvariance,
Value(IMAGE_BIKES, AKAZE::create(AKAZE::DESCRIPTOR_KAZE), AKAZE::create(AKAZE::DESCRIPTOR_KAZE), 0.0004f));

View File

@@ -350,6 +350,14 @@ TEST( Features2d_DescriptorExtractor_AKAZE, regression )
test.safe_run();
}
+ TEST( Features2d_DescriptorExtractor_AKAZE_DESCRIPTOR_KAZE, regression )
+ {
+ CV_DescriptorExtractorTest< L2<float> > test( "descriptor-akaze-with-kaze-desc", 0.03f,
+ AKAZE::create(AKAZE::DESCRIPTOR_KAZE),
+ L2<float>(), AKAZE::create(AKAZE::DESCRIPTOR_KAZE));
+ test.safe_run();
+ }
TEST( Features2d_DescriptorExtractor, batch )
{
string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");

View File

@@ -0,0 +1,255 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
#include "test_invariance_utils.hpp"
using namespace std;
using namespace cv;
using std::tr1::make_tuple;
using std::tr1::get;
using namespace testing;
#define SHOW_DEBUG_LOG 0
typedef std::tr1::tuple<std::string, Ptr<FeatureDetector>, float, float> String_FeatureDetector_Float_Float_t;
const static std::string IMAGE_TSUKUBA = "features2d/tsukuba.png";
const static std::string IMAGE_BIKES = "detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
#define Value(...) Values(String_FeatureDetector_Float_Float_t(__VA_ARGS__))
static
void matchKeyPoints(const vector<KeyPoint>& keypoints0, const Mat& H,
const vector<KeyPoint>& keypoints1,
vector<DMatch>& matches)
{
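// greedy matching: project keypoints0 through H (if given), then pair each
// one with the not-yet-used keypoint1 whose support circle overlaps it most;
// DMatch::distance stores the overlap ratio (higher is better here)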
vector<Point2f> points0;
KeyPoint::convert(keypoints0, points0);
Mat points0t;
if(H.empty())
points0t = Mat(points0);
else
perspectiveTransform(Mat(points0), points0t, H);
matches.clear();
vector<uchar> usedMask(keypoints1.size(), 0);
for(int i0 = 0; i0 < static_cast<int>(keypoints0.size()); i0++)
{
int nearestPointIndex = -1;
float maxIntersectRatio = 0.f;
const float r0 = 0.5f * keypoints0[i0].size;
for(size_t i1 = 0; i1 < keypoints1.size(); i1++)
{
if(nearestPointIndex >= 0 && usedMask[i1])
continue;
float r1 = 0.5f * keypoints1[i1].size;
float intersectRatio = calcIntersectRatio(points0t.at<Point2f>(i0), r0,
keypoints1[i1].pt, r1);
if(intersectRatio > maxIntersectRatio)
{
maxIntersectRatio = intersectRatio;
nearestPointIndex = static_cast<int>(i1);
}
}
matches.push_back(DMatch(i0, nearestPointIndex, maxIntersectRatio));
if(nearestPointIndex >= 0)
usedMask[nearestPointIndex] = 1;
}
}
class DetectorInvariance : public TestWithParam<String_FeatureDetector_Float_Float_t>
{
protected:
virtual void SetUp() {
// Read test data
const std::string filename = cvtest::TS::ptr()->get_data_path() + get<0>(GetParam());
image0 = imread(filename);
ASSERT_FALSE(image0.empty()) << "couldn't read input image";
featureDetector = get<1>(GetParam());
minKeyPointMatchesRatio = get<2>(GetParam());
minInliersRatio = get<3>(GetParam());
}
Ptr<FeatureDetector> featureDetector;
float minKeyPointMatchesRatio;
float minInliersRatio;
Mat image0;
};
typedef DetectorInvariance DetectorScaleInvariance;
typedef DetectorInvariance DetectorRotationInvariance;
TEST_P(DetectorRotationInvariance, rotation)
{
Mat image1, mask1;
const int borderSize = 16;
Mat mask0(image0.size(), CV_8UC1, Scalar(0));
mask0(Rect(borderSize, borderSize, mask0.cols - 2*borderSize, mask0.rows - 2*borderSize)).setTo(Scalar(255));
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0, mask0);
EXPECT_GE(keypoints0.size(), 15u);
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, mask0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
featureDetector->detect(image1, keypoints1, mask1);
vector<DMatch> matches;
matchKeyPoints(keypoints0, H, keypoints1, matches);
int angleInliersCount = 0;
const float minIntersectRatio = 0.5f;
int keyPointMatchesCount = 0;
for(size_t m = 0; m < matches.size(); m++)
{
if(matches[m].distance < minIntersectRatio)
continue;
keyPointMatchesCount++;
// Check whether this inlier has a consistent angle
const float maxAngleDiff = 15.f; // grad
float angle0 = keypoints0[matches[m].queryIdx].angle;
float angle1 = keypoints1[matches[m].trainIdx].angle;
ASSERT_FALSE(angle0 == -1 || angle1 == -1) << "Given FeatureDetector is not rotation invariant, it can not be tested here.";
ASSERT_GE(angle0, 0.f);
ASSERT_LT(angle0, 360.f);
ASSERT_GE(angle1, 0.f);
ASSERT_LT(angle1, 360.f);
float rotAngle0 = angle0 + angle;
if(rotAngle0 >= 360.f)
rotAngle0 -= 360.f;
float angleDiff = std::max(rotAngle0, angle1) - std::min(rotAngle0, angle1);
angleDiff = std::min(angleDiff, static_cast<float>(360.f - angleDiff));
ASSERT_GE(angleDiff, 0.f);
bool isAngleCorrect = angleDiff < maxAngleDiff;
if(isAngleCorrect)
angleInliersCount++;
}
float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints0.size();
EXPECT_GE(keyPointMatchesRatio, minKeyPointMatchesRatio) << "angle: " << angle;
if(keyPointMatchesCount)
{
float angleInliersRatio = static_cast<float>(angleInliersCount) / keyPointMatchesCount;
EXPECT_GE(angleInliersRatio, minInliersRatio) << "angle: " << angle;
}
#if SHOW_DEBUG_LOG
std::cout
<< "angle = " << angle
<< ", keypoints = " << keypoints1.size()
<< ", keyPointMatchesRatio = " << keyPointMatchesRatio
<< ", angleInliersRatio = " << (keyPointMatchesCount ? (static_cast<float>(angleInliersCount) / keyPointMatchesCount) : 0)
<< std::endl;
#endif
}
}
TEST_P(DetectorScaleInvariance, scale)
{
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
EXPECT_GE(keypoints0.size(), 15u);
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale);
vector<KeyPoint> keypoints1, osiKeypoints1; // osi - original size image
featureDetector->detect(image1, keypoints1);
EXPECT_GE(keypoints1.size(), 15u);
EXPECT_LE(keypoints1.size(), keypoints0.size()) << "Strange behavior of the detector. "
"It gives more points count in an image of the smaller size.";
scaleKeyPoints(keypoints1, osiKeypoints1, scale);
vector<DMatch> matches;
// image1 is query image (it's reduced image0)
// image0 is train image
matchKeyPoints(osiKeypoints1, Mat(), keypoints0, matches);
const float minIntersectRatio = 0.5f;
int keyPointMatchesCount = 0;
int scaleInliersCount = 0;
for(size_t m = 0; m < matches.size(); m++)
{
if(matches[m].distance < minIntersectRatio)
continue;
keyPointMatchesCount++;
// Check whether this inlier has a consistent size
const float maxSizeDiff = 0.8f;//0.9f; // grad
float size0 = keypoints0[matches[m].trainIdx].size;
float size1 = osiKeypoints1[matches[m].queryIdx].size;
ASSERT_GT(size0, 0);
ASSERT_GT(size1, 0);
if(std::min(size0, size1) > maxSizeDiff * std::max(size0, size1))
scaleInliersCount++;
}
float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints1.size();
EXPECT_GE(keyPointMatchesRatio, minKeyPointMatchesRatio);
if(keyPointMatchesCount)
{
float scaleInliersRatio = static_cast<float>(scaleInliersCount) / keyPointMatchesCount;
EXPECT_GE(scaleInliersRatio, minInliersRatio);
}
#if SHOW_DEBUG_LOG
std::cout
<< "scale = " << scale
<< ", keyPointMatchesRatio = " << keyPointMatchesRatio
<< ", scaleInliersRatio = " << (keyPointMatchesCount ? static_cast<float>(scaleInliersCount) / keyPointMatchesCount : 0)
<< std::endl;
#endif
}
}
/*
* Detector's rotation invariance check
*/
INSTANTIATE_TEST_CASE_P(BRISK, DetectorRotationInvariance,
Value(IMAGE_TSUKUBA, BRISK::create(), 0.45f, 0.76f));
INSTANTIATE_TEST_CASE_P(ORB, DetectorRotationInvariance,
Value(IMAGE_TSUKUBA, ORB::create(), 0.5f, 0.76f));
INSTANTIATE_TEST_CASE_P(AKAZE, DetectorRotationInvariance,
Value(IMAGE_TSUKUBA, AKAZE::create(), 0.5f, 0.76f));
INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DetectorRotationInvariance,
Value(IMAGE_TSUKUBA, AKAZE::create(AKAZE::DESCRIPTOR_KAZE), 0.5f, 0.76f));
/*
* Detector's scale invariance check
*/
INSTANTIATE_TEST_CASE_P(BRISK, DetectorScaleInvariance,
Value(IMAGE_BIKES, BRISK::create(), 0.08f, 0.49f));
INSTANTIATE_TEST_CASE_P(ORB, DetectorScaleInvariance,
Value(IMAGE_BIKES, ORB::create(), 0.08f, 0.49f));
INSTANTIATE_TEST_CASE_P(KAZE, DetectorScaleInvariance,
Value(IMAGE_BIKES, KAZE::create(), 0.08f, 0.49f));
INSTANTIATE_TEST_CASE_P(AKAZE, DetectorScaleInvariance,
Value(IMAGE_BIKES, AKAZE::create(), 0.08f, 0.49f));
INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DetectorScaleInvariance,
Value(IMAGE_BIKES, AKAZE::create(AKAZE::DESCRIPTOR_KAZE), 0.08f, 0.49f));

View File

@@ -302,6 +302,13 @@ TEST( Features2d_Detector_AKAZE, regression )
test.safe_run();
}
+ TEST( Features2d_Detector_AKAZE_DESCRIPTOR_KAZE, regression )
+ {
+ CV_FeatureDetectorTest test( "detector-akaze-with-kaze-desc", AKAZE::create(AKAZE::DESCRIPTOR_KAZE) );
+ test.safe_run();
+ }
TEST( Features2d_Detector_AKAZE, detect_and_compute_split )
{
Mat testImg(100, 100, CV_8U);

View File

@@ -0,0 +1,92 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_TEST_INVARIANCE_UTILS_HPP__
#define __OPENCV_TEST_INVARIANCE_UTILS_HPP__
#include "test_precomp.hpp"
using namespace std;
using namespace cv;
static
Mat generateHomography(float angle)
{
// angle - rotation around Oz in degrees
float angleRadian = static_cast<float>(angle * CV_PI / 180);
Mat H = Mat::eye(3, 3, CV_32FC1);
H.at<float>(0,0) = H.at<float>(1,1) = std::cos(angleRadian);
H.at<float>(0,1) = -std::sin(angleRadian);
H.at<float>(1,0) = std::sin(angleRadian);
return H;
}
static
Mat rotateImage(const Mat& srcImage, const Mat& srcMask, float angle, Mat& dstImage, Mat& dstMask)
{
// angle - rotation around Oz in degrees
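// compose: move the image center to the origin (LUShift), rotate around Oz,
// then shift into the center of the new diag x diag canvas (RDShift) so the
// whole rotated image stays visible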
float diag = std::sqrt(static_cast<float>(srcImage.cols * srcImage.cols + srcImage.rows * srcImage.rows));
Mat LUShift = Mat::eye(3, 3, CV_32FC1); // left up
LUShift.at<float>(0,2) = static_cast<float>(-srcImage.cols/2);
LUShift.at<float>(1,2) = static_cast<float>(-srcImage.rows/2);
Mat RDShift = Mat::eye(3, 3, CV_32FC1); // right down
RDShift.at<float>(0,2) = diag/2;
RDShift.at<float>(1,2) = diag/2;
Size sz(cvRound(diag), cvRound(diag));
Mat H = RDShift * generateHomography(angle) * LUShift;
warpPerspective(srcImage, dstImage, H, sz);
warpPerspective(srcMask, dstMask, H, sz);
return H;
}
static
float calcCirclesIntersectArea(const Point2f& p0, float r0, const Point2f& p1, float r1)
{
float c = static_cast<float>(norm(p0 - p1)), sqr_c = c * c;
float sqr_r0 = r0 * r0;
float sqr_r1 = r1 * r1;
if(r0 + r1 <= c)
return 0;
float minR = std::min(r0, r1);
float maxR = std::max(r0, r1);
if(c + minR <= maxR)
return static_cast<float>(CV_PI * minR * minR);
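// general case: the lens is the sum of two circular segments, each of area
// 0.5*r^2*(A - sin A); the central angle A follows from the law of cosines:
// cos(A/2) = (r^2 + c^2 - r_other^2) / (2*r*c)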
float cos_halfA0 = (sqr_r0 + sqr_c - sqr_r1) / (2 * r0 * c);
float cos_halfA1 = (sqr_r1 + sqr_c - sqr_r0) / (2 * r1 * c);
float A0 = 2 * acos(cos_halfA0);
float A1 = 2 * acos(cos_halfA1);
return 0.5f * sqr_r0 * (A0 - sin(A0)) +
0.5f * sqr_r1 * (A1 - sin(A1));
}
static
float calcIntersectRatio(const Point2f& p0, float r0, const Point2f& p1, float r1)
{
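// inclusion-exclusion: union = pi*r0^2 + pi*r1^2 - intersection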
float intersectArea = calcCirclesIntersectArea(p0, r0, p1, r1);
float unionArea = static_cast<float>(CV_PI) * (r0 * r0 + r1 * r1) - intersectArea;
return intersectArea / unionArea;
}
static
void scaleKeyPoints(const vector<KeyPoint>& src, vector<KeyPoint>& dst, float scale)
{
dst.resize(src.size());
for (size_t i = 0; i < src.size(); i++) {
dst[i] = src[i];
dst[i].pt.x *= scale;
dst[i].pt.y *= scale;
dst[i].size *= scale;
}
}
#endif // __OPENCV_TEST_INVARIANCE_UTILS_HPP__

View File

@@ -1,717 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
using namespace std;
using namespace cv;
const string IMAGE_TSUKUBA = "/features2d/tsukuba.png";
const string IMAGE_BIKES = "/detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
#define SHOW_DEBUG_LOG 1
static
Mat generateHomography(float angle)
{
// angle - rotation around Oz in degrees
float angleRadian = static_cast<float>(angle * CV_PI / 180);
Mat H = Mat::eye(3, 3, CV_32FC1);
H.at<float>(0,0) = H.at<float>(1,1) = std::cos(angleRadian);
H.at<float>(0,1) = -std::sin(angleRadian);
H.at<float>(1,0) = std::sin(angleRadian);
return H;
}
static
Mat rotateImage(const Mat& srcImage, const Mat& srcMask, float angle, Mat& dstImage, Mat& dstMask)
{
// angle - rotation around Oz in degrees
float diag = std::sqrt(static_cast<float>(srcImage.cols * srcImage.cols + srcImage.rows * srcImage.rows));
Mat LUShift = Mat::eye(3, 3, CV_32FC1); // left up
LUShift.at<float>(0,2) = static_cast<float>(-srcImage.cols/2);
LUShift.at<float>(1,2) = static_cast<float>(-srcImage.rows/2);
Mat RDShift = Mat::eye(3, 3, CV_32FC1); // right down
RDShift.at<float>(0,2) = diag/2;
RDShift.at<float>(1,2) = diag/2;
Size sz(cvRound(diag), cvRound(diag));
Mat H = RDShift * generateHomography(angle) * LUShift;
warpPerspective(srcImage, dstImage, H, sz);
warpPerspective(srcMask, dstMask, H, sz);
return H;
}
void rotateKeyPoints(const vector<KeyPoint>& src, const Mat& H, float angle, vector<KeyPoint>& dst)
{
// suppose that H is rotation given from rotateImage() and angle has value passed to rotateImage()
vector<Point2f> srcCenters, dstCenters;
KeyPoint::convert(src, srcCenters);
perspectiveTransform(srcCenters, dstCenters, H);
dst = src;
for(size_t i = 0; i < dst.size(); i++)
{
dst[i].pt = dstCenters[i];
float dstAngle = src[i].angle + angle;
if(dstAngle >= 360.f)
dstAngle -= 360.f;
dst[i].angle = dstAngle;
}
}
void scaleKeyPoints(const vector<KeyPoint>& src, vector<KeyPoint>& dst, float scale)
{
dst.resize(src.size());
for(size_t i = 0; i < src.size(); i++)
dst[i] = KeyPoint(src[i].pt.x * scale, src[i].pt.y * scale, src[i].size * scale, src[i].angle);
}
static
float calcCirclesIntersectArea(const Point2f& p0, float r0, const Point2f& p1, float r1)
{
float c = static_cast<float>(norm(p0 - p1)), sqr_c = c * c;
float sqr_r0 = r0 * r0;
float sqr_r1 = r1 * r1;
if(r0 + r1 <= c)
return 0;
float minR = std::min(r0, r1);
float maxR = std::max(r0, r1);
if(c + minR <= maxR)
return static_cast<float>(CV_PI * minR * minR);
float cos_halfA0 = (sqr_r0 + sqr_c - sqr_r1) / (2 * r0 * c);
float cos_halfA1 = (sqr_r1 + sqr_c - sqr_r0) / (2 * r1 * c);
float A0 = 2 * acos(cos_halfA0);
float A1 = 2 * acos(cos_halfA1);
return 0.5f * sqr_r0 * (A0 - sin(A0)) +
0.5f * sqr_r1 * (A1 - sin(A1));
}
static
float calcIntersectRatio(const Point2f& p0, float r0, const Point2f& p1, float r1)
{
float intersectArea = calcCirclesIntersectArea(p0, r0, p1, r1);
float unionArea = static_cast<float>(CV_PI) * (r0 * r0 + r1 * r1) - intersectArea;
return intersectArea / unionArea;
}
static
void matchKeyPoints(const vector<KeyPoint>& keypoints0, const Mat& H,
const vector<KeyPoint>& keypoints1,
vector<DMatch>& matches)
{
vector<Point2f> points0;
KeyPoint::convert(keypoints0, points0);
Mat points0t;
if(H.empty())
points0t = Mat(points0);
else
perspectiveTransform(Mat(points0), points0t, H);
matches.clear();
vector<uchar> usedMask(keypoints1.size(), 0);
for(int i0 = 0; i0 < static_cast<int>(keypoints0.size()); i0++)
{
int nearestPointIndex = -1;
float maxIntersectRatio = 0.f;
const float r0 = 0.5f * keypoints0[i0].size;
for(size_t i1 = 0; i1 < keypoints1.size(); i1++)
{
if(nearestPointIndex >= 0 && usedMask[i1])
continue;
float r1 = 0.5f * keypoints1[i1].size;
float intersectRatio = calcIntersectRatio(points0t.at<Point2f>(i0), r0,
keypoints1[i1].pt, r1);
if(intersectRatio > maxIntersectRatio)
{
maxIntersectRatio = intersectRatio;
nearestPointIndex = static_cast<int>(i1);
}
}
matches.push_back(DMatch(i0, nearestPointIndex, maxIntersectRatio));
if(nearestPointIndex >= 0)
usedMask[nearestPointIndex] = 1;
}
}
class DetectorRotationInvarianceTest : public cvtest::BaseTest
{
public:
DetectorRotationInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
float _minKeyPointMatchesRatio,
float _minAngleInliersRatio) :
featureDetector(_featureDetector),
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minAngleInliersRatio(_minAngleInliersRatio)
{
CV_Assert(featureDetector);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_TSUKUBA;
// Read test data
Mat image0 = imread(imageFilename), image1, mask1;
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
std::cout << "Image: " << image0.size() << std::endl;
const int borderSize = 16;
Mat mask0(image0.size(), CV_8UC1, Scalar(0));
mask0(Rect(borderSize, borderSize, mask0.cols - 2*borderSize, mask0.rows - 2*borderSize)).setTo(Scalar(255));
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0, mask0);
std::cout << "Intial keypoints: " << keypoints0.size() << std::endl;
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, mask0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
featureDetector->detect(image1, keypoints1, mask1);
vector<DMatch> matches;
matchKeyPoints(keypoints0, H, keypoints1, matches);
int angleInliersCount = 0;
const float minIntersectRatio = 0.5f;
int keyPointMatchesCount = 0;
for(size_t m = 0; m < matches.size(); m++)
{
if(matches[m].distance < minIntersectRatio)
continue;
keyPointMatchesCount++;
// Check does this inlier have consistent angles
const float maxAngleDiff = 15.f; // grad
float angle0 = keypoints0[matches[m].queryIdx].angle;
float angle1 = keypoints1[matches[m].trainIdx].angle;
if(angle0 == -1 || angle1 == -1)
CV_Error(Error::StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
CV_Assert(angle0 >= 0.f && angle0 < 360.f);
CV_Assert(angle1 >= 0.f && angle1 < 360.f);
float rotAngle0 = angle0 + angle;
if(rotAngle0 >= 360.f)
rotAngle0 -= 360.f;
float angleDiff = std::max(rotAngle0, angle1) - std::min(rotAngle0, angle1);
angleDiff = std::min(angleDiff, static_cast<float>(360.f - angleDiff));
CV_Assert(angleDiff >= 0.f);
bool isAngleCorrect = angleDiff < maxAngleDiff;
if(isAngleCorrect)
angleInliersCount++;
}
float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints0.size();
if(keyPointMatchesRatio < minKeyPointMatchesRatio)
{
ts->printf(cvtest::TS::LOG, "Angle: %f: Incorrect keyPointMatchesRatio: curr = %f, min = %f (matched=%d total=%d - %d).\n",
(float)angle, keyPointMatchesRatio, minKeyPointMatchesRatio, (int)keyPointMatchesCount, (int)keypoints0.size(), (int)keypoints1.size());
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
if(keyPointMatchesCount)
{
float angleInliersRatio = static_cast<float>(angleInliersCount) / keyPointMatchesCount;
if(angleInliersRatio < minAngleInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Angle: %f: Incorrect angleInliersRatio: curr = %f, min = %f.\n",
(float)angle, angleInliersRatio, minAngleInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
}
#if SHOW_DEBUG_LOG
std::cout
<< "angle = " << angle
<< ", keypoints = " << keypoints1.size()
<< ", keyPointMatchesRatio = " << keyPointMatchesRatio
<< ", angleInliersRatio = " << (keyPointMatchesCount ? (static_cast<float>(angleInliersCount) / keyPointMatchesCount) : 0)
<< std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
float minKeyPointMatchesRatio;
float minAngleInliersRatio;
};
class DescriptorRotationInvarianceTest : public cvtest::BaseTest
{
public:
DescriptorRotationInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
const Ptr<DescriptorExtractor>& _descriptorExtractor,
int _normType,
float _minDescInliersRatio) :
featureDetector(_featureDetector),
descriptorExtractor(_descriptorExtractor),
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
CV_Assert(featureDetector);
CV_Assert(descriptorExtractor);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_TSUKUBA;
// Read test data
Mat image0 = imread(imageFilename), image1, mask1;
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
std::cout << "Image: " << image0.size() << std::endl;
const int borderSize = 16;
Mat mask0(image0.size(), CV_8UC1, Scalar(0));
mask0(Rect(borderSize, borderSize, mask0.cols - 2*borderSize, mask0.rows - 2*borderSize)).setTo(Scalar(255));
vector<KeyPoint> keypoints0;
Mat descriptors0;
featureDetector->detect(image0, keypoints0, mask0);
std::cout << "Intial keypoints: " << keypoints0.size() << std::endl;
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(normType);
const float minIntersectRatio = 0.5f;
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, mask0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
rotateKeyPoints(keypoints0, H, static_cast<float>(angle), keypoints1);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints1[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints1[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
if(descInliersRatio < minDescInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect descInliersRatio: curr = %f, min = %f.\n",
descInliersRatio, minDescInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
#if SHOW_DEBUG_LOG
std::cout
<< "angle = " << angle
<< ", keypoints = " << keypoints1.size()
<< ", descInliersRatio = " << static_cast<float>(descInliersCount) / keypoints0.size()
<< std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
int normType;
float minDescInliersRatio;
};
class DetectorScaleInvarianceTest : public cvtest::BaseTest
{
public:
DetectorScaleInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
float _minKeyPointMatchesRatio,
float _minScaleInliersRatio) :
featureDetector(_featureDetector),
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minScaleInliersRatio(_minScaleInliersRatio)
{
CV_Assert(featureDetector);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_BIKES;
// Read test data
Mat image0 = imread(imageFilename);
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale);
vector<KeyPoint> keypoints1, osiKeypoints1; // osi - original size image
featureDetector->detect(image1, keypoints1);
if(keypoints1.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
if(keypoints1.size() > keypoints0.size())
{
ts->printf(cvtest::TS::LOG, "Strange behavior of the detector. "
"It gives more points count in an image of the smaller size.\n"
"original size (%d, %d), keypoints count = %d\n"
"reduced size (%d, %d), keypoints count = %d\n",
image0.cols, image0.rows, keypoints0.size(),
image1.cols, image1.rows, keypoints1.size());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
scaleKeyPoints(keypoints1, osiKeypoints1, scale);
vector<DMatch> matches;
// image1 is query image (it's reduced image0)
// image0 is train image
matchKeyPoints(osiKeypoints1, Mat(), keypoints0, matches);
const float minIntersectRatio = 0.5f;
int keyPointMatchesCount = 0;
int scaleInliersCount = 0;
for(size_t m = 0; m < matches.size(); m++)
{
if(matches[m].distance < minIntersectRatio)
continue;
keyPointMatchesCount++;
// Check does this inlier have consistent sizes
const float maxSizeDiff = 0.8f;//0.9f; // grad
float size0 = keypoints0[matches[m].trainIdx].size;
float size1 = osiKeypoints1[matches[m].queryIdx].size;
CV_Assert(size0 > 0 && size1 > 0);
if(std::min(size0, size1) > maxSizeDiff * std::max(size0, size1))
scaleInliersCount++;
}
float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints1.size();
if(keyPointMatchesRatio < minKeyPointMatchesRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect keyPointMatchesRatio: curr = %f, min = %f.\n",
keyPointMatchesRatio, minKeyPointMatchesRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
if(keyPointMatchesCount)
{
float scaleInliersRatio = static_cast<float>(scaleInliersCount) / keyPointMatchesCount;
if(scaleInliersRatio < minScaleInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect scaleInliersRatio: curr = %f, min = %f.\n",
scaleInliersRatio, minScaleInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
}
#if SHOW_DEBUG_LOG
std::cout
<< "scale = " << scale
<< ", keyPointMatchesRatio = " << keyPointMatchesRatio
<< ", scaleInliersRatio = " << (keyPointMatchesCount ? static_cast<float>(scaleInliersCount) / keyPointMatchesCount : 0)
<< std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
float minKeyPointMatchesRatio;
float minScaleInliersRatio;
};
class DescriptorScaleInvarianceTest : public cvtest::BaseTest
{
public:
DescriptorScaleInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
const Ptr<DescriptorExtractor>& _descriptorExtractor,
int _normType,
float _minDescInliersRatio) :
featureDetector(_featureDetector),
descriptorExtractor(_descriptorExtractor),
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
CV_Assert(featureDetector);
CV_Assert(descriptorExtractor);
}
protected:
void run(int)
{
const string imageFilename = string(ts->get_data_path()) + IMAGE_BIKES;
// Read test data
Mat image0 = imread(imageFilename);
if(image0.empty())
{
ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
if(keypoints0.size() < 15)
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
Mat descriptors0;
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(normType);
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale);
vector<KeyPoint> keypoints1;
scaleKeyPoints(keypoints0, keypoints1, 1.0f/scale);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
const float minIntersectRatio = 0.5f;
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints0[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints0[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
if(descInliersRatio < minDescInliersRatio)
{
ts->printf(cvtest::TS::LOG, "Incorrect descInliersRatio: curr = %f, min = %f.\n",
descInliersRatio, minDescInliersRatio);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
#if SHOW_DEBUG_LOG
std::cout
<< "scale = " << scale
<< ", descInliersRatio = " << static_cast<float>(descInliersCount) / keypoints0.size()
<< std::endl;
#endif
}
ts->set_failed_test_info( cvtest::TS::OK );
}
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
int normType;
float minDescInliersRatio;
};
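// A minimal registration sketch (hypothetical, not part of this suite) showing
// how DescriptorScaleInvarianceTest is meant to be instantiated; the KAZE choice
// and the 0.99f threshold here are illustrative only:
//
//   TEST(Features2d_ScaleInvariance_Descriptor_KAZE, example)
//   {
//       Ptr<Feature2D> f2d = KAZE::create();
//       DescriptorScaleInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f);
//       test.safe_run();
//   }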
// Tests registration
/*
* Detector's rotation invariance check
*/
TEST(Features2d_RotationInvariance_Detector_BRISK, regression)
{
DetectorRotationInvarianceTest test(BRISK::create(),
0.45f,
0.76f);
test.safe_run();
}
TEST(Features2d_RotationInvariance_Detector_ORB, regression)
{
DetectorRotationInvarianceTest test(ORB::create(),
0.5f,
0.76f);
test.safe_run();
}
/*
* Descriptor's rotation invariance check
*/
TEST(Features2d_RotationInvariance_Descriptor_BRISK, regression)
{
Ptr<Feature2D> f2d = BRISK::create();
DescriptorRotationInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f);
test.safe_run();
}
TEST(Features2d_RotationInvariance_Descriptor_ORB, regression)
{
Ptr<Feature2D> f2d = ORB::create();
DescriptorRotationInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f);
test.safe_run();
}
//TEST(Features2d_RotationInvariance_Descriptor_FREAK, regression) // requires xfeatures2d
//{
//    Ptr<FeatureDetector> detector = ORB::create();
//    Ptr<DescriptorExtractor> extractor = xfeatures2d::FREAK::create();
//    DescriptorRotationInvarianceTest test(detector, extractor, extractor->defaultNorm(), 0.f);
//    test.safe_run();
//}
/*
* Detector's scale invariance check
*/
TEST(Features2d_ScaleInvariance_Detector_BRISK, regression)
{
DetectorScaleInvarianceTest test(BRISK::create(), 0.08f, 0.49f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Detector_KAZE, regression)
{
DetectorScaleInvarianceTest test(KAZE::create(), 0.08f, 0.49f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Detector_AKAZE, regression)
{
DetectorScaleInvarianceTest test(AKAZE::create(), 0.08f, 0.49f);
test.safe_run();
}
TEST(Features2d_ScaleInvariance_Detector_ORB, regression)
{
DetectorScaleInvarianceTest test(ORB::create(), 0.08f, 0.49f);
test.safe_run();
}
/*
* Descriptor's scale invariance check
*/
//TEST(Features2d_ScaleInvariance_Descriptor_BRISK, regression)
//{
//    Ptr<Feature2D> f2d = BRISK::create();
//    DescriptorScaleInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.99f);
//    test.safe_run();
//}
//TEST(Features2d_ScaleInvariance_Descriptor_ORB, regression)
//{
//    Ptr<Feature2D> f2d = ORB::create();
//    DescriptorScaleInvarianceTest test(f2d, f2d, f2d->defaultNorm(), 0.01f);
//    test.safe_run();
//}
//TEST(Features2d_ScaleInvariance_Descriptor_FREAK, regression) // requires xfeatures2d
//{
//    Ptr<FeatureDetector> detector = ORB::create();
//    Ptr<DescriptorExtractor> extractor = xfeatures2d::FREAK::create();
//    DescriptorScaleInvarianceTest test(detector, extractor, extractor->defaultNorm(), 0.01f);
//    test.safe_run();
//}

@@ -24,9 +24,9 @@ using namespace std::tr1;
 typedef TestBaseWithParam<string> stitch;
 #ifdef HAVE_OPENCV_XFEATURES2D
-#define TEST_DETECTORS testing::Values("surf", "orb")
+#define TEST_DETECTORS testing::Values("surf", "orb", "akaze")
 #else
-#define TEST_DETECTORS testing::Values<string>("orb")
+#define TEST_DETECTORS testing::Values("orb", "akaze")
 #endif
 OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS)
@@ -39,10 +39,7 @@ OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS)
     _imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
     vector<UMat> imgs = ToUMat(_imgs);
-    Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
-            ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
-            : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
+    Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
     Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
             ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
             : makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
@@ -76,10 +73,7 @@ OCL_PERF_TEST_P(stitch, b12, TEST_DETECTORS)
     imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
     imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
-    Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
-            ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
-            : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
+    Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
     Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
             ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
             : makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
@@ -118,10 +112,7 @@ OCL_PERF_TEST_P(stitch, boat, TEST_DETECTORS)
     _imgs.push_back( imread( getDataPath("stitching/boat6.jpg") ) );
     vector<UMat> imgs = ToUMat(_imgs);
-    Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
-            ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
-            : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
+    Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
     Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
             ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
             : makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);

@@ -16,4 +16,21 @@
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
+namespace cv
+{
+static inline Ptr<detail::FeaturesFinder> getFeatureFinder(const std::string& name)
+{
+    if (name == "orb")
+        return makePtr<detail::OrbFeaturesFinder>();
+    else if (name == "surf")
+        return makePtr<detail::SurfFeaturesFinder>();
+    else if (name == "akaze")
+        return makePtr<detail::AKAZEFeaturesFinder>();
+    else
+        return Ptr<detail::FeaturesFinder>();
+}
+} // namespace cv
 #endif
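// Usage sketch for the helper above (hypothetical call site, mirroring the perf
// tests in this commit); note that an empty Ptr is returned for unrecognized names:
//
//   Ptr<detail::FeaturesFinder> finder = getFeatureFinder("akaze");
//   CV_Assert(!finder.empty());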

@@ -18,9 +18,9 @@ typedef TestBaseWithParam<string> stitch;
 typedef TestBaseWithParam<tuple<string, string> > stitchDatasets;
 #ifdef HAVE_OPENCV_XFEATURES2D
-#define TEST_DETECTORS testing::Values("surf", "orb")
+#define TEST_DETECTORS testing::Values("surf", "orb", "akaze")
 #else
-#define TEST_DETECTORS testing::Values<string>("orb")
+#define TEST_DETECTORS testing::Values("orb", "akaze")
 #endif
 #define AFFINE_DATASETS testing::Values("s", "budapest", "newspaper", "prague")
@@ -33,9 +33,7 @@ PERF_TEST_P(stitch, a123, TEST_DETECTORS)
     imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
     imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
-    Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
-            ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
-            : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
+    Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
     Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
             ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
@@ -70,9 +68,7 @@ PERF_TEST_P(stitch, b12, TEST_DETECTORS)
     imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
     imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
-    Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
-            ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
-            : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
+    Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
     Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
             ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
@@ -107,12 +103,7 @@ PERF_TEST_P(stitchDatasets, affine, testing::Combine(AFFINE_DATASETS, TEST_DETECTORS))
     Mat pano;
     vector<Mat> imgs;
    int width, height, allowed_diff = 10;
-    Ptr<detail::FeaturesFinder> featuresFinder;
-    if(detector == "orb")
-        featuresFinder = makePtr<detail::OrbFeaturesFinder>();
-    else
-        featuresFinder = makePtr<detail::SurfFeaturesFinder>();
+    Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(detector);
     if(dataset == "budapest")
     {

@@ -558,10 +558,7 @@ AKAZEFeaturesFinder::AKAZEFeaturesFinder(int descriptor_type,
 void AKAZEFeaturesFinder::find(InputArray image, detail::ImageFeatures &features)
 {
     CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
-    Mat descriptors;
-    UMat uimage = image.getUMat();
-    akaze->detectAndCompute(uimage, UMat(), features.keypoints, descriptors);
-    features.descriptors = descriptors.getUMat(ACCESS_READ);
+    akaze->detectAndCompute(image, noArray(), features.keypoints, features.descriptors);
 }
 #ifdef HAVE_OPENCV_XFEATURES2D
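// Note on the hunk above: detectAndCompute() takes InputArray/OutputArray, so the
// new call writes straight into features.keypoints and features.descriptors (a UMat),
// avoiding the temporary Mat and the getUMat() round-trip the removed lines performed.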