Mirror of https://github.com/opencv/opencv.git
Merge pull request #17119 from alalek:move_sift
Commit 8d05dab32c
@@ -620,7 +620,7 @@
   volume = {1},
   publisher = {IEEE}
 }
-@article{Lowe:2004:DIF:993451.996342,
+@article{Lowe04,
   author = {Lowe, David G.},
   title = {Distinctive Image Features from Scale-Invariant Keypoints},
   journal = {Int. J. Comput. Vision},
@@ -44,7 +44,7 @@ img1 = cv.imread('box.png',0) # queryImage
 img2 = cv.imread('box_in_scene.png',0) # trainImage
 
 # Initiate SIFT detector
-sift = cv.xfeatures2d.SIFT_create()
+sift = cv.SIFT_create()
 
 # find the keypoints and descriptors with SIFT
 kp1, des1 = sift.detectAndCompute(img1,None)
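The only change in each of these tutorial snippets is where the SIFT factory lives. As a side note, a minimal sketch, not part of the patch, of how a user script could support both layouts (older contrib builds expose the factory only under cv.xfeatures2d):

@code{.py}
import cv2 as cv

def create_sift():
    # Builds that include this change expose SIFT in the main module.
    if hasattr(cv, 'SIFT_create'):
        return cv.SIFT_create()
    # Older builds only ship it with the opencv_contrib xfeatures2d module.
    return cv.xfeatures2d.SIFT_create()

sift = create_sift()
@endcode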
@@ -110,7 +110,7 @@ img1 = cv.imread('box.png',cv.IMREAD_GRAYSCALE) # queryImage
 img2 = cv.imread('box_in_scene.png',cv.IMREAD_GRAYSCALE) # trainImage
 
 # Initiate SIFT detector
-sift = cv.xfeatures2d.SIFT_create()
+sift = cv.SIFT_create()
 
 # find the keypoints and descriptors with SIFT
 kp1, des1 = sift.detectAndCompute(img1,None)
@@ -174,7 +174,7 @@ img1 = cv.imread('box.png',cv.IMREAD_GRAYSCALE) # queryImage
 img2 = cv.imread('box_in_scene.png',cv.IMREAD_GRAYSCALE) # trainImage
 
 # Initiate SIFT detector
-sift = cv.xfeatures2d.SIFT_create()
+sift = cv.SIFT_create()
 
 # find the keypoints and descriptors with SIFT
 kp1, des1 = sift.detectAndCompute(img1,None)
@@ -119,7 +119,7 @@ import cv2 as cv
 img = cv.imread('home.jpg')
 gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
 
-sift = cv.xfeatures2d.SIFT_create()
+sift = cv.SIFT_create()
 kp = sift.detect(gray,None)
 
 img=cv.drawKeypoints(gray,kp,img)
@@ -151,7 +151,7 @@ Now to calculate the descriptor, OpenCV provides two methods.
 
 We will see the second method:
 @code{.py}
-sift = cv.xfeatures2d.SIFT_create()
+sift = cv.SIFT_create()
 kp, des = sift.detectAndCompute(gray,None)
 @endcode
 Here kp will be a list of keypoints and des is a numpy array of shape
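Putting the pieces of this tutorial together, a small self-contained sketch (not part of the diff) of the second method: detectAndCompute() returns the keypoints together with a descriptor array that has one 128-element row per keypoint, since SIFT descriptors are 128-dimensional.

@code{.py}
import cv2 as cv

img = cv.imread('home.jpg')                  # any test image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

sift = cv.SIFT_create()
kp, des = sift.detectAndCompute(gray, None)  # keypoints and descriptors in one call

print(len(kp), des.shape)                    # des.shape == (len(kp), 128)
@endcode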
@@ -27,7 +27,7 @@ Binary descriptors (ORB, BRISK, ...) are matched using the <a href="https://en.w
 This distance is equivalent to count the number of different elements for binary strings (population count after applying a XOR operation):
 \f[ d_{hamming} \left ( a,b \right ) = \sum_{i=0}^{n-1} \left ( a_i \oplus b_i \right ) \f]
 
-To filter the matches, Lowe proposed in @cite Lowe:2004:DIF:993451.996342 to use a distance ratio test to try to eliminate false matches.
+To filter the matches, Lowe proposed in @cite Lowe04 to use a distance ratio test to try to eliminate false matches.
 The distance ratio between the two nearest matches of a considered keypoint is computed and it is a good match when this value is below
 a threshold. Indeed, this ratio allows helping to discriminate between ambiguous matches (distance ratio between the two nearest neighbors
 is close to one) and well discriminated matches. The figure below from the SIFT paper illustrates the probability that a match is correct
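The ratio test described above keeps a match only when its distance is clearly smaller than the distance of the second-best candidate. A hedged Python sketch of the test as the updated tutorials apply it (the 0.7 threshold is the value commonly used in the OpenCV tutorials, not something fixed by this patch; for binary descriptors the matcher would use cv.NORM_HAMMING, which implements the XOR/popcount distance in the formula above):

@code{.py}
import cv2 as cv

img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE)           # queryImage
img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)  # trainImage

sift = cv.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# SIFT descriptors are float vectors, so the L2 norm is appropriate.
bf = cv.BFMatcher(cv.NORM_L2)
matches = bf.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only if it is clearly better
# than the second-nearest candidate.
good = [m for m, n in matches if m.distance < 0.7 * n.distance]
print(len(good), 'matches kept')
@endcode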
@@ -244,6 +244,39 @@ typedef Feature2D DescriptorExtractor;
 //! @addtogroup features2d_main
 //! @{
 
+
+/** @brief Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform
+(SIFT) algorithm by D. Lowe @cite Lowe04 .
+*/
+class CV_EXPORTS_W SIFT : public Feature2D
+{
+public:
+    /**
+    @param nfeatures The number of best features to retain. The features are ranked by their scores
+    (measured in SIFT algorithm as the local contrast)
+
+    @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The
+    number of octaves is computed automatically from the image resolution.
+
+    @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform
+    (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
+
+    @param edgeThreshold The threshold used to filter out edge-like features. Note that the its meaning
+    is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are
+    filtered out (more features are retained).
+
+    @param sigma The sigma of the Gaussian applied to the input image at the octave \#0. If your image
+    is captured with a weak camera with soft lenses, you might want to reduce the number.
+    */
+    CV_WRAP static Ptr<SIFT> create(int nfeatures = 0, int nOctaveLayers = 3,
+        double contrastThreshold = 0.04, double edgeThreshold = 10,
+        double sigma = 1.6);
+};
+
+typedef SIFT SiftFeatureDetector;
+typedef SIFT SiftDescriptorExtractor;
+
+
 /** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .
 */
 class CV_EXPORTS_W BRISK : public Feature2D
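The parameters documented in the new header map directly onto the Python binding. A short illustrative sketch (the values written out are the defaults from create() above, spelled out only to show the mapping; they are not new recommendations):

@code{.py}
import cv2 as cv

sift = cv.SIFT_create(nfeatures=0,             # 0 = keep all ranked features
                      nOctaveLayers=3,
                      contrastThreshold=0.04,  # raise to drop more low-contrast keypoints
                      edgeThreshold=10,        # raise to keep more edge-like keypoints
                      sigma=1.6)               # Gaussian sigma for octave #0

gray = cv.imread('home.jpg', cv.IMREAD_GRAYSCALE)
kp = sift.detect(gray, None)
@endcode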
@@ -6,6 +6,7 @@ import org.opencv.core.MatOfKeyPoint;
 import org.opencv.core.Point;
 import org.opencv.core.Scalar;
 import org.opencv.core.KeyPoint;
+import org.opencv.features2d.SIFT;
 import org.opencv.test.OpenCVTestCase;
 import org.opencv.test.OpenCVTestRunner;
 import org.opencv.imgproc.Imgproc;
@@ -29,7 +30,7 @@ public class SIFTDescriptorExtractorTest extends OpenCVTestCase {
     @Override
     protected void setUp() throws Exception {
         super.setUp();
-        extractor = createClassInstance(XFEATURES2D+"SIFT", DEFAULT_FACTORY, null, null);
+        extractor = SIFT.create();
         keypoint = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
         matSize = 100;
         truth = new Mat(1, 128, CvType.CV_32FC1) {
modules/features2d/perf/perf_sift.cpp (new file, 85 lines)
@@ -0,0 +1,85 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+#include "perf_precomp.hpp"
+
+namespace opencv_test { namespace {
+
+typedef perf::TestBaseWithParam<std::string> SIFT_detect;
+typedef perf::TestBaseWithParam<std::string> SIFT_extract;
+typedef perf::TestBaseWithParam<std::string> SIFT_full;
+
+#define SIFT_IMAGES \
+    "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
+    "stitching/a3.png"
+
+PERF_TEST_P_(SIFT_detect, SIFT)
+{
+    string filename = getDataPath(GetParam());
+    Mat frame = imread(filename, IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
+
+    Mat mask;
+    declare.in(frame).time(90);
+    Ptr<SIFT> detector = SIFT::create();
+    vector<KeyPoint> points;
+
+    PERF_SAMPLE_BEGIN();
+    detector->detect(frame, points, mask);
+    PERF_SAMPLE_END();
+
+    SANITY_CHECK_NOTHING();
+}
+
+PERF_TEST_P_(SIFT_extract, SIFT)
+{
+    string filename = getDataPath(GetParam());
+    Mat frame = imread(filename, IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
+
+    Mat mask;
+    declare.in(frame).time(90);
+
+    Ptr<SIFT> detector = SIFT::create();
+    vector<KeyPoint> points;
+    Mat descriptors;
+    detector->detect(frame, points, mask);
+
+    PERF_SAMPLE_BEGIN();
+    detector->compute(frame, points, descriptors);
+    PERF_SAMPLE_END();
+
+    SANITY_CHECK_NOTHING();
+}
+
+PERF_TEST_P_(SIFT_full, SIFT)
+{
+    string filename = getDataPath(GetParam());
+    Mat frame = imread(filename, IMREAD_GRAYSCALE);
+    ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
+
+    Mat mask;
+    declare.in(frame).time(90);
+    Ptr<SIFT> detector = SIFT::create();
+    vector<KeyPoint> points;
+    Mat descriptors;
+
+    PERF_SAMPLE_BEGIN();
+    detector->detectAndCompute(frame, mask, points, descriptors, false);
+    PERF_SAMPLE_END();
+
+    SANITY_CHECK_NOTHING();
+}
+
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, SIFT_detect,
+    testing::Values(SIFT_IMAGES)
+);
+INSTANTIATE_TEST_CASE_P(/*nothing*/, SIFT_extract,
+    testing::Values(SIFT_IMAGES)
+);
+INSTANTIATE_TEST_CASE_P(/*nothing*/, SIFT_full,
+    testing::Values(SIFT_IMAGES)
+);
+
+}} // namespace
modules/features2d/src/sift.cpp (new file, 1190 lines; diff suppressed because it is too large)
@@ -167,6 +167,9 @@ TEST_P(DescriptorScaleInvariance, scale)
 * Descriptors's rotation invariance check
 */
 
+INSTANTIATE_TEST_CASE_P(SIFT, DescriptorRotationInvariance,
+                        Value(IMAGE_TSUKUBA, SIFT::create(), SIFT::create(), 0.98f));
+
 INSTANTIATE_TEST_CASE_P(BRISK, DescriptorRotationInvariance,
                         Value(IMAGE_TSUKUBA, BRISK::create(), BRISK::create(), 0.99f));
 
@@ -183,6 +186,10 @@ INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DescriptorRotationInvariance,
 * Descriptor's scale invariance check
 */
 
+// TODO: Expected: (descInliersRatio) >= (minInliersRatio), actual: 0.330378 vs 0.78
+INSTANTIATE_TEST_CASE_P(DISABLED_SIFT, DescriptorScaleInvariance,
+                        Value(IMAGE_BIKES, SIFT::create(), SIFT::create(), 0.78f));
+
 INSTANTIATE_TEST_CASE_P(AKAZE, DescriptorScaleInvariance,
                         Value(IMAGE_BIKES, AKAZE::create(), AKAZE::create(), 0.6f));
 
@@ -342,6 +342,13 @@
 * Tests registrations *
 \****************************************************************************************/
 
+TEST( Features2d_DescriptorExtractor_SIFT, regression )
+{
+    CV_DescriptorExtractorTest<L1<float> > test( "descriptor-sift", 1.0f,
+                                                 SIFT::create() );
+    test.safe_run();
+}
+
 TEST( Features2d_DescriptorExtractor_BRISK, regression )
 {
     CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk",
@@ -388,7 +395,7 @@ TEST( Features2d_DescriptorExtractor_AKAZE_DESCRIPTOR_KAZE, regression )
     test.safe_run();
 }
 
-TEST( Features2d_DescriptorExtractor, batch )
+TEST( Features2d_DescriptorExtractor, batch_ORB )
 {
     string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");
     vector<Mat> imgs, descriptors;
@@ -416,6 +423,35 @@ TEST( Features2d_DescriptorExtractor, batch )
     }
 }
 
+TEST( Features2d_DescriptorExtractor, batch_SIFT )
+{
+    string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");
+    vector<Mat> imgs, descriptors;
+    vector<vector<KeyPoint> > keypoints;
+    int i, n = 6;
+    Ptr<SIFT> sift = SIFT::create();
+
+    for( i = 0; i < n; i++ )
+    {
+        string imgname = format("%s/img%d.png", path.c_str(), i+1);
+        Mat img = imread(imgname, 0);
+        imgs.push_back(img);
+    }
+
+    sift->detect(imgs, keypoints);
+    sift->compute(imgs, keypoints, descriptors);
+
+    ASSERT_EQ((int)keypoints.size(), n);
+    ASSERT_EQ((int)descriptors.size(), n);
+
+    for( i = 0; i < n; i++ )
+    {
+        EXPECT_GT((int)keypoints[i].size(), 100);
+        EXPECT_GT(descriptors[i].rows, 100);
+    }
+}
+
+
 class DescriptorImage : public TestWithParam<std::string>
 {
 protected:
@@ -220,6 +220,9 @@ TEST_P(DetectorScaleInvariance, scale)
 * Detector's rotation invariance check
 */
 
+INSTANTIATE_TEST_CASE_P(SIFT, DetectorRotationInvariance,
+                        Value(IMAGE_TSUKUBA, SIFT::create(), 0.45f, 0.70f));
+
 INSTANTIATE_TEST_CASE_P(BRISK, DetectorRotationInvariance,
                         Value(IMAGE_TSUKUBA, BRISK::create(), 0.45f, 0.76f));
 
@@ -236,6 +239,10 @@ INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DetectorRotationInvariance,
 * Detector's scale invariance check
 */
 
+// TODO: Expected: (keyPointMatchesRatio) >= (minKeyPointMatchesRatio), actual: 0.596752 vs 0.69
+INSTANTIATE_TEST_CASE_P(DISABLED_SIFT, DetectorScaleInvariance,
+                        Value(IMAGE_BIKES, SIFT::create(), 0.69f, 0.98f));
+
 INSTANTIATE_TEST_CASE_P(BRISK, DetectorScaleInvariance,
                         Value(IMAGE_BIKES, BRISK::create(), 0.08f, 0.49f));
 
@@ -245,6 +245,12 @@ void CV_FeatureDetectorTest::run( int /*start_from*/ )
 * Tests registrations *
 \****************************************************************************************/
 
+TEST( Features2d_Detector_SIFT, regression )
+{
+    CV_FeatureDetectorTest test( "detector-sift", SIFT::create() );
+    test.safe_run();
+}
+
 TEST( Features2d_Detector_BRISK, regression )
 {
     CV_FeatureDetectorTest test( "detector-brisk", BRISK::create() );
@@ -177,4 +177,11 @@ TEST(Features2d_Detector_Keypoints_AKAZE, validation)
     test_mldb.safe_run();
 }
 
+TEST(Features2d_Detector_Keypoints_SIFT, validation)
+{
+    CV_FeatureDetectorKeypointsTest test(SIFT::create());
+    test.safe_run();
+}
+
+
 }} // namespace
@@ -51,7 +51,6 @@ using namespace cv::cuda;
 #ifdef HAVE_OPENCV_XFEATURES2D
 #include "opencv2/xfeatures2d.hpp"
 using xfeatures2d::SURF;
-using xfeatures2d::SIFT;
 #else
 # if defined(_MSC_VER)
 # pragma warning(disable:4702) // unreachable code
@@ -487,14 +486,8 @@ void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
 
 SiftFeaturesFinder::SiftFeaturesFinder()
 {
-#ifdef HAVE_OPENCV_XFEATURES2D
     Ptr<SIFT> sift_ = SIFT::create();
-    if( !sift_ )
-        CV_Error( Error::StsNotImplemented, "OpenCV was built without SIFT support" );
     sift = sift_;
-#else
-    CV_Error( Error::StsNotImplemented, "OpenCV was built without SIFT support" );
-#endif
 }
 
 void SiftFeaturesFinder::find(InputArray image, ImageFeatures &features)
@@ -323,15 +323,8 @@ void createFeatures(const std::string &featureName, int numKeypoints, cv::Ptr<cv
     }
     else if (featureName == "SIFT")
    {
-#if defined (OPENCV_ENABLE_NONFREE) && defined (HAVE_OPENCV_XFEATURES2D)
-        detector = cv::xfeatures2d::SIFT::create();
-        descriptor = cv::xfeatures2d::SIFT::create();
-#else
-        std::cout << "xfeatures2d module is not available or nonfree is not enabled." << std::endl;
-        std::cout << "Default to ORB." << std::endl;
-        detector = cv::ORB::create(numKeypoints);
-        descriptor = cv::ORB::create(numKeypoints);
-#endif
+        detector = cv::SIFT::create();
+        descriptor = cv::SIFT::create();
     }
     else if (featureName == "SURF")
     {
@@ -29,7 +29,7 @@ FLANN_INDEX_LSH = 6
 def init_feature(name):
     chunks = name.split('-')
     if chunks[0] == 'sift':
-        detector = cv.xfeatures2d.SIFT_create()
+        detector = cv.SIFT_create()
         norm = cv.NORM_L2
     elif chunks[0] == 'surf':
         detector = cv.xfeatures2d.SURF_create(800)