Added Features2D descriptor tutorial + drawMatches in rst
This commit is contained in:
parent e6d308ca89
commit 07754b6309

doc/conf.py
@@ -358,7 +358,15 @@ extlinks = {'cvt_color': ('http://opencv.willowgarage.com/documentation/cpp/imgp
             'xmlymlpers':('http://opencv.itseez.com/modules/core/doc/xml_yaml_persistence.html#%s', None),
             'huivideo' : ('http://opencv.itseez.com/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
             'filtering':('http://opencv.itseez.com/modules/imgproc/doc/filtering.html#%s', None),
-            'point_polygon_test' : ('http://opencv.willowgarage.com/documentation/cpp/imgproc_structural_analysis_and_shape_descriptors.html#cv-pointpolygontest%s', None)
+            'point_polygon_test' : ('http://opencv.willowgarage.com/documentation/cpp/imgproc_structural_analysis_and_shape_descriptors.html#cv-pointpolygontest%s', None),
+            'feature_detector' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_common_interfaces_of_feature_detectors.html#featuredetector%s', None),
+            'feature_detector_detect' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_common_interfaces_of_feature_detectors.html#cv-featuredetector-detect%s', None),
+            'surf_feature_detector' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_common_interfaces_of_feature_detectors.html#surffeaturedetector%s', None),
+            'draw_keypoints' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_drawing_function_of_keypoints_and_matches.html#cv-drawkeypoints%s', None),
+            'descriptor_extractor' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_common_interfaces_of_descriptor_extractors.html#descriptorextractor%s', None),
+            'descriptor_extractor_compute' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_common_interfaces_of_descriptor_extractors.html#cv-descriptorextractor-compute%s', None),
+            'surf_descriptor_extractor' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_common_interfaces_of_descriptor_extractors.html#surfdescriptorextractor%s', None),
+            'draw_matches' : ('http://opencv.willowgarage.com/documentation/cpp/features2d_drawing_function_of_keypoints_and_matches.html#cv-drawmatches%s', None)
             }
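For reference, these entries configure Sphinx's ``sphinx.ext.extlinks`` extension: each key becomes a custom role, and the role's target is substituted into the ``%s`` placeholder of the corresponding URL template. A minimal sketch of how the new roles are used in the tutorials added by this commit (with an explicit link title and an empty ``<>`` target, so nothing is appended and the link resolves to the anchor already in the template):

.. code-block:: rst

   .. uses the draw_matches extlink defined above in doc/conf.py
   Use the function :draw_matches:`drawMatches <>` to draw the detected matches.

   .. renders as a link titled "drawMatches" pointing to
   .. ...features2d_drawing_function_of_keypoints_and_matches.html#cv-drawmatches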
@@ -0,0 +1,103 @@
.. _feature_description:

Feature Description
*******************

Goal
=====

In this tutorial you will learn how to:

.. container:: enumeratevisibleitemswithsquare

   * Use the :descriptor_extractor:`DescriptorExtractor<>` interface in order to find the feature vector corresponding to the keypoints. Specifically:

     * Use :surf_descriptor_extractor:`SurfDescriptorExtractor<>` and its function :descriptor_extractor:`compute<>` to perform the required calculations.
     * Use the function :draw_matches:`drawMatches<>` to draw the detected matches.

Theory
======

Code
====

This tutorial's code is shown in the lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_

.. code-block:: cpp

   #include <stdio.h>
   #include <iostream>
   #include "opencv2/core/core.hpp"
   #include "opencv2/features2d/features2d.hpp"
   #include "opencv2/highgui/highgui.hpp"

   using namespace cv;

   void readme();

   /** @function main */
   int main( int argc, char** argv )
   {
     if( argc != 3 )
     { readme(); return -1; }

     Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
     Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

     if( !img_1.data || !img_2.data )
     { std::cout << " --(!) Error reading images " << std::endl; return -1; }

     //-- Step 1: Detect the keypoints using SURF Detector
     int minHessian = 400;

     SurfFeatureDetector detector( minHessian );

     std::vector<KeyPoint> keypoints_1, keypoints_2;

     detector.detect( img_1, keypoints_1 );
     detector.detect( img_2, keypoints_2 );

     //-- Step 2: Calculate descriptors (feature vectors)
     SurfDescriptorExtractor extractor;

     Mat descriptors_1, descriptors_2;

     extractor.compute( img_1, keypoints_1, descriptors_1 );
     extractor.compute( img_2, keypoints_2, descriptors_2 );

     //-- Step 3: Matching descriptor vectors with a brute force matcher
     BruteForceMatcher< L2<float> > matcher;
     std::vector< DMatch > matches;
     matcher.match( descriptors_1, descriptors_2, matches );

     //-- Draw matches
     Mat img_matches;
     drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );

     //-- Show detected matches
     imshow("Matches", img_matches );

     waitKey(0);

     return 0;
   }

   /** @function readme */
   void readme()
   { std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }

Explanation
============

Result
======

#. Here is the result after applying the BruteForce matcher between the two original images:

   .. image:: images/Feature_Description_BruteForce_Result.jpg
      :align: center
      :height: 200pt
Binary file not shown (new image added, 117 KiB).
@@ -0,0 +1,97 @@
.. _feature_detection:

Feature Detection
******************

Goal
=====

In this tutorial you will learn how to:

.. container:: enumeratevisibleitemswithsquare

   * Use the :feature_detector:`FeatureDetector<>` interface in order to find interest points. Specifically:

     * Use the :surf_feature_detector:`SurfFeatureDetector<>` and its function :feature_detector_detect:`detect<>` to perform the detection process.
     * Use the function :draw_keypoints:`drawKeypoints<>` to draw the detected keypoints.

Theory
======

Code
====

This tutorial's code is shown in the lines below. You can also download it from `here <https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_

.. code-block:: cpp

   #include <stdio.h>
   #include <iostream>
   #include "opencv2/core/core.hpp"
   #include "opencv2/features2d/features2d.hpp"
   #include "opencv2/highgui/highgui.hpp"

   using namespace cv;

   void readme();

   /** @function main */
   int main( int argc, char** argv )
   {
     if( argc != 3 )
     { readme(); return -1; }

     Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
     Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

     if( !img_1.data || !img_2.data )
     { std::cout << " --(!) Error reading images " << std::endl; return -1; }

     //-- Step 1: Detect the keypoints using SURF Detector
     int minHessian = 400;

     SurfFeatureDetector detector( minHessian );

     std::vector<KeyPoint> keypoints_1, keypoints_2;

     detector.detect( img_1, keypoints_1 );
     detector.detect( img_2, keypoints_2 );

     //-- Draw keypoints
     Mat img_keypoints_1; Mat img_keypoints_2;

     drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
     drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

     //-- Show detected (drawn) keypoints
     imshow("Keypoints 1", img_keypoints_1 );
     imshow("Keypoints 2", img_keypoints_2 );

     waitKey(0);

     return 0;
   }

   /** @function readme */
   void readme()
   { std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }

Explanation
============

Result
======

#. Here is the result of the feature detection applied to the first image:

   .. image:: images/Feature_Detection_Result_a.jpg
      :align: center
      :height: 125pt

#. And here is the result for the second image:

   .. image:: images/Feature_Detection_Result_b.jpg
      :align: center
      :height: 200pt
Binary file not shown (new image added, 73 KiB).
Binary file not shown (new image added, 34 KiB).
@@ -3,7 +3,7 @@

*feature2d* module. 2D Features framework
-----------------------------------------------------------

Learn about how to use the feature points detectors, descriptors and matching framework found inside OpenCV.

.. include:: ../../definitions/tocDefinitions.rst

@@ -86,6 +86,84 @@ Learn about how to use the feature points detectors, descriptors and matching f
     :height: 90pt
     :width: 90pt

+
  .. tabularcolumns:: m{100pt} m{300pt}
  .. cssclass:: toctableopencv

  ===================== ==============================================
  |FeatureDetect|       **Title:** :ref:`feature_detection`

                        *Compatibility:* > OpenCV 2.0

                        *Author:* |Author_AnaH|

                        In this tutorial, you will use *features2d* to detect interest points.

  ===================== ==============================================

  .. |FeatureDetect| image:: images/Feature_Detection_Tutorial_Cover.jpg
     :height: 90pt
     :width: 90pt

+
  .. tabularcolumns:: m{100pt} m{300pt}
  .. cssclass:: toctableopencv

  ===================== ==============================================
  |FeatureDescript|     **Title:** :ref:`feature_description`

                        *Compatibility:* > OpenCV 2.0

                        *Author:* |Author_AnaH|

                        In this tutorial, you will use *features2d* to calculate feature vectors.

  ===================== ==============================================

  .. |FeatureDescript| image:: images/Feature_Description_Tutorial_Cover.jpg
     :height: 90pt
     :width: 90pt

+
  .. tabularcolumns:: m{100pt} m{300pt}
  .. cssclass:: toctableopencv

  ===================== ==============================================
  |FeatureFlann|        **Title:** :ref:`feature_flann_matcher`

                        *Compatibility:* > OpenCV 2.0

                        *Author:* |Author_AnaH|

                        In this tutorial, you will use the FLANN-based matcher to match feature descriptors efficiently.

  ===================== ==============================================

  .. |FeatureFlann| image:: images/Feature_Detection_Tutorial_Cover.jpg
     :height: 90pt
     :width: 90pt

+
  .. tabularcolumns:: m{100pt} m{300pt}
  .. cssclass:: toctableopencv

  ===================== ==============================================
  |FeatureHomo|         **Title:** :ref:`feature_homography`

                        *Compatibility:* > OpenCV 2.0

                        *Author:* |Author_AnaH|

                        In this tutorial, you will use *features2d* and *calib3d* to detect a known object in a scene.

  ===================== ==============================================

  .. |FeatureHomo| image:: images/Feature_Detection_Tutorial_Cover.jpg
     :height: 90pt
     :width: 90pt

+
  .. tabularcolumns:: m{100pt} m{300pt}
  .. cssclass:: toctableopencv