Added features2d section
commit afd42eed4b (parent 1a0d41fb53)
@@ -98,8 +98,8 @@ backgroundcolor=\color{shadecolor}
 \Pyfalse
 \CPyfalse
 \def\targetlang{cpp}
-\part{C++ API Reference}
-\input{opencv_guide_body}
+\part{C++ API User Guide}
+\input{user_guide/opencv_guide_body}

 \addcontentsline{toc}{part}{Index}
 \printindex
@@ -1,5 +1,7 @@

 \chapter{cv::Mat. Operations with images.}
 \renewcommand{\curModule}{cv::Mat. Operations with images.}
-\input{user_mat}
-
+\input{user_guide/user_mat}
+\chapter{Features2d.}
+\renewcommand{\curModule}{Features2d}
+\input{user_guide/user_features2d}
doc/user_guide/user_features2d.tex (new file, 97 lines)
@@ -0,0 +1,97 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                                    %
%                                        C++                                         %
%                                                                                    %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\ifCpp
\section{Detectors}
\section{Descriptors}
\section{Matching keypoints}

\subsection{The code}
We will start with a short sample, opencv/samples/cpp/matcher\_simple.cpp:
\begin{lstlisting}
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if(img1.empty() || img2.empty())
{
    printf("Can't read one of the images\n");
    return -1;
}

// detecting keypoints
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);

// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);

// matching descriptors
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);

// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
waitKey(0);
\end{lstlisting}
\subsection{The code explained}
Let us break the code down.
\begin{lstlisting}
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if(img1.empty() || img2.empty())
{
    printf("Can't read one of the images\n");
    return -1;
}
\end{lstlisting}
We load two images and check whether they were loaded correctly.
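
In the full sample the image paths come from the command line, so a guard on \texttt{argc} (a small sketch, not part of the excerpt above) keeps the program from reading past the argument list:
\begin{lstlisting}
if(argc != 3)
{
    printf("Usage: matcher_simple <image1> <image2>\n");
    return -1;
}
\end{lstlisting}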

\begin{lstlisting}
// detecting keypoints
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
\end{lstlisting}
First, we create an instance of a keypoint detector. All detectors inherit the abstract FeatureDetector interface, but the constructors are algorithm-dependent. The first argument to each detector usually controls the trade-off between the number of keypoints and their stability. Its range differs from detector to detector\footnote{For instance, the FAST threshold is a difference in pixel intensity and usually lies in the range \([0,40]\), whereas the SURF threshold is applied to the Hessian of the image and usually takes values larger than \(100\).}, so use the default values when in doubt.
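
Because every detector implements the same FeatureDetector interface, the concrete algorithm can be swapped without touching the calling code. A minimal sketch, assuming a hypothetical \texttt{useFast} flag (the thresholds are the illustrative values from the footnote):
\begin{lstlisting}
// choose a detector at run time; detect() is the same either way
Ptr<FeatureDetector> detector;
bool useFast = false; // hypothetical configuration flag
if(useFast)
    detector = new FastFeatureDetector(15);  // intensity threshold
else
    detector = new SurfFeatureDetector(400); // Hessian threshold

vector<KeyPoint> keypoints;
detector->detect(img1, keypoints);
\end{lstlisting}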

\begin{lstlisting}
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
\end{lstlisting}
We create an instance of a descriptor extractor. Most OpenCV descriptor extractors inherit the abstract DescriptorExtractor interface. We then compute descriptors for the keypoints. The output \texttt{Mat} of the \texttt{DescriptorExtractor::compute} method stores the descriptor of the \(i\)-th keypoint in row \(i\). Note that the method can modify the keypoints vector: it removes keypoints for which a descriptor cannot be computed (usually those near the image border), and it keeps the output keypoints and descriptors consistent with each other, so the number of keypoints always equals the number of descriptor rows.
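
A short illustration of that layout, using the variables from the sample: row \(i\) of the output matrix can be taken as the descriptor of the \(i\)-th keypoint.
\begin{lstlisting}
// one descriptor per row: the row count matches the keypoint count
CV_Assert(descriptors1.rows == (int)keypoints1.size());
Mat firstDescriptor = descriptors1.row(0); // descriptor of keypoints1[0]
printf("%d keypoints, descriptor length %d\n",
       descriptors1.rows, descriptors1.cols);
\end{lstlisting}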

\begin{lstlisting}
// matching descriptors
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
\end{lstlisting}
Now that we have descriptors for both images, we can match them. First, we create a matcher that, for each descriptor from the first image, performs an exhaustive search for the nearest descriptor in the second image using the Euclidean metric. The Manhattan distance is also implemented, as is the Hamming distance for the BRIEF descriptor. The output vector \texttt{matches} contains pairs of corresponding descriptor indices.
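
Each \texttt{DMatch} in the output stores the two indices (\texttt{queryIdx}, \texttt{trainIdx}) together with the distance between the descriptors, so the raw matches can, for instance, be pruned by distance. A minimal sketch (the threshold is an arbitrary illustrative value and depends on the descriptor):
\begin{lstlisting}
// keep only matches whose descriptor distance is small enough
vector<DMatch> good_matches;
for(size_t i = 0; i < matches.size(); i++)
{
    if(matches[i].distance < 0.25f) // illustrative threshold
        good_matches.push_back(matches[i]);
}
\end{lstlisting}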

\begin{lstlisting}
// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
waitKey(0);
\end{lstlisting}
The final part of the sample visualizes the matching results: \texttt{drawMatches} renders the two images side by side with lines connecting the matched keypoints, and \texttt{waitKey(0)} keeps the window open until a key is pressed.
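
When no GUI is available, the same visualization could be written to disk instead (a sketch; the file name is arbitrary):
\begin{lstlisting}
imwrite("matches.png", img_matches); // save instead of showing a window
\end{lstlisting}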

\fi