\ifCpp
\section{Common Interfaces of Feature Detectors}
Feature detectors in OpenCV have wrappers with a common interface that makes it easy to switch
between different algorithms solving the same problem. All objects that implement keypoint detectors
inherit the \cvCppCross{FeatureDetector} interface.
\cvclass{KeyPoint}
Data structure for salient point detectors.
\begin{lstlisting}
class KeyPoint
{
public:
// the default constructor
KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0),
class_id(-1) {}
// the full constructor
KeyPoint(Point2f _pt, float _size, float _angle=-1,
float _response=0, int _octave=0, int _class_id=-1)
: pt(_pt), size(_size), angle(_angle), response(_response),
octave(_octave), class_id(_class_id) {}
// another form of the full constructor
KeyPoint(float x, float y, float _size, float _angle=-1,
float _response=0, int _octave=0, int _class_id=-1)
: pt(x, y), size(_size), angle(_angle), response(_response),
octave(_octave), class_id(_class_id) {}
// converts vector of keypoints to vector of points
static void convert(const std::vector<KeyPoint>& keypoints,
std::vector<Point2f>& points2f,
const std::vector<int>& keypointIndexes=std::vector<int>());
// converts vector of points to the vector of keypoints, where each
// keypoint is assigned the same size and the same orientation
static void convert(const std::vector<Point2f>& points2f,
std::vector<KeyPoint>& keypoints,
float size=1, float response=1, int octave=0,
int class_id=-1);
// computes overlap for pair of keypoints;
// overlap is a ratio between area of keypoint regions intersection and
// area of keypoint regions union (now keypoint region is circle)
static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);
Point2f pt; // coordinates of the keypoint
float size; // diameter of the meaningful keypoint neighborhood
float angle; // computed orientation of the keypoint (-1 if not applicable)
float response; // the response by which the strongest keypoints
// have been selected. Can be used for further sorting
// or subsampling
int octave; // octave (pyramid layer) from which the keypoint has been extracted
int class_id; // object class (if the keypoints need to be clustered by
// an object they belong to)
};
// writes vector of keypoints to the file storage
void write(FileStorage& fs, const string& name, const vector<KeyPoint>& keypoints);
// reads vector of keypoints from the specified file storage node
void read(const FileNode& node, CV_OUT vector<KeyPoint>& keypoints);
\end{lstlisting}
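For illustration, keypoints returned by a detector can be converted to plain \texttt{Point2f} centers (e.g. to pass them to optical flow), and the overlap of two keypoint regions can be measured. A minimal sketch (it assumes that \texttt{keypoints} was filled by some detector and that the \texttt{cv} and \texttt{std} namespaces are used):
\begin{lstlisting}
// convert detected keypoints to their Point2f centers
vector<Point2f> points;
KeyPoint::convert( keypoints, points );

// ratio of intersection to union of the two keypoint regions, in [0,1]
if( keypoints.size() >= 2 )
{
    float regionsOverlap = KeyPoint::overlap( keypoints[0], keypoints[1] );
}
\end{lstlisting}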
\cvclass{FeatureDetector}
Abstract base class for 2D image feature detectors.
\begin{lstlisting}
class CV_EXPORTS FeatureDetector
{
public:
virtual ~FeatureDetector();
void detect( const Mat& image, vector<KeyPoint>& keypoints,
const Mat& mask=Mat() ) const;
void detect( const vector<Mat>& images,
vector<vector<KeyPoint> >& keypoints,
const vector<Mat>& masks=vector<Mat>() ) const;
virtual void read(const FileNode&);
virtual void write(FileStorage&) const;
static Ptr<FeatureDetector> create( const string& detectorType );
protected:
...
};
\end{lstlisting}
\cvCppFunc{FeatureDetector::detect}
Detect keypoints in an image (first variant) or image set (second variant).
\cvdefCpp{
void FeatureDetector::detect( const Mat\& image,
\par vector<KeyPoint>\& keypoints,
\par const Mat\& mask=Mat() ) const;
}
\begin{description}
\cvarg{image}{The image.}
\cvarg{keypoints}{The detected keypoints.}
\cvarg{mask}{Mask specifying where to look for keypoints (optional). Must be a char matrix
with non-zero values in the region of interest.}
\end{description}
\cvdefCpp{
void FeatureDetector::detect( const vector<Mat>\& images,
\par vector<vector<KeyPoint> >\& keypoints,
\par const vector<Mat>\& masks=vector<Mat>() ) const;
}
\begin{description}
\cvarg{images}{Image set.}
\cvarg{keypoints}{Collection of keypoints detected in the input images. \texttt{keypoints[i]} is the set of keypoints detected in \texttt{images[i]}.}
\cvarg{masks}{Masks for each input image specifying where to look for keypoints (optional). \texttt{masks[i]} is a mask for \texttt{images[i]}.
Each element of the \texttt{masks} vector must be a char matrix with non-zero values in the region of interest.}
\end{description}
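A minimal usage sketch of the first variant (the file name and the FAST threshold are only an illustration; it assumes \texttt{opencv2/features2d/features2d.hpp} and \texttt{opencv2/highgui/highgui.hpp} are included and the \texttt{cv} and \texttt{std} namespaces are used):
\begin{lstlisting}
Mat img = imread( "image.png", 0 ); // load the image as grayscale
FastFeatureDetector detector( 30 ); // FAST with threshold 30
vector<KeyPoint> keypoints;
detector.detect( img, keypoints );
\end{lstlisting}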
\cvCppFunc{FeatureDetector::read}
Read feature detector object from file node.
\cvdefCpp{
void FeatureDetector::read( const FileNode\& fn );
}
\begin{description}
\cvarg{fn}{File node from which detector will be read.}
\end{description}
\cvCppFunc{FeatureDetector::write}
Write feature detector object to file storage.
\cvdefCpp{
void FeatureDetector::write( FileStorage\& fs ) const;
}
\begin{description}
\cvarg{fs}{File storage in which detector will be written.}
\end{description}
\cvCppFunc{FeatureDetector::create}
Feature detector factory that creates a \cvCppCross{FeatureDetector} of the given type with
default parameters (rather than using the default constructor).
\begin{lstlisting}
Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType );
\end{lstlisting}
\begin{description}
\cvarg{detectorType}{Feature detector type.}
\end{description}
Currently the following detector types are supported:\\
\texttt{"FAST"} -- \cvCppCross{FastFeatureDetector},\\
\texttt{"STAR"} -- \cvCppCross{StarFeatureDetector},\\
\texttt{"SIFT"} -- \cvCppCross{SiftFeatureDetector}, \\
\texttt{"SURF"} -- \cvCppCross{SurfFeatureDetector}, \\
\texttt{"MSER"} -- \cvCppCross{MserFeatureDetector}, \\
\texttt{"GFTT"} -- \cvCppCross{GfttFeatureDetector}, \\
\texttt{"HARRIS"} -- \cvCppCross{HarrisFeatureDetector}. \\
A combined format is also supported: feature detector adapter name (\texttt{"Grid"} --
\cvCppCross{GridAdaptedFeatureDetector}, \texttt{"Pyramid"} --
\cvCppCross{PyramidAdaptedFeatureDetector}) + feature detector name (see above),
e.g. \texttt{"GridFAST"}, \texttt{"PyramidSTAR"}, etc.
\cvclass{FastFeatureDetector}
Wrapping class for feature detection using \cvCppCross{FAST} method.
\begin{lstlisting}
class FastFeatureDetector : public FeatureDetector
{
public:
FastFeatureDetector( int threshold=1, bool nonmaxSuppression=true );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
\cvclass{GoodFeaturesToTrackDetector}
Wrapping class for feature detection using \cvCppCross{goodFeaturesToTrack} function.
\begin{lstlisting}
class GoodFeaturesToTrackDetector : public FeatureDetector
{
public:
class Params
{
public:
Params( int maxCorners=1000, double qualityLevel=0.01,
double minDistance=1., int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
void read( const FileNode& fn );
void write( FileStorage& fs ) const;
int maxCorners;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double k;
};
GoodFeaturesToTrackDetector( const GoodFeaturesToTrackDetector::Params& params=
GoodFeaturesToTrackDetector::Params() );
GoodFeaturesToTrackDetector( int maxCorners, double qualityLevel,
double minDistance, int blockSize=3,
bool useHarrisDetector=false, double k=0.04 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
\cvclass{MserFeatureDetector}
Wrapping class for feature detection using \cvCppCross{MSER} class.
\begin{lstlisting}
class MserFeatureDetector : public FeatureDetector
{
public:
MserFeatureDetector( CvMSERParams params=cvMSERParams() );
MserFeatureDetector( int delta, int minArea, int maxArea,
double maxVariation, double minDiversity,
int maxEvolution, double areaThreshold,
double minMargin, int edgeBlurSize );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
\cvclass{StarFeatureDetector}
Wrapping class for feature detection using \cvCppCross{StarDetector} class.
\begin{lstlisting}
class StarFeatureDetector : public FeatureDetector
{
public:
StarFeatureDetector( int maxSize=16, int responseThreshold=30,
int lineThresholdProjected = 10,
int lineThresholdBinarized=8, int suppressNonmaxSize=5 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
\cvclass{SiftFeatureDetector}
Wrapping class for feature detection using \cvCppCross{SIFT} class.
\begin{lstlisting}
class SiftFeatureDetector : public FeatureDetector
{
public:
SiftFeatureDetector(
const SIFT::DetectorParams& detectorParams=SIFT::DetectorParams(),
const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
SiftFeatureDetector( double threshold, double edgeThreshold,
int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
\cvclass{SurfFeatureDetector}
Wrapping class for feature detection using \cvCppCross{SURF} class.
\begin{lstlisting}
class SurfFeatureDetector : public FeatureDetector
{
public:
SurfFeatureDetector( double hessianThreshold = 400., int octaves = 3,
int octaveLayers = 4 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
\cvclass{GridAdaptedFeatureDetector}
Adapts a detector to partition the source image into a grid and detect
points in each cell.
\begin{lstlisting}
class GridAdaptedFeatureDetector : public FeatureDetector
{
public:
/*
* detector Detector that will be adapted.
* maxTotalKeypoints Maximum count of keypoints detected on the image.
* Only the strongest keypoints will be kept.
* gridRows Grid rows count.
* gridCols Grid column count.
*/
GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
int maxTotalKeypoints, int gridRows=4,
int gridCols=4 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
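A possible way to construct the adapter directly (a sketch; the wrapped detector and the parameter values are only an illustration):
\begin{lstlisting}
// keep at most 500 of the strongest FAST keypoints,
// distributed over a 4x4 grid of cells
GridAdaptedFeatureDetector detector( new FastFeatureDetector( 10 ), 500, 4, 4 );
vector<KeyPoint> keypoints;
detector.detect( img, keypoints );
\end{lstlisting}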
\cvclass{PyramidAdaptedFeatureDetector}
Adapts a detector to detect points over multiple levels of a Gaussian
pyramid. Useful for detectors that are not inherently scaled.
\begin{lstlisting}
class PyramidAdaptedFeatureDetector : public FeatureDetector
{
public:
PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
int levels=2 );
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
protected:
...
};
\end{lstlisting}
%dynamic detectors doc
\input{features2d_dynamic_detectors}
\section{Common Interfaces of Descriptor Extractors}
Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that makes it easy to switch
between different algorithms solving the same problem. This section is devoted to computing descriptors
that are represented as vectors in a multidimensional space. All objects that implement ``vector''
descriptor extractors inherit the \cvCppCross{DescriptorExtractor} interface.
\cvclass{DescriptorExtractor}
Abstract base class for computing descriptors for image keypoints.
\begin{lstlisting}
class CV_EXPORTS DescriptorExtractor
{
public:
virtual ~DescriptorExtractor();
void compute( const Mat& image, vector<KeyPoint>& keypoints,
Mat& descriptors ) const;
void compute( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints,
vector<Mat>& descriptors ) const;
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const = 0;
virtual int descriptorType() const = 0;
static Ptr<DescriptorExtractor> create( const string& descriptorExtractorType );
protected:
...
};
\end{lstlisting}
In this interface we assume a keypoint descriptor can be represented as a
dense, fixed-dimensional vector of some basic type. Most descriptors used
in practice follow this pattern, as it makes it very easy to compute
distances between descriptors. Therefore we represent a collection of
descriptors as a \cvCppCross{Mat}, where each row is one keypoint descriptor.
\cvCppFunc{DescriptorExtractor::compute}
Compute the descriptors for a set of keypoints detected in an image (first variant)
or image set (second variant).
\cvdefCpp{
void DescriptorExtractor::compute( const Mat\& image,
\par vector<KeyPoint>\& keypoints,
\par Mat\& descriptors ) const;
}
\begin{description}
\cvarg{image}{The image.}
\cvarg{keypoints}{The keypoints. Keypoints for which a descriptor cannot be computed are removed.}
\cvarg{descriptors}{The descriptors. Row i is the descriptor for keypoint i.}
\end{description}
\cvdefCpp{
void DescriptorExtractor::compute( const vector<Mat>\& images,
\par vector<vector<KeyPoint> >\& keypoints,
\par vector<Mat>\& descriptors ) const;
}
\begin{description}
\cvarg{images}{The image set.}
\cvarg{keypoints}{Input keypoint collection. \texttt{keypoints[i]} are the keypoints
detected in \texttt{images[i]}. Keypoints for which a descriptor
cannot be computed are removed.}
\cvarg{descriptors}{Descriptor collection. \texttt{descriptors[i]} are the descriptors computed for
the set \texttt{keypoints[i]}.}
\end{description}
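Typically the keypoints are produced by a \cvCppCross{FeatureDetector} first and then passed to the extractor; a sketch of the first variant (the SURF parameters are illustrative, \texttt{img} is a loaded grayscale image):
\begin{lstlisting}
SurfFeatureDetector detector( 400. );
SurfDescriptorExtractor extractor;

vector<KeyPoint> keypoints;
detector.detect( img, keypoints );

Mat descriptors; // one row per keypoint
extractor.compute( img, keypoints, descriptors );
\end{lstlisting}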
\cvCppFunc{DescriptorExtractor::read}
Read descriptor extractor object from file node.
\cvdefCpp{
void DescriptorExtractor::read( const FileNode\& fn );
}
\begin{description}
\cvarg{fn}{File node from which the descriptor extractor will be read.}
\end{description}
\cvCppFunc{DescriptorExtractor::write}
Write descriptor extractor object to file storage.
\cvdefCpp{
void DescriptorExtractor::write( FileStorage\& fs ) const;
}
\begin{description}
\cvarg{fs}{File storage in which the descriptor extractor will be written.}
\end{description}
\cvCppFunc{DescriptorExtractor::create}
Descriptor extractor factory that creates a \cvCppCross{DescriptorExtractor} of the given type with
default parameters (rather than using the default constructor).
\begin{lstlisting}
Ptr<DescriptorExtractor>
DescriptorExtractor::create( const string& descriptorExtractorType );
\end{lstlisting}
\begin{description}
\cvarg{descriptorExtractorType}{Descriptor extractor type.}
\end{description}
Currently the following descriptor extractor types are supported:\\
\texttt{"SIFT"} -- \cvCppCross{SiftDescriptorExtractor},\\
\texttt{"SURF"} -- \cvCppCross{SurfDescriptorExtractor},\\
\texttt{"BRIEF"} -- \cvCppCross{BriefDescriptorExtractor}.\\
A combined format is also supported: descriptor extractor adapter name (\texttt{"Opponent"} --
\cvCppCross{OpponentColorDescriptorExtractor}) + descriptor extractor name (see above),
e.g. \texttt{"OpponentSIFT"}, etc.
\cvclass{SiftDescriptorExtractor}
Wrapping class for computing descriptors using the \cvCppCross{SIFT} class.
\begin{lstlisting}
class SiftDescriptorExtractor : public DescriptorExtractor
{
public:
SiftDescriptorExtractor(
const SIFT::DescriptorParams& descriptorParams=SIFT::DescriptorParams(),
const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
SiftDescriptorExtractor( double magnification, bool isNormalize=true,
bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
virtual void read (const FileNode &fn);
virtual void write (FileStorage &fs) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
};
\end{lstlisting}
\cvclass{SurfDescriptorExtractor}
Wrapping class for computing descriptors using the \cvCppCross{SURF} class.
\begin{lstlisting}
class SurfDescriptorExtractor : public DescriptorExtractor
{
public:
SurfDescriptorExtractor( int nOctaves=4,
int nOctaveLayers=2, bool extended=false );
virtual void read (const FileNode &fn);
virtual void write (FileStorage &fs) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
};
\end{lstlisting}
\cvclass{CalonderDescriptorExtractor}
Wrapping class for computing descriptors using the \cvCppCross{RTreeClassifier} class.
\begin{lstlisting}
template<typename T>
class CalonderDescriptorExtractor : public DescriptorExtractor
{
public:
CalonderDescriptorExtractor( const string& classifierFile );
virtual void read( const FileNode &fn );
virtual void write( FileStorage &fs ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
};
\end{lstlisting}
\cvclass{OpponentColorDescriptorExtractor}
Adapts a descriptor extractor to compute descriptors in the Opponent Color Space
(refer to van de Sande et al., CGIV 2008 ``Color Descriptors for Object Category Recognition'').
The input RGB image is transformed into the Opponent Color Space. Then the unadapted descriptor extractor
(set in the constructor) computes descriptors on each of the three channels and concatenates
them into a single color descriptor.
\begin{lstlisting}
class OpponentColorDescriptorExtractor : public DescriptorExtractor
{
public:
OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
};
\end{lstlisting}
\cvclass{BriefDescriptorExtractor}
Class for computing BRIEF descriptors described in the paper by Calonder M., Lepetit V.,
Strecha C., Fua P.: ``BRIEF: Binary Robust Independent Elementary Features'',
11th European Conference on Computer Vision (ECCV), Heraklion, Crete, LNCS Springer, September 2010.
\begin{lstlisting}
class BriefDescriptorExtractor : public DescriptorExtractor
{
public:
static const int PATCH_SIZE = 48;
static const int KERNEL_SIZE = 9;
// bytes is the length of the descriptor in bytes. It can be 16, 32 or 64 bytes.
BriefDescriptorExtractor( int bytes = 32 );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
protected:
...
};
\end{lstlisting}
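BRIEF descriptors are binary strings packed into bytes, so they are usually compared with the Hamming distance (see \cvCppCross{BruteForceMatcher} in the next section). A sketch, assuming \texttt{keypoints1} and \texttt{keypoints2} were detected on \texttt{img1} and \texttt{img2} beforehand:
\begin{lstlisting}
BriefDescriptorExtractor extractor( 32 ); // 32-byte (256-bit) descriptors
Mat descriptors1, descriptors2;
extractor.compute( img1, keypoints1, descriptors1 );
extractor.compute( img2, keypoints2, descriptors2 );

BruteForceMatcher<Hamming> matcher;
vector<DMatch> matches;
matcher.match( descriptors1, descriptors2, matches );
\end{lstlisting}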
\section{Common Interfaces of Descriptor Matchers}
Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that makes it easy to switch
between different algorithms solving the same problem. This section is devoted to matching descriptors
that are represented as vectors in a multidimensional space. All objects that implement ``vector''
descriptor matchers inherit the \cvCppCross{DescriptorMatcher} interface.
\cvclass{DMatch}
Match between two keypoint descriptors: query descriptor index,
train descriptor index, train image index, and the distance between the descriptors.
\begin{lstlisting}
struct DMatch
{
DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1),
distance(std::numeric_limits<float>::max()) {}
DMatch( int _queryIdx, int _trainIdx, float _distance ) :
queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1),
distance(_distance) {}
DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) :
queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx),
distance(_distance) {}
int queryIdx; // query descriptor index
int trainIdx; // train descriptor index
int imgIdx; // train image index
float distance;
// less is better
bool operator<( const DMatch &m ) const;
};
\end{lstlisting}
\cvclass{DescriptorMatcher}
Abstract base class for matching keypoint descriptors. It has two groups
of match methods: for matching descriptors of one image with another image or
with an image set.
\begin{lstlisting}
class DescriptorMatcher
{
public:
virtual ~DescriptorMatcher();
virtual void add( const vector<Mat>& descriptors );
const vector<Mat>& getTrainDescriptors() const;
virtual void clear();
bool empty() const;
virtual bool isMaskSupported() const = 0;
virtual void train();
/*
* Group of methods to match descriptors from image pair.
*/
void match( const Mat& queryDescriptors, const Mat& trainDescriptors,
vector<DMatch>& matches, const Mat& mask=Mat() ) const;
void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors,
vector<vector<DMatch> >& matches, int k,
const Mat& mask=Mat(), bool compactResult=false ) const;
void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors,
vector<vector<DMatch> >& matches, float maxDistance,
const Mat& mask=Mat(), bool compactResult=false ) const;
/*
* Group of methods to match descriptors from one image to image set.
*/
void match( const Mat& queryDescriptors, vector<DMatch>& matches,
const vector<Mat>& masks=vector<Mat>() );
void knnMatch( const Mat& queryDescriptors, vector<vector<DMatch> >& matches,
int k, const vector<Mat>& masks=vector<Mat>(),
bool compactResult=false );
void radiusMatch( const Mat& queryDescriptors, vector<vector<DMatch> >& matches,
float maxDistance, const vector<Mat>& masks=vector<Mat>(),
bool compactResult=false );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
static Ptr<DescriptorMatcher> create( const string& descriptorMatcherType );
protected:
vector<Mat> trainDescCollection;
...
};
\end{lstlisting}
\cvCppFunc{DescriptorMatcher::add}
Adds descriptors to the train descriptor collection. If the collection \texttt{trainDescCollection} is not empty,
the new descriptors are added to the existing train descriptors.
\cvdefCpp{
void add( const vector<Mat>\& descriptors );
}
\begin{description}
\cvarg{descriptors}{Descriptors to add. Each \texttt{descriptors[i]} is a set of descriptors
from the same (one) train image.}
\end{description}
\cvCppFunc{DescriptorMatcher::getTrainDescriptors}
Returns a constant reference to the train descriptor collection (i.e. \texttt{trainDescCollection}).
\cvdefCpp{
const vector<Mat>\& getTrainDescriptors() const;
}
\cvCppFunc{DescriptorMatcher::clear}
Clear train descriptor collection.
\cvdefCpp{
void DescriptorMatcher::clear();
}
\cvCppFunc{DescriptorMatcher::empty}
Returns true if there are no train descriptors in the collection.
\cvdefCpp{
bool DescriptorMatcher::empty() const;
}
\cvCppFunc{DescriptorMatcher::isMaskSupported}
Returns true if the descriptor matcher supports masking of permissible matches.
\cvdefCpp{
bool DescriptorMatcher::isMaskSupported() const;
}
\cvCppFunc{DescriptorMatcher::train}
Train the descriptor matcher (e.g. train the flann index). The \texttt{train()} method is run
every time before matching, in all the match methods. Some descriptor matchers (e.g. \texttt{BruteForceMatcher})
have an empty implementation of this method; other matchers really train their inner structures
(e.g. \texttt{FlannBasedMatcher} trains \cvCppCross{flann::Index}).
\cvdefCpp{
void DescriptorMatcher::train();
}
\cvCppFunc{DescriptorMatcher::match}
Find the best match for each descriptor from a query set among the train descriptors.
The query descriptors are supposed to belong to keypoints detected on the same query image.
In the first variant of this method the train descriptors are passed as an input argument and are
supposed to belong to keypoints detected on the same train image. In the second variant
the train descriptor collection that was set using the \texttt{add} method is used.
An optional mask (or masks) can be passed to specify which descriptors can be matched:
\texttt{queryDescriptors[i]} can be matched with \texttt{trainDescriptors[j]} only if
\texttt{mask.at<uchar>(i,j)} is non-zero.
\cvdefCpp{
void DescriptorMatcher::match( const Mat\& queryDescriptors,
\par const Mat\& trainDescriptors,
\par vector<DMatch>\& matches,
\par const Mat\& mask=Mat() ) const;
}
\cvdefCpp{
void DescriptorMatcher::match( const Mat\& queryDescriptors,
\par vector<DMatch>\& matches,
\par const vector<Mat>\& masks=vector<Mat>() );
}
\begin{description}
\cvarg{queryDescriptors}{Query set of descriptors.}
\cvarg{trainDescriptors}{Train set of descriptors. It is not added to the train descriptor collection
stored in the class object.}
\cvarg{matches}{Matches. If a query descriptor is masked out in \texttt{mask}, no match is added for this descriptor,
so the size of \texttt{matches} may be smaller than the number of query descriptors.}
\cvarg{mask}{Mask specifying permissible matches between the input query and train matrices of descriptors.}
\cvarg{masks}{The set of masks. Each \texttt{masks[i]} specifies permissible matches between the input query descriptors
and the stored train descriptors from the i-th image (i.e. \texttt{trainDescCollection[i]}).}
\end{description}
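A sketch of the first variant (the descriptor matrices are assumed to have been computed already, e.g. as in \cvCppCross{DescriptorExtractor::compute}):
\begin{lstlisting}
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match( queryDescriptors, trainDescriptors, matches );
// matches[i].queryIdx and matches[i].trainIdx index
// rows of queryDescriptors and trainDescriptors respectively
\end{lstlisting}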
\cvCppFunc{DescriptorMatcher::knnMatch}
Find the k best matches for each descriptor from a query set among the train descriptors.
The found k (or fewer, if not possible) matches are returned in order of increasing distance.
See \cvCppCross{DescriptorMatcher::match} for details about the query and train descriptors.
\cvdefCpp{
void DescriptorMatcher::knnMatch( const Mat\& queryDescriptors,
\par const Mat\& trainDescriptors,
\par vector<vector<DMatch> >\& matches,
\par int k, const Mat\& mask=Mat(),
\par bool compactResult=false ) const;
}
\cvdefCpp{
void DescriptorMatcher::knnMatch( const Mat\& queryDescriptors,
\par vector<vector<DMatch> >\& matches, int k,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\begin{description}
\cvarg{queryDescriptors, trainDescriptors, mask, masks}{See \cvCppCross{DescriptorMatcher::match}.}
\cvarg{matches}{Matches. Each \texttt{matches[i]} contains k or fewer matches for the same query descriptor.}
\cvarg{k}{Count of best matches found per each query descriptor (or fewer if it is not possible).}
\cvarg{compactResult}{Used only when a mask (or masks) is not empty. If \texttt{compactResult} is false,
the \texttt{matches} vector has the same size as \texttt{queryDescriptors} has rows. If \texttt{compactResult}
is true, the \texttt{matches} vector does not contain matches for fully masked out query descriptors.}
\end{description}
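A common use of \texttt{knnMatch} is the ratio test: a match is kept only if the nearest neighbor is significantly closer than the second nearest one. A sketch (the 0.6 ratio is only an illustration):
\begin{lstlisting}
BruteForceMatcher<L2<float> > matcher;
vector<vector<DMatch> > knnMatches;
matcher.knnMatch( queryDescriptors, trainDescriptors, knnMatches, 2 );

vector<DMatch> goodMatches;
for( size_t i = 0; i < knnMatches.size(); i++ )
{
    if( knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.6f * knnMatches[i][1].distance )
        goodMatches.push_back( knnMatches[i][0] );
}
\end{lstlisting}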
\cvCppFunc{DescriptorMatcher::radiusMatch}
Find all the matches for each query descriptor that have a distance smaller than the given threshold.
The found matches are returned in order of increasing distance. See \cvCppCross{DescriptorMatcher::match}
for details about the query and train descriptors.
\cvdefCpp{
void DescriptorMatcher::radiusMatch( const Mat\& queryDescriptors,
\par const Mat\& trainDescriptors,
\par vector<vector<DMatch> >\& matches,
\par float maxDistance, const Mat\& mask=Mat(),
\par bool compactResult=false ) const;
}
\cvdefCpp{
void DescriptorMatcher::radiusMatch( const Mat\& queryDescriptors,
\par vector<vector<DMatch> >\& matches,
\par float maxDistance,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\begin{description}
\cvarg{queryDescriptors, trainDescriptors, mask, masks}{See \cvCppCross{DescriptorMatcher::match}.}
\cvarg{matches, compactResult}{See \cvCppCross{DescriptorMatcher::knnMatch}.}
\cvarg{maxDistance}{The threshold on the distance of found matches.}
\end{description}
\cvCppFunc{DescriptorMatcher::clone}
Clone the matcher.
\cvdefCpp{
Ptr<DescriptorMatcher> \\
DescriptorMatcher::clone( bool emptyTrainData ) const;
}
\begin{description}
\cvarg{emptyTrainData}{If \texttt{emptyTrainData} is false, the method creates a deep copy of the object, i.e. copies
both parameters and train data. If \texttt{emptyTrainData} is true, the method creates an object copy with the current parameters
but with empty train data.}
\end{description}
\cvCppFunc{DescriptorMatcher::create}
Descriptor matcher factory that creates a \cvCppCross{DescriptorMatcher} of the
given type with default parameters (rather than using the default constructor).
\begin{lstlisting}
Ptr<DescriptorMatcher>
DescriptorMatcher::create( const string& descriptorMatcherType );
\end{lstlisting}
\begin{description}
\cvarg{descriptorMatcherType}{Descriptor matcher type.}
\end{description}
Currently the following matcher types are supported: \texttt{"BruteForce"} (it uses \texttt{L2}), \texttt{"BruteForce-L1"},
\texttt{"BruteForce-Hamming"}, \texttt{"BruteForce-HammingLUT"}, \texttt{"FlannBased"}.
\cvclass{BruteForceMatcher}
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest
descriptor in the second set by trying each one. This descriptor matcher supports masking
permissible matches between descriptor sets.
\begin{lstlisting}
template<class Distance>
class BruteForceMatcher : public DescriptorMatcher
{
public:
BruteForceMatcher( Distance d = Distance() );
virtual ~BruteForceMatcher();
virtual bool isMaskSupported() const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
\end{lstlisting}
For efficiency, \texttt{BruteForceMatcher} is templated on the distance metric.
For float descriptors, a common choice would be \texttt{L2<float>}. The supported distance classes are:
\begin{lstlisting}
template<typename T>
struct Accumulator
{
typedef T Type;
};
template<> struct Accumulator<unsigned char> { typedef unsigned int Type; };
template<> struct Accumulator<unsigned short> { typedef unsigned int Type; };
template<> struct Accumulator<char> { typedef int Type; };
template<> struct Accumulator<short> { typedef int Type; };
/*
* Squared Euclidean distance functor
*/
template<class T>
struct L2
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
};
/*
* Manhattan distance (city block distance) functor
*/
template<class T>
struct CV_EXPORTS L1
{
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
ResultType operator()( const T* a, const T* b, int size ) const;
...
};
/*
* Hamming distance functor
*/
struct HammingLUT
{
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b,
int size ) const;
...
};
struct Hamming
{
typedef unsigned char ValueType;
typedef int ResultType;
ResultType operator()( const unsigned char* a, const unsigned char* b,
int size ) const;
...
};
\end{lstlisting}
\cvclass{FlannBasedMatcher}
Flann-based descriptor matcher. This matcher trains a \cvCppCross{flann::Index} on the
train descriptor collection and calls its nearest-neighbor search methods to find the best matches.
Therefore this matcher may be faster than the brute-force matcher when matching against a large
train collection. \texttt{FlannBasedMatcher} does not support masking permissible
matches between descriptor sets, because \cvCppCross{flann::Index} does not
support this.
\begin{lstlisting}
class FlannBasedMatcher : public DescriptorMatcher
{
public:
FlannBasedMatcher(
const Ptr<flann::IndexParams>& indexParams=new flann::KDTreeIndexParams(),
const Ptr<flann::SearchParams>& searchParams=new flann::SearchParams() );
virtual void add( const vector<Mat>& descriptors );
virtual void clear();
virtual void train();
virtual bool isMaskSupported() const;
virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
\end{lstlisting}
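A sketch of matching one query image against a collection of train images using the second group of match methods (the descriptor matrices are assumed to have been computed already and to contain float descriptors):
\begin{lstlisting}
FlannBasedMatcher matcher;
matcher.add( trainDescriptorsPerImage ); // vector<Mat>, one Mat per train image
matcher.train();                         // builds the flann index

vector<DMatch> matches;
matcher.match( queryDescriptors, matches );
// matches[i].imgIdx tells which train image the match comes from
\end{lstlisting}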
\section{Common Interfaces of Generic Descriptor Matchers}
Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that makes it easy to switch
between different algorithms solving the same problem. This section is devoted to matching descriptors
that cannot be represented as vectors in a multidimensional space. \texttt{GenericDescriptorMatcher}
is a more generic interface for descriptors: it does not make any assumptions about the descriptor representation.
Every descriptor with the \cvCppCross{DescriptorExtractor} interface has a wrapper with the
\texttt{GenericDescriptorMatcher} interface (see \cvCppCross{VectorDescriptorMatcher}).
There are descriptors, such as the One-way descriptor and Ferns, that have the \texttt{GenericDescriptorMatcher}
interface implemented but do not support \cvCppCross{DescriptorExtractor}.
\cvclass{GenericDescriptorMatcher}
Abstract interface for extracting and matching keypoint descriptors.
\cvCppCross{DescriptorExtractor} and \cvCppCross{DescriptorMatcher}
exist for these purposes too, but their interfaces are intended for descriptors
represented as vectors in a multidimensional space. \texttt{GenericDescriptorMatcher}
is a more generic interface for descriptors.
As with \cvCppCross{DescriptorMatcher}, \texttt{GenericDescriptorMatcher} has two groups
of match methods: for matching keypoints of one image with another image or
with an image set.
\begin{lstlisting}
class GenericDescriptorMatcher
{
public:
GenericDescriptorMatcher();
virtual ~GenericDescriptorMatcher();
virtual void add( const vector<Mat>& images,
vector<vector<KeyPoint> >& keypoints );
const vector<Mat>& getTrainImages() const;
const vector<vector<KeyPoint> >& getTrainKeypoints() const;
virtual void clear();
virtual void train() = 0;
virtual bool isMaskSupported() = 0;
void classify( const Mat& queryImage,
vector<KeyPoint>& queryKeypoints,
const Mat& trainImage,
vector<KeyPoint>& trainKeypoints ) const;
void classify( const Mat& queryImage,
vector<KeyPoint>& queryKeypoints );
/*
* Group of methods to match keypoints from image pair.
*/
void match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
vector<DMatch>& matches, const Mat& mask=Mat() ) const;
void knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
vector<vector<DMatch> >& matches, int k,
const Mat& mask=Mat(), bool compactResult=false ) const;
void radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const Mat& mask=Mat(), bool compactResult=false ) const;
/*
* Group of methods to match keypoints from one image to image set.
*/
void match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() );
void knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, int k,
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
void radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
protected:
...
};
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::add}
Adds images and their keypoints to the train collection (descriptors are supposed to be calculated here).
If the train collection is not empty, the new images and keypoints are added to the
existing data.
\cvdefCpp{
void GenericDescriptorMatcher::add( const vector<Mat>\& images,
\par vector<vector<KeyPoint> >\& keypoints );
}
\begin{description}
\cvarg{images}{Image collection.}
\cvarg{keypoints}{Keypoint collection. It is assumed that \texttt{keypoints[i]} are the keypoints
detected in the image \texttt{images[i]}.}
\end{description}
\cvCppFunc{GenericDescriptorMatcher::getTrainImages}
Returns the train image collection.
\begin{lstlisting}
const vector<Mat>& GenericDescriptorMatcher::getTrainImages() const;
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::getTrainKeypoints}
Returns the train keypoint collection.
\begin{lstlisting}
const vector<vector<KeyPoint> >&
GenericDescriptorMatcher::getTrainKeypoints() const;
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::clear}
Clear the train collection (images and keypoints).
\begin{lstlisting}
void GenericDescriptorMatcher::clear();
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::train}
Train the object, e.g. a tree-based structure, to extract descriptors or
to optimize descriptor matching.
\begin{lstlisting}
void GenericDescriptorMatcher::train();
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::isMaskSupported}
Returns true if the generic descriptor matcher supports masking of permissible matches.
\begin{lstlisting}
bool GenericDescriptorMatcher::isMaskSupported();
\end{lstlisting}
\cvCppFunc{GenericDescriptorMatcher::classify}
Classifies query keypoints against the keypoints of one train image given as an input argument
(first variant of the method) or against the train image collection that was set using
\cvCppCross{GenericDescriptorMatcher::add} (second variant).
\cvdefCpp{
void GenericDescriptorMatcher::classify( \par const Mat\& queryImage,
\par vector<KeyPoint>\& queryKeypoints,
\par const Mat\& trainImage,
\par vector<KeyPoint>\& trainKeypoints ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::classify( const Mat\& queryImage,
\par vector<KeyPoint>\& queryKeypoints );
}
\begin{description}
\cvarg{queryImage}{The query image.}
\cvarg{queryKeypoints}{Keypoints from the query image.}
\cvarg{trainImage}{The train image.}
\cvarg{trainKeypoints}{Keypoints from the train image.}
\end{description}
\cvCppFunc{GenericDescriptorMatcher::match}
Find the best match for the query keypoints in the training set. In the first variant of the method
one train image and the keypoints detected on it are the input arguments. In the second variant the
query keypoints are matched against the training collection that was set using
\cvCppCross{GenericDescriptorMatcher::add}. As in \cvCppCross{DescriptorMatcher::match},
a mask can be set.
\cvdefCpp{
void GenericDescriptorMatcher::match(
\par const Mat\& queryImage, vector<KeyPoint>\& queryKeypoints,
\par const Mat\& trainImage, vector<KeyPoint>\& trainKeypoints,
\par vector<DMatch>\& matches, const Mat\& mask=Mat() ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::match(
\par const Mat\& queryImage, vector<KeyPoint>\& queryKeypoints,
\par vector<DMatch>\& matches,
\par const vector<Mat>\& masks=vector<Mat>() );
}
\begin{description}
\cvarg{queryImage}{Query image.}
\cvarg{queryKeypoints}{Keypoints detected in \texttt{queryImage}.}
\cvarg{trainImage}{Train image. It is not added to the train image collection
stored in the class object.}
\cvarg{trainKeypoints}{Keypoints detected in \texttt{trainImage}. They are not added to the train point collection
stored in the class object.}
\cvarg{matches}{Matches. If a query descriptor (keypoint) is masked out in \texttt{mask},
no match is added for this descriptor,
so the size of \texttt{matches} may be smaller than the number of query keypoints.}
\cvarg{mask}{Mask specifying permissible matches between the input query and train keypoints.}
\cvarg{masks}{The set of masks. Each \texttt{masks[i]} specifies permissible matches between the input query keypoints
and the stored train keypoints from the i-th image.}
\end{description}
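A sketch of the first variant using \cvCppCross{VectorDescriptorMatcher} (described below), which computes the descriptors internally from the images and keypoints:
\begin{lstlisting}
VectorDescriptorMatcher matcher( new SurfDescriptorExtractor,
                                 new BruteForceMatcher<L2<float> > );
vector<DMatch> matches;
matcher.match( queryImage, queryKeypoints,
               trainImage, trainKeypoints, matches );
\end{lstlisting}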
\cvCppFunc{GenericDescriptorMatcher::knnMatch}
Find the k best matches for each keypoint from a query set among the train keypoints.
The found k (or fewer, if not possible) matches are returned in order of increasing distance.
See \cvCppCross{GenericDescriptorMatcher::match} and \cvCppCross{DescriptorMatcher::knnMatch} for details.
\cvdefCpp{
void GenericDescriptorMatcher::knnMatch(
\par const Mat\& queryImage, vector<KeyPoint>\& queryKeypoints,
\par const Mat\& trainImage, vector<KeyPoint>\& trainKeypoints,
\par vector<vector<DMatch> >\& matches, int k,
\par const Mat\& mask=Mat(), bool compactResult=false ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::knnMatch(
\par const Mat\& queryImage, vector<KeyPoint>\& queryKeypoints,
\par vector<vector<DMatch> >\& matches, int k,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\cvCppFunc{GenericDescriptorMatcher::radiusMatch}
Find all the matches for each query keypoint that have a distance smaller than the given threshold.
The found matches are returned in order of increasing distance. See
\cvCppCross{GenericDescriptorMatcher::match} and \cvCppCross{DescriptorMatcher::radiusMatch} for details.
\cvdefCpp{
void GenericDescriptorMatcher::radiusMatch(
\par const Mat\& queryImage, vector<KeyPoint>\& queryKeypoints,
\par const Mat\& trainImage, vector<KeyPoint>\& trainKeypoints,
\par vector<vector<DMatch> >\& matches, float maxDistance,
\par const Mat\& mask=Mat(), bool compactResult=false ) const;
}
\cvdefCpp{
void GenericDescriptorMatcher::radiusMatch(
\par const Mat\& queryImage, vector<KeyPoint>\& queryKeypoints,
\par vector<vector<DMatch> >\& matches, float maxDistance,
\par const vector<Mat>\& masks=vector<Mat>(),
\par bool compactResult=false );
}
\cvCppFunc{GenericDescriptorMatcher::read}
Reads matcher object from a file node.
\cvdefCpp{
void GenericDescriptorMatcher::read( const FileNode\& fn );
}
\cvCppFunc{GenericDescriptorMatcher::write}
Writes the matcher object to a file storage.
\cvdefCpp{
void GenericDescriptorMatcher::write( FileStorage\& fs ) const;
}
\cvCppFunc{GenericDescriptorMatcher::clone}
Clone the matcher.
\cvdefCpp{
Ptr<GenericDescriptorMatcher>\\
GenericDescriptorMatcher::clone( bool emptyTrainData ) const;
}
\begin{description}
\cvarg{emptyTrainData}{If \texttt{emptyTrainData} is false, the method creates a deep copy of the object, i.e. copies
both parameters and train data. If \texttt{emptyTrainData} is true, the method creates an object copy with the current parameters
but with empty train data.}
\end{description}
\cvclass{OneWayDescriptorMatcher}
Wrapping class for computing, matching and classifying descriptors using the \cvCppCross{OneWayDescriptorBase} class.
\begin{lstlisting}
class OneWayDescriptorMatcher : public GenericDescriptorMatcher
{
public:
class Params
{
public:
static const int POSE_COUNT = 500;
static const int PATCH_WIDTH = 24;
static const int PATCH_HEIGHT = 24;
static float GET_MIN_SCALE() { return 0.7f; }
static float GET_MAX_SCALE() { return 1.5f; }
static float GET_STEP_SCALE() { return 1.2f; }
Params( int poseCount = POSE_COUNT,
Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
string pcaFilename = string(),
string trainPath = string(), string trainImagesList = string(),
float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
float stepScale = GET_STEP_SCALE() );
int poseCount;
Size patchSize;
string pcaFilename;
string trainPath;
string trainImagesList;
float minScale, maxScale, stepScale;
};
OneWayDescriptorMatcher( const Params& params=Params() );
virtual ~OneWayDescriptorMatcher();
void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
// Clears keypoints stored in the collection and in OneWayDescriptorBase
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
\end{lstlisting}
\cvclass{FernDescriptorMatcher}
Wrapping class for computing, matching and classifying descriptors using the \cvCppCross{FernClassifier} class.
\begin{lstlisting}
class FernDescriptorMatcher : public GenericDescriptorMatcher
{
public:
class Params
{
public:
Params( int nclasses=0,
int patchSize=FernClassifier::PATCH_SIZE,
int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
int nstructs=FernClassifier::DEFAULT_STRUCTS,
int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
int nviews=FernClassifier::DEFAULT_VIEWS,
int compressionMethod=FernClassifier::COMPRESSION_NONE,
const PatchGenerator& patchGenerator=PatchGenerator() );
Params( const string& filename );
int nclasses;
int patchSize;
int signatureSize;
int nstructs;
int structSize;
int nviews;
int compressionMethod;
PatchGenerator patchGenerator;
string filename;
};
FernDescriptorMatcher( const Params& params=Params() );
virtual ~FernDescriptorMatcher();
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
\end{lstlisting}
\cvclass{VectorDescriptorMatcher}
Class used for matching descriptors that can be described as vectors in a finite-dimensional space.
\begin{lstlisting}
class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
{
public:
VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
virtual ~VectorDescriptorMatcher();
virtual void add( const vector<Mat>& imgCollection,
vector<vector<KeyPoint> >& pointCollection );
virtual void clear();
virtual void train();
virtual bool isMaskSupported();
virtual void read( const FileNode& fn );
virtual void write( FileStorage& fs ) const;
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
...
};
\end{lstlisting}
Example of creating a \texttt{VectorDescriptorMatcher}:
\begin{lstlisting}
VectorDescriptorMatcher matcher( new SurfDescriptorExtractor,
new BruteForceMatcher<L2<float> > );
\end{lstlisting}
\section{Drawing Function of Keypoints and Matches}
\cvCppFunc{drawMatches}
This function draws matches of keypoints from two images onto the output image.
A match is drawn as a line connecting two keypoints (circles).
\cvdefCpp{
void drawMatches( const Mat\& img1, const vector<KeyPoint>\& keypoints1,
\par const Mat\& img2, const vector<KeyPoint>\& keypoints2,
\par const vector<DMatch>\& matches1to2, Mat\& outImg,
\par const Scalar\& matchColor=Scalar::all(-1),
\par const Scalar\& singlePointColor=Scalar::all(-1),
\par const vector<char>\& matchesMask=vector<char>(),
\par int flags=DrawMatchesFlags::DEFAULT );
}
\cvdefCpp{
void drawMatches( const Mat\& img1, const vector<KeyPoint>\& keypoints1,
\par const Mat\& img2, const vector<KeyPoint>\& keypoints2,
\par const vector<vector<DMatch> >\& matches1to2, Mat\& outImg,
\par const Scalar\& matchColor=Scalar::all(-1),
\par const Scalar\& singlePointColor=Scalar::all(-1),
\par const vector<vector<char> >\& matchesMask=
\par vector<vector<char> >(),
\par int flags=DrawMatchesFlags::DEFAULT );
}
\begin{description}
\cvarg{img1}{First source image.}
\cvarg{keypoints1}{Keypoints from first source image.}
\cvarg{img2}{Second source image.}
\cvarg{keypoints2}{Keypoints from second source image.}
\cvarg{matches1to2}{Matches from the first image to the second one, i.e. \texttt{keypoints1[i]}
has a corresponding point \texttt{keypoints2[matches1to2[i].trainIdx]}.}
\cvarg{outImg}{Output image. Its content depends on the \texttt{flags} value defining
what is drawn in the output image. See the possible \texttt{flags} bit values below.}
\cvarg{matchColor}{Color of matches (lines and connected keypoints).
If \texttt{matchColor==Scalar::all(-1)}, the color is generated randomly.}
\cvarg{singlePointColor}{Color of single keypoints (circles), i.e. keypoints that do not have matches.
If \texttt{singlePointColor==Scalar::all(-1)}, the color is generated randomly.}
\cvarg{matchesMask}{Mask determining which matches are drawn. If the mask is empty, all matches are drawn.}
\cvarg{flags}{Each bit of \texttt{flags} sets some feature of drawing.
Possible \texttt{flags} bit values are defined by \texttt{DrawMatchesFlags}, see below.}
\end{description}
\begin{lstlisting}
struct DrawMatchesFlags
{
enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create),
// i.e. existing memory of output image may be reused.
// Two source images, matches and single keypoints
// will be drawn.
// For each keypoint only the center point will be
// drawn (without the circle around keypoint with
// keypoint size and orientation).
DRAW_OVER_OUTIMG = 1, // Output image matrix will not be
// created (Mat::create). Matches will be drawn
// on existing content of output image.
NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.
DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around
// keypoint with keypoint size and orientation will
// be drawn.
};
};
\end{lstlisting}
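A sketch that draws matches between two images and shows the result (it assumes \texttt{opencv2/highgui/highgui.hpp} is included and that the images, keypoints and matches were obtained as in the previous sections):
\begin{lstlisting}
Mat outImg;
drawMatches( img1, keypoints1, img2, keypoints2, matches, outImg,
             Scalar::all(-1), Scalar::all(-1), vector<char>(),
             DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imshow( "matches", outImg );
waitKey();
\end{lstlisting}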
\cvCppFunc{drawKeypoints}
Draw keypoints.
\cvdefCpp{
void drawKeypoints( const Mat\& image,
\par const vector<KeyPoint>\& keypoints,
\par Mat\& outImg, const Scalar\& color=Scalar::all(-1),
\par int flags=DrawMatchesFlags::DEFAULT );
}
\begin{description}
\cvarg{image}{Source image.}
\cvarg{keypoints}{Keypoints from source image.}
\cvarg{outImg}{Output image. Its content depends on the \texttt{flags} value defining
what is drawn in the output image. See the possible \texttt{flags} bit values.}
\cvarg{color}{Color of keypoints.}
\cvarg{flags}{Each bit of \texttt{flags} sets some feature of drawing.
Possible \texttt{flags} bit values are defined by \texttt{DrawMatchesFlags},
see above in \cvCppCross{drawMatches}.}
\end{description}
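A sketch that draws detected keypoints with their size and orientation (\texttt{img} and \texttt{keypoints} are assumed to be prepared as in the previous sections):
\begin{lstlisting}
Mat outImg;
drawKeypoints( img, keypoints, outImg, Scalar::all(-1),
               DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
imshow( "keypoints", outImg );
waitKey();
\end{lstlisting}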
\fi