Mirror of https://github.com/opencv/opencv.git

commit b5a71db742 (parent 1135bc2495)

Modified FernClassifier::train(); removed the old RTreeClassifier and added a new CalonderClassifier implementation; removed the old find_obj_calonder sample and added a new one.
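For orientation, a minimal sketch of how the CalonderClassifier introduced by this commit can be driven, assuming only the declarations visible in the hunks below; the image file names are placeholders and error handling is omitted:

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

using namespace cv;

int main()
{
    // Hypothetical training images (any grayscale images will do).
    std::vector<Mat> trainImgs;
    trainImgs.push_back( imread( "train0.png", 0 ) );
    trainImgs.push_back( imread( "train1.png", 0 ) );

    // One keypoint list per training image, as the new train() signature expects.
    SurfFeatureDetector detector;
    std::vector<std::vector<Point2f> > trainPoints( trainImgs.size() );
    for( size_t i = 0; i < trainImgs.size(); i++ )
    {
        std::vector<KeyPoint> kps;
        detector.detect( trainImgs[i], kps );
        KeyPoint::convert( kps, trainPoints[i] );
    }

    // Train with default parameters; with no labels every point is its own class.
    CalonderClassifier classifier;
    classifier.train( trainPoints, trainImgs );

    // Persist the trained classifier and compute a signature for one query point.
    FileStorage fs( "calonder.xml", FileStorage::WRITE );
    classifier.write( fs );

    Mat query = imread( "query.png", 0 );
    std::vector<KeyPoint> qkps;
    detector.detect( query, qkps );
    if( !qkps.empty() )
    {
        std::vector<float> signature; // one response per class
        classifier( query, qkps[0].pt, signature );
    }
    return 0;
}

The same train()/operator() pattern is used by the rewritten find_obj_calonder sample at the end of this commit.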
@@ -179,6 +179,17 @@ CVAPI(CvSeq*) cvGetStarKeypoints( const CvArr* img, CvMemStorage* storage,
 
 namespace cv
 {
+
+struct CV_EXPORTS DefaultRngAuto
+{
+    const static uint64 def_state = (uint64)-1;
+    const uint64 old_state;
+
+    DefaultRngAuto() : old_state(theRNG().state) { theRNG().state = def_state; }
+    ~DefaultRngAuto() { theRNG().state = old_state; }
+
+    DefaultRngAuto& operator=(const DefaultRngAuto&);
+};
 
 // CvAffinePose: defines a parameterized affine transformation of an image patch.
 // An image patch is rotated on angle phi (in degrees), then scaled lambda1 times
@@ -395,10 +406,7 @@ public:
 CV_EXPORTS void FAST( const Mat& image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true );
 
 /*!
  The Patch Generator class
-
-
-
 */
 class CV_EXPORTS PatchGenerator
 {
@@ -459,9 +467,9 @@ class CV_EXPORTS FernClassifier
 public:
     FernClassifier();
     FernClassifier(const FileNode& node);
-    FernClassifier(const vector<Point2f>& points,
-                   const vector<Ptr<Mat> >& refimgs,
-                   const vector<int>& labels=vector<int>(),
+    FernClassifier(const vector<vector<Point2f> >& points,
+                   const vector<Mat>& refimgs,
+                   const vector<vector<int> >& labels=vector<vector<int> >(),
                    int _nclasses=0, int _patchSize=PATCH_SIZE,
                    int _signatureSize=DEFAULT_SIGNATURE_SIZE,
                    int _nstructs=DEFAULT_STRUCTS,
@@ -481,9 +489,9 @@ public:
                    int _nviews=DEFAULT_VIEWS,
                    int _compressionMethod=COMPRESSION_NONE,
                    const PatchGenerator& patchGenerator=PatchGenerator());
-    virtual void train(const vector<Point2f>& points,
-                       const vector<Ptr<Mat> >& refimgs,
-                       const vector<int>& labels=vector<int>(),
+    virtual void train(const vector<vector<Point2f> >& points,
+                       const vector<Mat>& refimgs,
+                       const vector<vector<int> >& labels=vector<vector<int> >(),
                        int _nclasses=0, int _patchSize=PATCH_SIZE,
                        int _signatureSize=DEFAULT_SIGNATURE_SIZE,
                        int _nstructs=DEFAULT_STRUCTS,
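The regrouped FernClassifier interface above takes one keypoint list per reference image instead of one flat point list with per-point labels. A minimal sketch of a call against that signature (hypothetical file name; labels left empty so that, as in the train() implementation later in this commit, each point becomes its own class):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

using namespace cv;

int main()
{
    // Hypothetical reference image; points[i] must belong to refimgs[i].
    std::vector<Mat> refimgs( 1, imread( "object.png", 0 ) );

    SurfFeatureDetector detector;
    std::vector<KeyPoint> kps;
    detector.detect( refimgs[0], kps );

    std::vector<std::vector<Point2f> > points( 1 );
    KeyPoint::convert( kps, points[0] );

    // Labels omitted: each point is treated as its own class.
    FernClassifier fern;
    fern.train( points, refimgs );
    return 0;
}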
@@ -594,269 +602,126 @@ protected:
     FernClassifier fernClassifier;
 };
 
 
 
 /****************************************************************************************\
-*                                   Calonder Descriptor                                  *
+*                                   Calonder Classifier                                  *
 \****************************************************************************************/
 
-struct CV_EXPORTS DefaultRngAuto
-{
-    const static uint64 def_state = (uint64)-1;
-    const uint64 old_state;
-
-    DefaultRngAuto() : old_state(theRNG().state) { theRNG().state = def_state; }
-    ~DefaultRngAuto() { theRNG().state = old_state; }
-
-    DefaultRngAuto& operator=(const DefaultRngAuto&);
-};
-
-/*
- A pseudo-random number generator usable with std::random_shuffle.
-*/
-typedef cv::RNG CalonderRng;
-typedef unsigned int int_type;
-
-//----------------------------
-//randomized_tree.h
-
-//class RTTester;
-
-//namespace features {
-static const size_t DEFAULT_REDUCED_NUM_DIM = 176;
-static const float LOWER_QUANT_PERC = .03f;
-static const float UPPER_QUANT_PERC = .92f;
-static const int PATCH_SIZE = 32;
-static const int DEFAULT_DEPTH = 9;
-static const int DEFAULT_VIEWS = 5000;
-struct RTreeNode;
-
-struct BaseKeypoint
-{
-    int x;
-    int y;
-    IplImage* image;
-
-    BaseKeypoint()
-        : x(0), y(0), image(NULL)
-    {}
-
-    BaseKeypoint(int x, int y, IplImage* image)
-        : x(x), y(y), image(image)
-    {}
-};
-
-class CSMatrixGenerator {
-public:
-    typedef enum { PDT_GAUSS=1, PDT_BERNOULLI, PDT_DBFRIENDLY } PHI_DISTR_TYPE;
-    ~CSMatrixGenerator();
-    static float* getCSMatrix(int m, int n, PHI_DISTR_TYPE dt); // do NOT free returned pointer
-
-private:
-    static float *cs_phi_; // matrix for compressive sensing
-    static int cs_phi_m_, cs_phi_n_;
-};
-
-
-template< typename T >
-struct AlignedMemBlock
-{
-    AlignedMemBlock() : raw(NULL), data(NULL) { };
-
-    // Alloc's an `a` bytes-aligned block good to hold `sz` elements of class T
-    AlignedMemBlock(const int n, const int a)
-    {
-        alloc(n, a);
-    }
-
-    ~AlignedMemBlock()
-    {
-        free(raw);
-    }
-
-    void alloc(const int n, const int a)
-    {
-        uchar* raw = (uchar*)malloc(n*sizeof(T) + a);
-        int delta = (a - uint64(raw)%a)%a; // # bytes required for padding s.t. we get `a`-aligned
-        data = reinterpret_cast<T*>(raw + delta);
-    }
-
-    // Methods to access the aligned data. NEVER EVER FREE A RETURNED POINTER!
-    inline T* p() { return data; }
-    inline T* operator()() { return data; }
-
-private:
-    T *raw;  // raw block, probably not aligned
-    T *data; // exposed data, aligned, DO NOT FREE
-};
-
-typedef AlignedMemBlock<float> FloatSignature;
-typedef AlignedMemBlock<uchar> Signature;
-
-class CV_EXPORTS RandomizedTree
-{
-public:
-    friend class RTreeClassifier;
-    //friend class ::RTTester;
-
-    RandomizedTree();
-    ~RandomizedTree();
-
-    void train(std::vector<BaseKeypoint> const& base_set, cv::RNG &rng,
-               int depth, int views, size_t reduced_num_dim, int num_quant_bits);
-    void train(std::vector<BaseKeypoint> const& base_set, cv::RNG &rng,
-               PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim,
-               int num_quant_bits);
-
-    // following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do)
-    static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0);
-    static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst);
-
-    // patch_data must be a 32x32 array (no row padding)
-    float* getPosterior(uchar* patch_data);
-    const float* getPosterior(uchar* patch_data) const;
-    uchar* getPosterior2(uchar* patch_data);
-
-    void read(const char* file_name, int num_quant_bits);
-    void read(std::istream &is, int num_quant_bits);
-    void write(const char* file_name) const;
-    void write(std::ostream &os) const;
-
-    inline int classes() { return classes_; }
-    inline int depth() { return depth_; }
-
-    inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); }
-
-    // debug
-    void savePosteriors(std::string url, bool append=false);
-    void savePosteriors2(std::string url, bool append=false);
-
-private:
-    int classes_;
-    int depth_;
-    int num_leaves_;
-    std::vector<RTreeNode> nodes_;
-    //float **posteriors_;  // 16-bytes aligned posteriors
-    //uchar **posteriors2_; // 16-bytes aligned posteriors
-    FloatSignature *posteriors_;
-    Signature *posteriors2_;
-    std::vector<int> leaf_counts_;
-
-    void createNodes(int num_nodes, cv::RNG &rng);
-    void allocPosteriorsAligned(int num_leaves, int num_classes);
-    void freePosteriors(int which); // which: 1=posteriors_, 2=posteriors2_, 3=both
-    void init(int classes, int depth, cv::RNG &rng);
-    void addExample(int class_id, uchar* patch_data);
-    void finalize(size_t reduced_num_dim, int num_quant_bits);
-    int getIndex(uchar* patch_data) const;
-    inline float* getPosteriorByIndex(int index);
-    inline uchar* getPosteriorByIndex2(int index);
-    inline const float* getPosteriorByIndex(int index) const;
-    //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim);
-    void convertPosteriorsToChar();
-    void makePosteriors2(int num_quant_bits);
-    void compressLeaves(size_t reduced_num_dim);
-    void estimateQuantPercForPosteriors(float perc[2]);
-};
-
-struct RTreeNode
-{
-    short offset1, offset2;
-
-    RTreeNode() {}
-
-    RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
-        : offset1(y1*PATCH_SIZE + x1),
-          offset2(y2*PATCH_SIZE + x2)
-    {}
-
-    //! Left child on 0, right child on 1
-    inline bool operator() (uchar* patch_data) const
-    {
-        return patch_data[offset1] > patch_data[offset2];
-    }
-};
-
-//} // namespace features
-//----------------------------
-//rtree_classifier.h
-//class RTTester;
-
-//namespace features {
-
-class CV_EXPORTS RTreeClassifier
-{
-public:
-    //friend class ::RTTester;
-    static const int DEFAULT_TREES = 80;
-    static const size_t DEFAULT_NUM_QUANT_BITS = 4;
-
-    //static const int SIG_LEN = 176;
-
-    RTreeClassifier();
-
-    //modified
-    void train(std::vector<BaseKeypoint> const& base_set,
-               cv::RNG &rng,
-               int num_trees = RTreeClassifier::DEFAULT_TREES,
-               int depth = DEFAULT_DEPTH,
-               int views = DEFAULT_VIEWS,
-               size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
-               int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
-               bool print_status = true);
-
-    void train(std::vector<BaseKeypoint> const& base_set,
-               cv::RNG &rng,
-               PatchGenerator &make_patch,
-               int num_trees = DEFAULT_TREES,
-               int depth = DEFAULT_DEPTH,
-               int views = DEFAULT_VIEWS,
-               size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
-               int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
-               bool print_status = true);
-
-    // sig must point to a memory block of at least classes()*sizeof(float|uchar) bytes
-    void getSignature(IplImage *patch, uchar *sig);
-    void getSignature(IplImage *patch, float *sig);
-    void getSparseSignature(IplImage *patch, float *sig, float thresh);
-    // TODO: deprecated in favor of getSignature overload, remove
-    void getFloatSignature(IplImage *patch, float *sig) { getSignature(patch, sig); }
-
-    static int countNonZeroElements(float *vec, int n, double tol=1e-10);
-    static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176);
-    static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176);
-
-    inline int classes() const { return classes_; }
-    inline int original_num_classes() { return original_num_classes_; }
-
-    void setQuantization(int num_quant_bits);
-    void discardFloatPosteriors();
-
-    void read(const char* file_name);
-    void read(std::istream &is);
-    void write(const char* file_name) const;
-    void write(std::ostream &os) const;
-
-    // experimental and debug
-    void saveAllFloatPosteriors(std::string file_url);
-    void saveAllBytePosteriors(std::string file_url);
-    void setFloatPosteriorsFromTextfile_176(std::string url);
-    float countZeroElements();
-
-    std::vector<RandomizedTree> trees_;
-
-private:
-    int classes_;
-    int num_quant_bits_;
-    uchar **posteriors_;
-    ushort *ptemp_;
-    int original_num_classes_;
-    bool keep_floats_;
-};
+class CV_EXPORTS CalonderClassifier
+{
+public:
+    CalonderClassifier();
+    CalonderClassifier( const vector<vector<Point2f> >& points, const vector<Mat>& refimgs,
+                        const vector<vector<int> >& labels=vector<vector<int> >(), int _numClasses=0,
+                        int _pathSize=DEFAULT_PATCH_SIZE,
+                        int _numTrees=DEFAULT_NUM_TREES,
+                        int _treeDepth=DEFAULT_TREE_DEPTH,
+                        int _numViews=DEFAULT_NUM_VIEWS,
+                        int _compressedDim=DEFAULT_COMPRESSED_DIM,
+                        int _compressType=DEFAULT_COMPRESS_TYPE,
+                        int _numQuantBits=DEFAULT_NUM_QUANT_BITS,
+                        const PatchGenerator& patchGenerator=PatchGenerator() );
+
+    virtual ~CalonderClassifier();
+    virtual void clear();
+
+    void train( const vector<vector<Point2f> >& points, const vector<Mat>& refimgs,
+                const vector<vector<int> >& labels=vector<vector<int> >(), int _nclasses=0,
+                int _pathSize=DEFAULT_PATCH_SIZE,
+                int _numTrees=DEFAULT_NUM_TREES,
+                int _treeDepth=DEFAULT_TREE_DEPTH,
+                int _numViews=DEFAULT_NUM_VIEWS,
+                int _compressedDim=DEFAULT_COMPRESSED_DIM,
+                int _compressType=DEFAULT_COMPRESS_TYPE,
+                int _numQuantBits=DEFAULT_NUM_QUANT_BITS,
+                const PatchGenerator& patchGenerator=PatchGenerator() );
+
+    virtual void operator()(const Mat& img, Point2f pt, vector<float>& signature, float thresh=0.f) const;
+    virtual void operator()(const Mat& patch, vector<float>& signature, float thresh=-1.f) const;
+
+#define QUANTIZATION_AVAILABLE 1
+#if QUANTIZATION_AVAILABLE
+    void quantizePosteriors( int _numQuantBits, bool isClearFloatPosteriors=false );
+    void clearFloatPosteriors();
+    virtual void operator()(const Mat& img, Point2f pt, vector<uchar>& signature, uchar thresh=-1.f) const;
+    virtual void operator()(const Mat& patch, vector<uchar>& signature, uchar thresh=-1.f) const;
+#endif
+
+    void read( const FileNode& fn );
+    void write( FileStorage& fs ) const;
+
+    bool empty() const;
+
+    void setVerbose( bool _verbose );
+
+    int getPatchSize() const;
+    int getNumTrees() const;
+    int getTreeDepth() const;
+    int getNumViews() const;
+    int getSignatureSize() const;
+    int getCompressType() const;
+    int getNumQuantBits() const;
+    int getOrigNumClasses() const;
+
+    enum
+    {
+        COMPRESS_NONE = -1,
+        COMPRESS_DISTR_GAUSS = 0,
+        COMPRESS_DISTR_BERNOULLI = 1,
+        COMPRESS_DISTR_DBFRIENDLY = 2,
+    };
+
+    static float GET_LOWER_QUANT_PERC() { return .03f; }
+    static float GET_UPPER_QUANT_PERC() { return .92f; }
+
+    enum
+    {
+        MAX_NUM_QUANT_BITS = 8,
+        DEFAULT_PATCH_SIZE = 32,
+        DEFAULT_NUM_TREES = 48,
+        DEFAULT_TREE_DEPTH = 9,
+        DEFAULT_NUM_VIEWS = 500,
+        DEFAULT_COMPRESSED_DIM = 176,
+        DEFAULT_COMPRESS_TYPE = COMPRESS_DISTR_BERNOULLI,
+        DEFAULT_NUM_QUANT_BITS = -1,
+    };
+
+private:
+    void prepare( int _patchSize, int _signatureSize, int _numTrees, int _treeDepth, int _numViews );
+
+    int getLeafIdx( int treeIdx, const Mat& patch ) const;
+    void finalize( int _compressedDim, int _compressType, int _numQuantBits,
+                   const vector<int>& leafSampleCounters);
+
+    void compressLeaves( int _compressedDim, int _compressType );
+
+    bool verbose;
+
+    int patchSize;
+    int signatureSize;
+    int numTrees;
+    int treeDepth;
+    int numViews;
+
+    int origNumClasses;
+    int compressType;
+    int numQuantBits;
+
+    int numLeavesPerTree;
+    int numNodesPerTree;
+
+    struct Node
+    {
+        uchar x1, y1, x2, y2;
+        Node() : x1(0), y1(0), x2(0), y2(0) {}
+        Node( uchar _x1, uchar _y1, uchar _x2, uchar _y2 ) : x1(_x1), y1(_y1), x2(_x2), y2(_y2)
+        {}
+        int operator() (const Mat_<uchar>& patch) const
+        { return patch(y1,x1) > patch(y2, x2) ? 1 : 0; }
+    };
+    vector<Node> nodes;
+    vector<float> posteriors;
+#if QUANTIZATION_AVAILABLE
+    vector<uchar> quantizedPosteriors;
+#endif
+};
 
 /****************************************************************************************\
 *                                  One-Way Descriptor                                    *
@@ -1004,7 +869,7 @@ protected:
     CvAffinePose* m_affine_poses; // an array of poses
     CvMat** m_transforms; // an array of affine transforms corresponding to poses
 
-    std::string m_feature_name; // the name of the feature associated with the descriptor
+    string m_feature_name; // the name of the feature associated with the descriptor
     CvPoint m_center; // the coordinates of the feature (the center of the input image ROI)
 
     int m_pca_dim_high; // the number of descriptor pca components to use for generating affine poses
@@ -1275,7 +1140,8 @@ public:
     *
     * image     The image.
     * keypoints The detected keypoints.
-    * mask      Mask specifying where to look for keypoints (optional). Must be a char matrix with non-zero values in the region of interest.
+    * mask      Mask specifying where to look for keypoints (optional). Must be a char
+    *           matrix with non-zero values in the region of interest.
     */
     void detect( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const
     {
@@ -1430,8 +1296,8 @@ public:
     */
     virtual void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const = 0;
 
-    virtual void read (const FileNode&) {};
-    virtual void write (FileStorage&) const {};
+    virtual void read( const FileNode& ) {};
+    virtual void write( FileStorage& ) const {};
 
 protected:
     /*
@@ -1451,9 +1317,9 @@ public:
                              int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
                              int angleMode=SIFT::CommonParams::FIRST_ANGLE );
 
-    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const;
-    virtual void read (const FileNode &fn);
-    virtual void write (FileStorage &fs) const;
+    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+    virtual void read( const FileNode &fn );
+    virtual void write( FileStorage &fs ) const;
 
 protected:
     SIFT sift;
@@ -1465,14 +1331,56 @@ public:
     SurfDescriptorExtractor( int nOctaves=4,
                              int nOctaveLayers=2, bool extended=false );
 
-    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const;
-    virtual void read (const FileNode &fn);
-    virtual void write (FileStorage &fs) const;
+    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+    virtual void read( const FileNode &fn );
+    virtual void write( FileStorage &fs ) const;
 
 protected:
     SURF surf;
 };
 
+#if 0
+template<typename T>
+class CalonderDescriptorExtractor : public DescriptorExtractor
+{
+public:
+    CalonderDescriptorExtractor( const string& classifierFile );
+
+    virtual void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+    virtual void read( const FileNode &fn );
+    virtual void write( FileStorage &fs ) const;
+
+protected:
+    RTreeClassifier classifier_;
+    static const int BORDER_SIZE = 16;
+};
+
+template<typename T>
+CalonderDescriptorExtractor<T>::CalonderDescriptorExtractor(const std::string& classifier_file)
+{
+    classifier_.read( classifier_file.c_str() );
+}
+
+template<typename T>
+void CalonderDescriptorExtractor<T>::compute( const cv::Mat& image,
+                                              std::vector<cv::KeyPoint>& keypoints,
+                                              cv::Mat& descriptors) const
+{
+    // Cannot compute descriptors for keypoints on the image border.
+    removeBorderKeypoints(keypoints, image.size(), BORDER_SIZE);
+
+    /// @todo Check 16-byte aligned
+    descriptors.create(keypoints.size(), classifier_.classes(), cv::DataType<T>::type);
+    IplImage ipl = (IplImage)image;
+
+    for (size_t i = 0; i < keypoints.size(); ++i) {
+        cv::Point2f keypt = keypoints[i].pt;
+        cv::WImageView1_b patch = features::extractPatch(&ipl, keypt);
+        classifier_.getSignature(patch.Ipl(), descriptors.ptr<T>(i));
+    }
+}
+#endif
+
 CV_EXPORTS Ptr<DescriptorExtractor> createDescriptorExtractor( const string& descriptorExtractorType );
 
 /****************************************************************************************\
@@ -1533,7 +1441,7 @@ struct CV_EXPORTS L1
 
 
 /****************************************************************************************\
 *                                         DMatch                                         *
 \****************************************************************************************/
 /*
  * Struct for matching: match index and distance between descriptors
@@ -1591,8 +1499,7 @@ public:
     * mask     Mask specifying permissible matches.
     * matches  Indices of the closest matches from the training set
     */
-    void match( const Mat& query, const Mat& mask,
-                vector<int>& matches ) const;
+    void match( const Mat& query, const Mat& mask, vector<int>& matches ) const;
 
     /*
     * Find the best match for each descriptor from a query set
@@ -1613,8 +1520,7 @@ public:
     * mask     Mask specifying permissible matches.
     * matches  DMatches of the closest matches from the training set
     */
-    void match( const Mat& query, const Mat& mask,
-                vector<DMatch>& matches ) const;
+    void match( const Mat& query, const Mat& mask, vector<DMatch>& matches ) const;
 
     /*
     * Find many matches for each descriptor from a query set
@@ -1638,7 +1544,7 @@ public:
     * threshold Distance threshold for descriptors matching
     */
     void match( const Mat& query, const Mat& mask,
                 vector<vector<DMatch> >& matches, float threshold ) const;
 
 
 
@@ -1878,7 +1784,7 @@ void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat
 
 template<>
 void BruteForceMatcher<L2<float> >::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
                                                const Mat& mask, vector<int>& matches ) const;
 
 CV_EXPORTS Ptr<DescriptorMatcher> createDescriptorMatcher( const string& descriptorMatcherType );
 
@@ -2036,10 +1942,10 @@ public:
     virtual void clear ();
 
     // Reads match object from a file node
-    virtual void read (const FileNode &fn);
+    virtual void read( const FileNode &fn );
 
     // Writes match object to a file storage
-    virtual void write (FileStorage& fs) const;
+    virtual void write( FileStorage& fs ) const;
 
 protected:
     Ptr<OneWayDescriptorBase> base;
@@ -2049,6 +1955,7 @@ protected:
 /*
  * CalonderDescriptorMatch
  */
+#if 0
 class CV_EXPORTS CalonderDescriptorMatch : public GenericDescriptorMatch
 {
 public:
@@ -2113,6 +2020,7 @@ protected:
     Ptr<RTreeClassifier> classifier;
     Params params;
 };
+#endif
 
 /*
  * FernDescriptorMatch
@@ -2178,6 +2086,7 @@ protected:
 };
 
 CV_EXPORTS Ptr<GenericDescriptorMatch> createGenericDescriptorMatch( const string& genericDescritptorMatchType, const string &paramsFilename = string () );
+
 
 /****************************************************************************************\
 *                                 VectorDescriptorMatch                                  *
 \****************************************************************************************/
@@ -2199,63 +2108,27 @@ public:
     void index();
 
     // Calculates descriptors for a set of keypoints from a single image
-    virtual void add( const Mat& image, vector<KeyPoint>& keypoints )
-    {
-        Mat descriptors;
-        extractor->compute( image, keypoints, descriptors );
-        matcher->add( descriptors );
-
-        collection.add( Mat(), keypoints );
-    };
+    virtual void add( const Mat& image, vector<KeyPoint>& keypoints );
 
     // Matches a set of keypoints with the training set
-    virtual void match( const Mat& image, vector<KeyPoint>& points, vector<int>& keypointIndices )
-    {
-        Mat descriptors;
-        extractor->compute( image, points, descriptors );
-
-        matcher->match( descriptors, keypointIndices );
-    };
-
-    virtual void match( const Mat& image, vector<KeyPoint>& points, vector<DMatch>& matches )
-    {
-        Mat descriptors;
-        extractor->compute( image, points, descriptors );
-
-        matcher->match( descriptors, matches );
-    }
-
-    virtual void match( const Mat& image, vector<KeyPoint>& points, vector<vector<DMatch> >& matches, float threshold )
-    {
-        Mat descriptors;
-        extractor->compute( image, points, descriptors );
-
-        matcher->match( descriptors, matches, threshold );
-    }
-
-    virtual void clear()
-    {
-        GenericDescriptorMatch::clear();
-        matcher->clear();
-    }
-
-    virtual void read (const FileNode& fn)
-    {
-        GenericDescriptorMatch::read(fn);
-        extractor->read (fn);
-    }
-
-    virtual void write (FileStorage& fs) const
-    {
-        GenericDescriptorMatch::write(fs);
-        extractor->write (fs);
-    }
+    virtual void match( const Mat& image, vector<KeyPoint>& points, vector<int>& keypointIndices );
+
+    virtual void match( const Mat& image, vector<KeyPoint>& points, vector<DMatch>& matches );
+
+    virtual void match( const Mat& image, vector<KeyPoint>& points,
+                        vector<vector<DMatch> >& matches, float threshold );
+
+    virtual void clear();
+    virtual void read( const FileNode& fn );
+    virtual void write( FileStorage& fs ) const;
+
 protected:
     Ptr<DescriptorExtractor> extractor;
     Ptr<DescriptorMatcher> matcher;
     //vector<int> classIds;
 };
 
 
 struct CV_EXPORTS DrawMatchesFlags
 {
     enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create),
[Diff for one file suppressed because it is too large.]
@@ -144,7 +144,7 @@ void DescriptorExtractor::removeBorderKeypoints( vector<KeyPoint>& keypoints,
 }
 
 /****************************************************************************************\
 *                                SiftDescriptorExtractor                                 *
 \****************************************************************************************/
 SiftDescriptorExtractor::SiftDescriptorExtractor( double magnification, bool isNormalize, bool recalculateAngles,
                                                   int nOctaves, int nOctaveLayers, int firstOctave, int angleMode )
@@ -188,7 +188,7 @@ void SiftDescriptorExtractor::write (FileStorage &fs) const
 }
 
 /****************************************************************************************\
 *                                SurfDescriptorExtractor                                 *
 \****************************************************************************************/
 SurfDescriptorExtractor::SurfDescriptorExtractor( int nOctaves,
                                                   int nOctaveLayers, bool extended )
@@ -228,6 +228,10 @@ void SurfDescriptorExtractor::write( FileStorage &fs ) const
     fs << "extended" << surf.extended;
 }
 
+/****************************************************************************************\
+*              Factory functions for descriptor extractor and matcher creating           *
+\****************************************************************************************/
+
 Ptr<DescriptorExtractor> createDescriptorExtractor( const string& descriptorExtractorType )
 {
     DescriptorExtractor* de = 0;
@@ -270,7 +274,9 @@ Ptr<DescriptorMatcher> createDescriptorMatcher( const string& descriptorMatcherT
     return dm;
 }
 
+/****************************************************************************************\
+*                            BruteForceMatcher L2 specialization                         *
+\****************************************************************************************/
 template<>
 void BruteForceMatcher<L2<float> >::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
                                                const Mat& /*mask*/, vector<int>& matches ) const
@@ -317,7 +323,6 @@ void BruteForceMatcher<L2<float> >::matchImpl( const Mat& descriptors_1, const M
 #endif
 }
 
-
 /****************************************************************************************\
 *                                GenericDescriptorMatch                                  *
 \****************************************************************************************/
@@ -394,6 +399,9 @@ void GenericDescriptorMatch::clear()
     collection.clear();
 }
 
+/*
+ * Factory function for GenericDescriptorMatch creating
+ */
 Ptr<GenericDescriptorMatch> createGenericDescriptorMatch( const string& genericDescritptorMatchType, const string &paramsFilename )
 {
     GenericDescriptorMatch *descriptorMatch = 0;
@@ -409,7 +417,7 @@ Ptr<GenericDescriptorMatch> createGenericDescriptorMatch( const string& genericD
     }
     else if( ! genericDescritptorMatchType.compare ("CALONDER") )
     {
-        descriptorMatch = new CalonderDescriptorMatch ();
+        //descriptorMatch = new CalonderDescriptorMatch ();
     }
 
     if( !paramsFilename.empty() && descriptorMatch != 0 )
@@ -626,6 +634,7 @@ void OneWayDescriptorMatch::clear ()
 /****************************************************************************************\
 *                                CalonderDescriptorMatch                                 *
 \****************************************************************************************/
+#if 0
 CalonderDescriptorMatch::Params::Params( const RNG& _rng, const PatchGenerator& _patchGen,
                                          int _numTrees, int _depth, int _views,
                                          size_t _reducedNumDim,
@@ -774,6 +783,7 @@ void CalonderDescriptorMatch::write( FileStorage& fs ) const
     fs << "numQuantBits" << params.numQuantBits;
     fs << "printStatus" << params.printStatus;
 }
+#endif
 
 /****************************************************************************************\
 *                                  FernDescriptorMatch                                   *
@@ -827,22 +837,13 @@ void FernDescriptorMatch::trainFernClassifier()
     {
         assert( params.filename.empty() );
 
-        vector<Point2f> points;
-        vector<Ptr<Mat> > refimgs;
-        vector<int> labels;
-        for( size_t imageIdx = 0; imageIdx < collection.images.size(); imageIdx++ )
-        {
-            for( size_t pointIdx = 0; pointIdx < collection.points[imageIdx].size(); pointIdx++ )
-            {
-                refimgs.push_back(new Mat (collection.images[imageIdx]));
-                points.push_back(collection.points[imageIdx][pointIdx].pt);
-                labels.push_back((int)pointIdx);
-            }
-        }
+        vector<vector<Point2f> > points;
+        for( size_t imgIdx = 0; imgIdx < collection.images.size(); imgIdx++ )
+            KeyPoint::convert( collection.points[imgIdx], points[imgIdx] );
 
-        classifier = new FernClassifier( points, refimgs, labels, params.nclasses, params.patchSize,
-                                         params.signatureSize, params.nstructs, params.structSize, params.nviews,
-                                         params.compressionMethod, params.patchGenerator );
+        classifier = new FernClassifier( points, collection.images, vector<vector<int> >(), 0, // each points is a class
+                                         params.patchSize, params.signatureSize, params.nstructs, params.structSize,
+                                         params.nviews, params.compressionMethod, params.patchGenerator );
     }
 }
@@ -966,4 +967,59 @@ void FernDescriptorMatch::clear ()
     classifier.release();
 }
 
+/****************************************************************************************\
+*                                 VectorDescriptorMatch                                  *
+\****************************************************************************************/
+void VectorDescriptorMatch::add( const Mat& image, vector<KeyPoint>& keypoints )
+{
+    Mat descriptors;
+    extractor->compute( image, keypoints, descriptors );
+    matcher->add( descriptors );
+
+    collection.add( Mat(), keypoints );
+};
+
+void VectorDescriptorMatch::match( const Mat& image, vector<KeyPoint>& points, vector<int>& keypointIndices )
+{
+    Mat descriptors;
+    extractor->compute( image, points, descriptors );
+
+    matcher->match( descriptors, keypointIndices );
+};
+
+void VectorDescriptorMatch::match( const Mat& image, vector<KeyPoint>& points, vector<DMatch>& matches )
+{
+    Mat descriptors;
+    extractor->compute( image, points, descriptors );
+
+    matcher->match( descriptors, matches );
+}
+
+void VectorDescriptorMatch::match( const Mat& image, vector<KeyPoint>& points,
+                                   vector<vector<DMatch> >& matches, float threshold )
+{
+    Mat descriptors;
+    extractor->compute( image, points, descriptors );
+
+    matcher->match( descriptors, matches, threshold );
+}
+
+void VectorDescriptorMatch::clear()
+{
+    GenericDescriptorMatch::clear();
+    matcher->clear();
+}
+
+void VectorDescriptorMatch::read( const FileNode& fn )
+{
+    GenericDescriptorMatch::read(fn);
+    extractor->read (fn);
+}
+
+void VectorDescriptorMatch::write (FileStorage& fs) const
+{
+    GenericDescriptorMatch::write(fs);
+    extractor->write (fs);
+}
+
 }
@@ -692,9 +692,9 @@ Size FernClassifier::getPatchSize() const
 }
 
 
-FernClassifier::FernClassifier(const vector<Point2f>& points,
-                               const vector<Ptr<Mat> >& refimgs,
-                               const vector<int>& labels,
+FernClassifier::FernClassifier(const vector<vector<Point2f> >& points,
+                               const vector<Mat>& refimgs,
+                               const vector<vector<int> >& labels,
                                int _nclasses, int _patchSize,
                                int _signatureSize, int _nstructs,
                                int _structSize, int _nviews, int _compressionMethod,
@@ -829,41 +829,56 @@ void FernClassifier::prepare(int _nclasses, int _patchSize, int _signatureSize,
     }
 }
 
+static int calcNumPoints( const vector<vector<Point2f> >& points )
+{
+    int count = 0;
+    for( size_t i = 0; i < points.size(); i++ )
+        count += points[i].size();
+    return count;
+}
+
-void FernClassifier::train(const vector<Point2f>& points,
-                           const vector<Ptr<Mat> >& refimgs,
-                           const vector<int>& labels,
+void FernClassifier::train(const vector<vector<Point2f> >& points,
+                           const vector<Mat>& refimgs,
+                           const vector<vector<int> >& labels,
                            int _nclasses, int _patchSize,
                            int _signatureSize, int _nstructs,
                            int _structSize, int _nviews, int _compressionMethod,
                            const PatchGenerator& patchGenerator)
 {
-    _nclasses = _nclasses > 0 ? _nclasses : (int)points.size();
+    CV_Assert( points.size() == refimgs.size() );
+    int numPoints = calcNumPoints( points );
+    _nclasses = (!labels.empty() && _nclasses>0) ? _nclasses : numPoints;
     CV_Assert( labels.empty() || labels.size() == points.size() );
 
 
     prepare(_nclasses, _patchSize, _signatureSize, _nstructs,
             _structSize, _nviews, _compressionMethod);
 
     // pass all the views of all the samples through the generated trees and accumulate
     // the statistics (posterior probabilities) in leaves.
     Mat patch;
-    int i, j, nsamples = (int)points.size();
     RNG& rng = theRNG();
 
-    for( i = 0; i < nsamples; i++ )
+    int globalPointIdx = 0;
+    for( size_t imgIdx = 0; imgIdx < points.size(); imgIdx++ )
     {
-        Point2f pt = points[i];
-        const Mat& src = *refimgs[i];
-        int classId = labels.empty() ? i : labels[i];
-        if( verbose && (i+1)*progressBarSize/nsamples != i*progressBarSize/nsamples )
-            putchar('.');
-        CV_Assert( 0 <= classId && classId < nclasses );
-        classCounters[classId] += _nviews;
-        for( j = 0; j < _nviews; j++ )
+        const Point2f* imgPoints = &points[imgIdx][0];
+        const int* imgLabels = labels.empty() ? 0 : &labels[imgIdx][0];
+        for( size_t pointIdx = 0; pointIdx < points[imgIdx].size(); pointIdx++, globalPointIdx++ )
         {
-            patchGenerator(src, pt, patch, patchSize, rng);
-            for( int f = 0; f < nstructs; f++ )
-                posteriors[getLeaf(f, patch)*nclasses + classId]++;
+            Point2f pt = imgPoints[pointIdx];
+            const Mat& src = refimgs[imgIdx];
+            int classId = imgLabels==0 ? globalPointIdx : imgLabels[pointIdx];
+            if( verbose && (globalPointIdx+1)*progressBarSize/numPoints != globalPointIdx*progressBarSize/numPoints )
+                putchar('.');
+            CV_Assert( 0 <= classId && classId < nclasses );
+            classCounters[classId] += _nviews;
+            for( int v = 0; v < _nviews; v++ )
+            {
+                patchGenerator(src, pt, patch, patchSize, rng);
+                for( int f = 0; f < nstructs; f++ )
+                    posteriors[getLeaf(f, patch)*nclasses + classId]++;
+            }
         }
     }
     if( verbose )
@@ -1,309 +1,154 @@
-//Calonder descriptor sample
-#include <stdio.h>
-
-#if 0
-#include <cxcore.h>
-#include <cv.h>
-#include <cvaux.h>
 #include <highgui.h>
-#include <vector>
+#include <opencv2/core/core.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/features2d/features2d.hpp>
+#include <iostream>
+#include <fstream>
 
 using namespace std;
+using namespace cv;
 
-// Number of training points (set to -1 to use all points)
-const int n_points = -1;
-
-//Draw the border of projection of train image calculed by averaging detected correspondences
-const bool draw_border = true;
-
-void cvmSet6(CvMat* m, int row, int col, float val1, float val2, float val3, float val4, float val5, float val6)
-{
-    cvmSet(m, row, col, val1);
-    cvmSet(m, row, col + 1, val2);
-    cvmSet(m, row, col + 2, val3);
-    cvmSet(m, row, col + 3, val4);
-    cvmSet(m, row, col + 4, val5);
-    cvmSet(m, row, col + 5, val6);
-}
-
-void FindAffineTransform(const vector<CvPoint>& p1, const vector<CvPoint>& p2, CvMat* affine)
-{
-    int eq_num = 2*(int)p1.size();
-    CvMat* A = cvCreateMat(eq_num, 6, CV_32FC1);
-    CvMat* B = cvCreateMat(eq_num, 1, CV_32FC1);
-    CvMat* X = cvCreateMat(6, 1, CV_32FC1);
-
-    for(int i = 0; i < (int)p1.size(); i++)
-    {
-        cvmSet6(A, 2*i, 0, p1[i].x, p1[i].y, 1, 0, 0, 0);
-        cvmSet6(A, 2*i + 1, 0, 0, 0, 0, p1[i].x, p1[i].y, 1);
-        cvmSet(B, 2*i, 0, p2[i].x);
-        cvmSet(B, 2*i + 1, 0, p2[i].y);
-    }
-
-    cvSolve(A, B, X, CV_SVD);
-
-    cvmSet(affine, 0, 0, cvmGet(X, 0, 0));
-    cvmSet(affine, 0, 1, cvmGet(X, 1, 0));
-    cvmSet(affine, 0, 2, cvmGet(X, 2, 0));
-    cvmSet(affine, 1, 0, cvmGet(X, 3, 0));
-    cvmSet(affine, 1, 1, cvmGet(X, 4, 0));
-    cvmSet(affine, 1, 2, cvmGet(X, 5, 0));
-
-    cvReleaseMat(&A);
-    cvReleaseMat(&B);
-    cvReleaseMat(&X);
-}
-
-void MapVectorAffine(const vector<CvPoint>& p1, vector<CvPoint>& p2, CvMat* transform)
-{
-    float a = cvmGet(transform, 0, 0);
-    float b = cvmGet(transform, 0, 1);
-    float c = cvmGet(transform, 0, 2);
-    float d = cvmGet(transform, 1, 0);
-    float e = cvmGet(transform, 1, 1);
-    float f = cvmGet(transform, 1, 2);
-
-    for(int i = 0; i < (int)p1.size(); i++)
-    {
-        float x = a*p1[i].x + b*p1[i].y + c;
-        float y = d*p1[i].x + e*p1[i].y + f;
-        p2.push_back(cvPoint(x, y));
-    }
-}
-
-float CalcAffineReprojectionError(const vector<CvPoint>& p1, const vector<CvPoint>& p2, CvMat* transform)
-{
-    vector<CvPoint> mapped_p1;
-    MapVectorAffine(p1, mapped_p1, transform);
-    float error = 0;
-    for(int i = 0; i < (int)p2.size(); i++)
-    {
-        error += ((p2[i].x - mapped_p1[i].x)*(p2[i].x - mapped_p1[i].x)+(p2[i].y - mapped_p1[i].y)*(p2[i].y - mapped_p1[i].y));
-    }
-
-    error /= p2.size();
-
-    return error;
-}
-#endif
-
-int main( int, char** )
-{
-    printf("calonder_sample is under construction\n");
-    return 0;
-
-#if 0
-    IplImage* test_image;
-    IplImage* train_image;
-    if (argc < 3)
-    {
-        test_image = cvLoadImage("box_in_scene.png",0);
-        train_image = cvLoadImage("box.png ",0);
-        if (!test_image || !train_image)
-        {
-            printf("Usage: calonder_sample <train_image> <test_image>");
-            return 0;
-        }
-    }
-    else
-    {
-        test_image = cvLoadImage(argv[2],0);
-        train_image = cvLoadImage(argv[1],0);
-    }
-
-    if (!train_image)
-    {
-        printf("Unable to load train image\n");
-        return 0;
-    }
-
-    if (!test_image)
-    {
-        printf("Unable to load test image\n");
-        return 0;
-    }
-
-    CvMemStorage* storage = cvCreateMemStorage(0);
-    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
-    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
-    CvSURFParams params = cvSURFParams(500, 1);
-    cvExtractSURF( test_image, 0, &imageKeypoints, &imageDescriptors, storage, params );
-    cvExtractSURF( train_image, 0, &objectKeypoints, &objectDescriptors, storage, params );
-
-    cv::RTreeClassifier detector;
-    int patch_width = cv::PATCH_SIZE;
-    int patch_height = cv::PATCH_SIZE;
-    vector<cv::BaseKeypoint> base_set;
-    int i=0;
-    CvSURFPoint* point;
-    for (i=0;i<(n_points > 0 ? n_points : objectKeypoints->total);i++)
-    {
-        point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,i);
-        base_set.push_back(cv::BaseKeypoint(point->pt.x,point->pt.y,train_image));
-    }
-
-    //Detector training
-    cv::RNG rng( cvGetTickCount() );
-    cv::PatchGenerator gen(0,255,2,false,0.7,1.3,-CV_PI/3,CV_PI/3,-CV_PI/3,CV_PI/3);
-
-    printf("RTree Classifier training...\n");
-    detector.train(base_set,rng,gen,24,cv::DEFAULT_DEPTH,2000,(int)base_set.size(),detector.DEFAULT_NUM_QUANT_BITS);
-    printf("Done\n");
-
-    float* signature = new float[detector.original_num_classes()];
-    float* best_corr;
-    int* best_corr_idx;
-    if (imageKeypoints->total > 0)
-    {
-        best_corr = new float[imageKeypoints->total];
-        best_corr_idx = new int[imageKeypoints->total];
-    }
-
-    for(i=0; i < imageKeypoints->total; i++)
-    {
-        point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
-        int part_idx = -1;
-        float prob = 0.0f;
-
-        CvRect roi = cvRect((int)(point->pt.x) - patch_width/2,(int)(point->pt.y) - patch_height/2, patch_width, patch_height);
-        cvSetImageROI(test_image, roi);
-        roi = cvGetImageROI(test_image);
-        if(roi.width != patch_width || roi.height != patch_height)
-        {
-            best_corr_idx[i] = part_idx;
-            best_corr[i] = prob;
-        }
-        else
-        {
-            cvSetImageROI(test_image, roi);
-            IplImage* roi_image = cvCreateImage(cvSize(roi.width, roi.height), test_image->depth, test_image->nChannels);
-            cvCopy(test_image,roi_image);
-
-            detector.getSignature(roi_image, signature);
-
-            for (int j = 0; j< detector.original_num_classes();j++)
-            {
-                if (prob < signature[j])
-                {
-                    part_idx = j;
-                    prob = signature[j];
-                }
-            }
-
-            best_corr_idx[i] = part_idx;
-            best_corr[i] = prob;
-
-            if (roi_image)
-                cvReleaseImage(&roi_image);
-        }
-        cvResetImageROI(test_image);
-    }
-
-    float min_prob = 0.0f;
-    vector<CvPoint> object;
-    vector<CvPoint> features;
-
-    for (int j=0;j<objectKeypoints->total;j++)
-    {
-        float prob = 0.0f;
-        int idx = -1;
-        for (i = 0; i<imageKeypoints->total;i++)
-        {
-            if ((best_corr_idx[i]!=j)||(best_corr[i] < min_prob))
-                continue;
-
-            if (best_corr[i] > prob)
-            {
-                prob = best_corr[i];
-                idx = i;
-            }
-        }
-        if (idx >=0)
-        {
-            point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,j);
-            object.push_back(cvPoint((int)point->pt.x,(int)point->pt.y));
-            point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,idx);
-            features.push_back(cvPoint((int)point->pt.x,(int)point->pt.y));
-        }
-    }
-    if ((int)object.size() > 3)
-    {
-        CvMat* affine = cvCreateMat(2, 3, CV_32FC1);
-        FindAffineTransform(object,features,affine);
-
-        vector<CvPoint> corners;
-        vector<CvPoint> mapped_corners;
-        corners.push_back(cvPoint(0,0));
-        corners.push_back(cvPoint(0,train_image->height));
-        corners.push_back(cvPoint(train_image->width,0));
-        corners.push_back(cvPoint(train_image->width,train_image->height));
-        MapVectorAffine(corners,mapped_corners,affine);
-
-        //Drawing the result
-        IplImage* result = cvCreateImage(cvSize(test_image->width > train_image->width ? test_image->width : train_image->width,
-                                                train_image->height + test_image->height),
-                                         test_image->depth, test_image->nChannels);
-        cvSetImageROI(result,cvRect(0,0,train_image->width, train_image->height));
-        cvCopy(train_image,result);
-        cvResetImageROI(result);
-        cvSetImageROI(result,cvRect(0,train_image->height,test_image->width, test_image->height));
-        cvCopy(test_image,result);
-        cvResetImageROI(result);
-
-        for (int i=0;i<(int)features.size();i++)
-        {
-            cvLine(result,object[i],cvPoint(features[i].x,features[i].y+train_image->height),cvScalar(255));
-        }
-
-        if (draw_border)
-        {
-            cvLine(result,cvPoint(mapped_corners[0].x, mapped_corners[0].y+train_image->height),
-                   cvPoint(mapped_corners[1].x, mapped_corners[1].y+train_image->height),cvScalar(150),3);
-            cvLine(result,cvPoint(mapped_corners[0].x, mapped_corners[0].y+train_image->height),
-                   cvPoint(mapped_corners[2].x, mapped_corners[2].y+train_image->height),cvScalar(150),3);
-            cvLine(result,cvPoint(mapped_corners[1].x, mapped_corners[1].y+train_image->height),
-                   cvPoint(mapped_corners[3].x, mapped_corners[3].y+train_image->height),cvScalar(150),3);
-            cvLine(result,cvPoint(mapped_corners[2].x, mapped_corners[2].y+train_image->height),
-                   cvPoint(mapped_corners[3].x, mapped_corners[3].y+train_image->height),cvScalar(150),3);
-        }
-
-        cvSaveImage("Result.jpg",result);
-        cvNamedWindow("Result",0);
-        cvShowImage("Result",result);
-        cvWaitKey();
-        cvReleaseMat(&affine);
-        cvReleaseImage(&result);
-    }
-    else
-    {
-        printf("Unable to find correspondence\n");
-    }
-
-    if (signature)
-        delete[] signature;
-    if (best_corr)
-        delete[] best_corr;
-    cvReleaseMemStorage(&storage);
-    cvReleaseImage(&train_image);
-    cvReleaseImage(&test_image);
-
-    return 0;
-#endif
+void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
+{
+    H.create(3, 3, CV_32FC1);
+    H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
+    H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
+    H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols;
+    H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
+    H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
+    H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows;
+    H.at<float>(2,0) = rng.uniform( -1e-4f, 1e-4f);
+    H.at<float>(2,1) = rng.uniform( -1e-4f, 1e-4f);
+    H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);
+
+    warpPerspective( src, dst, H, src.size() );
+}
+
+int main( int argc, char **argv )
+{
+    if( argc != 4 && argc != 3 )
+    {
+        cout << "Format:" << endl <<
+                " classifier(xml to write) test_image file_with_train_images_filenames(txt)" <<
+                " or" << endl <<
+                " classifier(xml to read) test_image" << endl;
+        return -1;
+    }
+
+    CalonderClassifier classifier;
+    if( argc == 4 ) // Train
+    {
+        // Read train images and test image
+        ifstream fst( argv[3], ifstream::in );
+        vector<Mat> trainImgs;
+        while( !fst.eof() )
+        {
+            string str;
+            getline( fst, str );
+            if (str.empty()) break;
+            Mat img = imread( str, CV_LOAD_IMAGE_GRAYSCALE );
+            if( !img.empty() )
+                trainImgs.push_back( img );
+        }
+        if( trainImgs.empty() )
+        {
+            cout << "All train images can not be read." << endl;
+            return -1;
+        }
+        cout << trainImgs.size() << " train images were read." << endl;
+
+        // Extract keypoints from train images
+        SurfFeatureDetector detector;
+        vector<vector<Point2f> > trainPoints( trainImgs.size() );
+        for( size_t i = 0; i < trainImgs.size(); i++ )
+        {
+            vector<KeyPoint> kps;
+            detector.detect( trainImgs[i], kps );
+            KeyPoint::convert( kps, trainPoints[i] );
+        }
+
+        // Train Calonder classifier on extracted points
+        classifier.setVerbose( true);
+        classifier.train( trainPoints, trainImgs );
+
+        // Write Calonder classifier
+        FileStorage fs( argv[1], FileStorage::WRITE );
+        classifier.write( fs );
+    }
+    else
+    {
+        // Read Calonder classifier
+        FileStorage fs( argv[1], FileStorage::READ );
+        classifier.read( fs.root() );
+    }
+
+    if( classifier.empty() )
+    {
+        cout << "Calonder classifier is empty" << endl;
+        return -1;
+    }
+
+    // Test Calonder classifier on test image and warped one
+    Mat testImg1 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE ), testImg2, H12;
+    if( testImg1.empty() )
+    {
+        cout << "Test image can not be read." << endl;
+        return -1;
+    }
+    warpPerspectiveRand( testImg1, testImg2, H12, theRNG() );
+
+    // Exstract keypoints from test images
+    SurfFeatureDetector detector;
+    vector<KeyPoint> testKeypoints1; detector.detect( testImg1, testKeypoints1 );
+    vector<KeyPoint> testKeypoints2; detector.detect( testImg2, testKeypoints2 );
+    vector<Point2f> testPoints1; KeyPoint::convert( testKeypoints1, testPoints1 );
+    vector<Point2f> testPoints2; KeyPoint::convert( testKeypoints2, testPoints2 );
+
+    // Calculate Calonder descriptors
+    int signatureSize = classifier.getSignatureSize();
+    vector<float> r1(testPoints1.size()*signatureSize), r2(testPoints2.size()*signatureSize);
+    vector<float>::iterator rit = r1.begin();
+    for( size_t i = 0; i < testPoints1.size(); i++ )
+    {
+        vector<float> s;
+        classifier( testImg1, testPoints1[i], s );
+        copy( s.begin(), s.end(), rit );
+        rit += s.size();
+    }
+    rit = r2.begin();
+    for( size_t i = 0; i < testPoints2.size(); i++ )
+    {
+        vector<float> s;
+        classifier( testImg2, testPoints2[i], s );
+        copy( s.begin(), s.end(), rit );
+        rit += s.size();
+    }
+
+    Mat descriptors1(testPoints1.size(), classifier.getSignatureSize(), CV_32FC1, &r1[0] ),
+        descriptors2(testPoints2.size(), classifier.getSignatureSize(), CV_32FC1, &r2[0] );
+
+    // Match descriptors
+    BruteForceMatcher<L1<float> > matcher;
+    matcher.add( descriptors2 );
+    vector<int> matches;
+    matcher.match( descriptors1, matches );
+
+    // Draw results
+    // Prepare inlier mask
+    vector<char> matchesMask( matches.size(), 0 );
+    Mat points1t; perspectiveTransform(Mat(testPoints1), points1t, H12);
+    vector<int>::const_iterator mit = matches.begin();
+    for( size_t mi = 0; mi < matches.size(); mi++ )
+    {
+        if( norm(testPoints2[matches[mi]] - points1t.at<Point2f>(mi,0)) < 4 ) // inlier
+            matchesMask[mi] = 1;
+    }
+    // Draw
+    Mat drawImg;
+    drawMatches( testImg1, testKeypoints1, testImg2, testKeypoints2, matches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask );
+    string winName = "Matches";
+    namedWindow( winName, WINDOW_AUTOSIZE );
+    imshow( winName, drawImg );
+    waitKey();
 }