Mirror of https://github.com/opencv/opencv.git
fixed BruteForceMatcher_GPU (fails if input data is empty)
updated BruteForceMatcher_GPU test

commit eda8416358 (parent cecfde309c)
@@ -1301,7 +1301,7 @@ namespace cv
             const GpuMat& maskCollection);

         // Download trainIdx, imgIdx and distance to CPU vector with DMatch
-        static void matchDownload(const GpuMat& trainIdx, GpuMat& imgIdx, const GpuMat& distance,
+        static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
             std::vector<DMatch>& matches);

         // Find one best match from train collection for each query descriptor.
@@ -59,7 +59,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const Gpu
 void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, const GpuMat&, vector<DMatch>&, const GpuMat&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat&, GpuMat&, const vector<GpuMat>&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, std::vector<DMatch>&, const std::vector<GpuMat>&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }
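For orientation, a minimal sketch of the two-step collection path that the const-correct matchDownload signature above belongs to. The wrapper function, the matcher instance and the d_query name are assumptions for illustration, not part of the commit; it also presumes train sets were previously registered with add().

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch: match one query set against the added train collection,
    // then download the raw results; trainIdx/imgIdx/distance are only read here,
    // which is what the const signature now states.
    void matchAgainstCollection(cv::gpu::BruteForceMatcher_GPU< cv::L2<float> >& matcher,
                                const cv::gpu::GpuMat& d_query)
    {
        cv::gpu::GpuMat trainCollection, maskCollection;
        matcher.makeGpuCollection(trainCollection, maskCollection, std::vector<cv::gpu::GpuMat>());

        cv::gpu::GpuMat trainIdx, imgIdx, distance;
        matcher.matchCollection(d_query, trainCollection, trainIdx, imgIdx, distance, maskCollection);

        std::vector<cv::DMatch> matches;
        matcher.matchDownload(trainIdx, imgIdx, distance, matches);
    }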
@@ -142,6 +142,9 @@ bool cv::gpu::BruteForceMatcher_GPU_base::isMaskSupported() const
 void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
     GpuMat& trainIdx, GpuMat& distance, const GpuMat& mask)
 {
+    if (queryDescs.empty() || trainDescs.empty())
+        return;
+
     using namespace cv::gpu::bfmatcher;

     typedef void (*match_caller_t)(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
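A minimal usage sketch of what this guard (together with the commit message and the updated test further down) establishes; the function and variable names are assumptions:

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch: with the empty-input guards, matching empty descriptor
    // sets is a silent no-op instead of a failure.
    void matchPossiblyEmpty()
    {
        cv::gpu::BruteForceMatcher_GPU< cv::L2<float> > matcher;
        cv::gpu::GpuMat query, train;          // both intentionally left empty
        std::vector<cv::DMatch> matches;
        matcher.match(query, train, matches);  // returns immediately, matches stays empty
    }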
@@ -159,7 +162,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& queryDescs,
         }
     };

-    CV_Assert(queryDescs.channels() == 1);
+    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);
     CV_Assert(trainDescs.cols == queryDescs.cols && trainDescs.type() == queryDescs.type());

     const int nQuery = queryDescs.rows;
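The tightened assert above additionally rejects CV_64F descriptors. A hedged sketch of the conversion a caller could apply before upload (the helper name is an assumption):

    #include <opencv2/core/core.hpp>
    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch: the matcher now asserts depth() < CV_64F, so convert
    // double-precision descriptors to CV_32F before uploading them.
    cv::gpu::GpuMat uploadDescriptors(const cv::Mat& descriptors)
    {
        cv::Mat desc32f;
        if (descriptors.depth() == CV_64F)
            descriptors.convertTo(desc32f, CV_32F);
        else
            desc32f = descriptors;
        return cv::gpu::GpuMat(desc32f);   // this constructor uploads to the device
    }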
@@ -178,6 +181,12 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& queryDescs,
 void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& distance,
     vector<DMatch>& matches)
 {
+    if (trainIdx.empty() || distance.empty())
+        return;
+
+    CV_Assert(trainIdx.type() == CV_32SC1 && trainIdx.isContinuous());
+    CV_Assert(distance.type() == CV_32FC1 && distance.isContinuous() && distance.size().area() == trainIdx.size().area());
+
     const int nQuery = trainIdx.cols;

     Mat trainIdxCPU = trainIdx;
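For context, a sketch of the single-train pair these checks guard: matchSingle fills the raw trainIdx/distance buffers and matchDownload converts them to DMatch. The wrapper and argument names are assumptions:

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch of the two-step single-train path guarded above.
    void matchSingleTrain(cv::gpu::BruteForceMatcher_GPU< cv::L2<float> >& matcher,
                          const cv::gpu::GpuMat& d_query, const cv::gpu::GpuMat& d_train)
    {
        cv::gpu::GpuMat trainIdx, distance;
        matcher.matchSingle(d_query, d_train, trainIdx, distance, cv::gpu::GpuMat());

        std::vector<cv::DMatch> matches;
        matcher.matchDownload(trainIdx, distance, matches);   // no-op if the buffers are empty
    }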
@@ -213,6 +222,9 @@ void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, const
 void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
     const vector<GpuMat>& masks)
 {
+    if (empty())
+        return;
+
     if (masks.empty())
     {
         Mat trainCollectionCPU(1, trainDescCollection.size(), CV_8UC(sizeof(DevMem2D)));
@@ -238,7 +250,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollect
             const GpuMat& trainDescs = trainDescCollection[i];
             const GpuMat& mask = masks[i];

-            CV_Assert(mask.empty() || (mask.type() == CV_8UC1));
+            CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == trainDescs.rows));

             trainCollectionCPU.ptr<DevMem2D>(0)[i] = trainDescs;

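The strengthened assert also documents the expected mask shape. A sketch that builds an allow-all mask the way the updated test does (one row per query descriptor, one column per train descriptor); the helper name is an assumption:

    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch: per-train-image masks are CV_8UC1 and sized
    // query.rows x train.rows, matching the new mask.cols == trainDescs.rows check.
    cv::gpu::GpuMat makeAllowAllMask(const cv::gpu::GpuMat& d_query, const cv::gpu::GpuMat& d_train)
    {
        return cv::gpu::GpuMat(d_query.rows, d_train.rows, CV_8UC1, cv::Scalar::all(1));
    }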
@@ -253,6 +265,9 @@ void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollect
 void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,
     GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, const GpuMat& maskCollection)
 {
+    if (queryDescs.empty() || trainCollection.empty())
+        return;
+
     using namespace cv::gpu::bfmatcher;

     typedef void (*match_caller_t)(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
@@ -273,7 +288,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& queryDes
         }
     };

-    CV_Assert(queryDescs.channels() == 1);
+    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);

     const int nQuery = queryDescs.rows;

@@ -287,9 +302,16 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& queryDes
     func(queryDescs, trainCollection, maskCollection, trainIdx, imgIdx, distance);
 }

-void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, GpuMat& imgIdx,
+void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx,
     const GpuMat& distance, vector<DMatch>& matches)
 {
+    if (trainIdx.empty() || imgIdx.empty() || distance.empty())
+        return;
+
+    CV_Assert(trainIdx.type() == CV_32SC1 && trainIdx.isContinuous());
+    CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.isContinuous());
+    CV_Assert(distance.type() == CV_32FC1 && distance.isContinuous());
+
     const int nQuery = trainIdx.cols;

     Mat trainIdxCPU = trainIdx;
@@ -338,6 +360,9 @@ void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, vector
 void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
     GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask)
 {
+    if (queryDescs.empty() || trainDescs.empty())
+        return;
+
     using namespace cv::gpu::bfmatcher;

     typedef void (*match_caller_t)(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
@@ -355,7 +380,8 @@ void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, con
         }
     };

-    CV_Assert(queryDescs.channels() == 1);
+    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);
+    CV_Assert(trainDescs.type() == queryDescs.type() && trainDescs.cols == queryDescs.cols);

     const int nQuery = queryDescs.rows;
     const int nTrain = trainDescs.rows;
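A sketch of the low-level k-NN pair these asserts validate; the wrapper, the names and the choice of k = 2 are assumptions:

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch of the knnMatch / knnMatchDownload pair guarded above.
    void knnMatchTwoNearest(cv::gpu::BruteForceMatcher_GPU< cv::L2<float> >& matcher,
                            const cv::gpu::GpuMat& d_query, const cv::gpu::GpuMat& d_train)
    {
        cv::gpu::GpuMat trainIdx, distance, allDist;
        matcher.knnMatch(d_query, d_train, trainIdx, distance, allDist, 2, cv::gpu::GpuMat());

        std::vector< std::vector<cv::DMatch> > matches;
        matcher.knnMatchDownload(trainIdx, distance, matches, false);  // quietly returns on empty buffers
    }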
@@ -375,6 +401,12 @@ void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, con
 void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
     vector< vector<DMatch> >& matches, bool compactResult)
 {
+    if (trainIdx.empty() || distance.empty())
+        return;
+
+    CV_Assert(trainIdx.type() == CV_32SC1);
+    CV_Assert(distance.type() == CV_32FC1 && distance.size() == trainIdx.size());
+
     const int nQuery = distance.rows;
     const int k = trainIdx.cols;

@@ -434,6 +466,9 @@ namespace
 void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs,
     vector< vector<DMatch> >& matches, int knn, const vector<GpuMat>& masks, bool compactResult)
 {
+    if (queryDescs.empty() || empty())
+        return;
+
     vector< vector<DMatch> > curMatches;
     vector<DMatch> temp;
     temp.reserve(2 * knn);
@@ -476,6 +511,9 @@ void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs,
 void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
     GpuMat& trainIdx, GpuMat& nMatches, GpuMat& distance, float maxDistance, const GpuMat& mask)
 {
+    if (queryDescs.empty() || trainDescs.empty())
+        return;
+
     using namespace cv::gpu::bfmatcher;

     typedef void (*radiusMatch_caller_t)(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
@@ -498,7 +536,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs,
     const int nQuery = queryDescs.rows;
     const int nTrain = trainDescs.rows;

-    CV_Assert(queryDescs.channels() == 1);
+    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);
     CV_Assert(trainDescs.type() == queryDescs.type() && trainDescs.cols == queryDescs.cols);
     CV_Assert(trainIdx.empty() || trainIdx.rows == nQuery);

@@ -519,6 +557,13 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs,
 void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& nMatches,
     const GpuMat& distance, std::vector< std::vector<DMatch> >& matches, bool compactResult)
 {
+    if (trainIdx.empty() || nMatches.empty() || distance.empty())
+        return;
+
+    CV_Assert(trainIdx.type() == CV_32SC1);
+    CV_Assert(nMatches.type() == CV_32SC1 && nMatches.isContinuous() && nMatches.size().area() == trainIdx.rows);
+    CV_Assert(distance.type() == CV_32FC1 && distance.size() == trainIdx.size());
+
     const int nQuery = trainIdx.rows;

     Mat trainIdxCPU = trainIdx;
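Similarly, a sketch of the radius-search pair these checks protect; the wrapper, the names and the radius value are assumptions:

    #include <vector>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/gpu/gpu.hpp>

    // Hypothetical sketch of the radiusMatch / radiusMatchDownload pair guarded above.
    void radiusMatchSketch(cv::gpu::BruteForceMatcher_GPU< cv::L2<float> >& matcher,
                           const cv::gpu::GpuMat& d_query, const cv::gpu::GpuMat& d_train)
    {
        cv::gpu::GpuMat trainIdx, nMatches, distance;
        matcher.radiusMatch(d_query, d_train, trainIdx, nMatches, distance, 0.25f, cv::gpu::GpuMat());

        std::vector< std::vector<cv::DMatch> > matches;
        matcher.radiusMatchDownload(trainIdx, nMatches, distance, matches, false);
    }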
@@ -570,9 +615,11 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs,
 }

 void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs, vector< vector<DMatch> >& matches,
     float maxDistance, const vector<GpuMat>& masks, bool compactResult)
-
 {
+    if (queryDescs.empty() || empty())
+        return;
+
     matches.resize(queryDescs.rows);

     vector< vector<DMatch> > curMatches;
@@ -50,131 +50,453 @@ using namespace std;
 class CV_GpuBruteForceMatcherTest : public CvTest
 {
 public:
-    CV_GpuBruteForceMatcherTest() : CvTest( "GPU-BruteForceMatcher", "BruteForceMatcher" ) {}
-protected:
-    void run(int)
-    {
-        try
-        {
-            BruteForceMatcher< L2<float> > matcherCPU;
-            BruteForceMatcher_GPU< L2<float> > matcherGPU;
-
-            vector<DMatch> matchesCPU, matchesGPU;
-            vector< vector<DMatch> > knnMatchesCPU, knnMatchesGPU;
-            vector< vector<DMatch> > radiusMatchesCPU, radiusMatchesGPU;
-
-            RNG rng(*ts->get_rng());
-
-            const int desc_len = rng.uniform(40, 300);
-
-            Mat queryCPU(rng.uniform(100, 300), desc_len, CV_32F);
-            rng.fill(queryCPU, cv::RNG::UNIFORM, cv::Scalar::all(0.0), cv::Scalar::all(10.0));
-            GpuMat queryGPU(queryCPU);
-
-            const int nTrains = rng.uniform(1, 5);
-
-            vector<Mat> trainsCPU(nTrains);
-            vector<GpuMat> trainsGPU(nTrains);
-
-            vector<Mat> masksCPU(nTrains);
-            vector<GpuMat> masksGPU(nTrains);
-
-            for (int i = 0; i < nTrains; ++i)
-            {
-                Mat train(rng.uniform(100, 300), desc_len, CV_32F);
-                rng.fill(train, cv::RNG::UNIFORM, cv::Scalar::all(0.0), cv::Scalar::all(10.0));
-
-                trainsCPU[i] = train;
-                trainsGPU[i].upload(train);
-
-                bool with_mask = rng.uniform(0, 10) < 5;
-                if (with_mask)
-                {
-                    Mat mask(queryCPU.rows, train.rows, CV_8U);
-                    rng.fill(mask, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(200));
-
-                    masksCPU[i] = mask;
-                    masksGPU[i].upload(mask);
-                }
-            }
-
-            matcherCPU.add(trainsCPU);
-            matcherGPU.add(trainsGPU);
-
-            matcherCPU.match(queryCPU, matchesCPU, masksCPU);
-            matcherGPU.match(queryGPU, matchesGPU, masksGPU);
-
-            if (!compareMatches(matchesCPU, matchesGPU))
-            {
-                ts->printf(CvTS::LOG, "Match FAIL\n");
-                ts->set_failed_test_info(CvTS::FAIL_MISMATCH);
-                return;
-            }
-
-            const int knn = rng.uniform(3, 10);
-
-            matcherCPU.knnMatch(queryCPU, knnMatchesCPU, knn, masksCPU, true);
-            matcherGPU.knnMatch(queryGPU, knnMatchesGPU, knn, masksGPU, true);
-
-            if (!compareMatches(knnMatchesCPU, knnMatchesGPU))
-            {
-                ts->printf(CvTS::LOG, "KNN Match FAIL\n");
-                ts->set_failed_test_info(CvTS::FAIL_MISMATCH);
-                return;
-            }
-
-            const float maxDistance = rng.uniform(25.0f, 65.0f);
-
-            matcherCPU.radiusMatch(queryCPU, radiusMatchesCPU, maxDistance, masksCPU, true);
-            matcherGPU.radiusMatch(queryGPU, radiusMatchesGPU, maxDistance, masksGPU, true);
-
-            if (!compareMatches(radiusMatchesCPU, radiusMatchesGPU))
-            {
-                ts->printf(CvTS::LOG, "Radius Match FAIL\n");
-                ts->set_failed_test_info(CvTS::FAIL_MISMATCH);
-                return;
-            }
-        }
-        catch (const cv::Exception& e)
-        {
-            if (!check_and_treat_gpu_exception(e, ts))
-                throw;
-            return;
-        }
-
-        ts->set_failed_test_info(CvTS::OK);
-    }
-
-private:
-    static void convertMatches(const vector< vector<DMatch> >& knnMatches, vector<DMatch>& matches)
-    {
-        matches.clear();
-        for (size_t i = 0; i < knnMatches.size(); ++i)
-            copy(knnMatches[i].begin(), knnMatches[i].end(), back_inserter(matches));
-    }
-
-    struct DMatchEqual : public binary_function<DMatch, DMatch, bool>
-    {
-        bool operator()(const DMatch& m1, const DMatch& m2) const
-        {
-            return m1.imgIdx == m2.imgIdx && m1.queryIdx == m2.queryIdx && m1.trainIdx == m2.trainIdx;
-        }
-    };
-
-    static bool compareMatches(const vector<DMatch>& matches1, const vector<DMatch>& matches2)
-    {
-        if (matches1.size() != matches2.size())
-            return false;
-
-        return equal(matches1.begin(), matches1.end(), matches2.begin(), DMatchEqual());
-    }
-
-    static bool compareMatches(const vector< vector<DMatch> >& matches1, const vector< vector<DMatch> >& matches2)
-    {
-        vector<DMatch> m1, m2;
-        convertMatches(matches1, m1);
-        convertMatches(matches2, m2);
-        return compareMatches(m1, m2);
-    }
-} brute_force_matcher_test;
+    CV_GpuBruteForceMatcherTest() :
+        CvTest( "GPU-BruteForceMatcher", "BruteForceMatcher" ), badPart(0.01f)
+    {
+    }
+protected:
+    static const int dim = 500;
+    static const int queryDescCount = 300; // must be even number because we split train data in some cases in two
+    static const int countFactor = 4; // do not change it
+    const float badPart;
+
+    virtual void run(int);
+    void generateData(GpuMat& query, GpuMat& train);
+
+    void emptyDataTest();
+    void matchTest(const GpuMat& query, const GpuMat& train);
+    void knnMatchTest(const GpuMat& query, const GpuMat& train);
+    void radiusMatchTest(const GpuMat& query, const GpuMat& train);
+
+    BruteForceMatcher_GPU< L2<float> > dmatcher;
+};
+
+void CV_GpuBruteForceMatcherTest::emptyDataTest()
+{
+    GpuMat queryDescriptors, trainDescriptors, mask;
+    vector<GpuMat> trainDescriptorCollection, masks;
+    vector<DMatch> matches;
+    vector< vector<DMatch> > vmatches;
+
+    try
+    {
+        dmatcher.match(queryDescriptors, trainDescriptors, matches, mask);
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "match() on empty descriptors must not generate exception (1).\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+    try
+    {
+        dmatcher.knnMatch(queryDescriptors, trainDescriptors, vmatches, 2, mask);
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "knnMatch() on empty descriptors must not generate exception (1).\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+    try
+    {
+        dmatcher.radiusMatch(queryDescriptors, trainDescriptors, vmatches, 10.f, mask);
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "radiusMatch() on empty descriptors must not generate exception (1).\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+    try
+    {
+        dmatcher.add(trainDescriptorCollection);
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "add() on empty descriptors must not generate exception.\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+    try
+    {
+        dmatcher.match(queryDescriptors, matches, masks);
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "match() on empty descriptors must not generate exception (2).\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+    try
+    {
+        dmatcher.knnMatch(queryDescriptors, vmatches, 2, masks);
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "knnMatch() on empty descriptors must not generate exception (2).\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+    try
+    {
+        dmatcher.radiusMatch( queryDescriptors, vmatches, 10.f, masks );
+    }
+    catch(...)
+    {
+        ts->printf( CvTS::LOG, "radiusMatch() on empty descriptors must not generate exception (2).\n" );
+        ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+    }
+
+}
+
+void CV_GpuBruteForceMatcherTest::generateData( GpuMat& queryGPU, GpuMat& trainGPU )
+{
+    Mat query, train;
+    RNG rng(*ts->get_rng());
+
+    // Generate query descriptors randomly.
+    // Descriptor vector elements are integer values.
+    Mat buf( queryDescCount, dim, CV_32SC1 );
+    rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
+    buf.convertTo( query, CV_32FC1 );
+
+    // Generate train decriptors as follows:
+    // copy each query descriptor to train set countFactor times
+    // and perturb some one element of the copied descriptors in
+    // in ascending order. General boundaries of the perturbation
+    // are (0.f, 1.f).
+    train.create( query.rows*countFactor, query.cols, CV_32FC1 );
+    float step = 1.f / countFactor;
+    for( int qIdx = 0; qIdx < query.rows; qIdx++ )
+    {
+        Mat queryDescriptor = query.row(qIdx);
+        for( int c = 0; c < countFactor; c++ )
+        {
+            int tIdx = qIdx * countFactor + c;
+            Mat trainDescriptor = train.row(tIdx);
+            queryDescriptor.copyTo( trainDescriptor );
+            int elem = rng(dim);
+            float diff = rng.uniform( step*c, step*(c+1) );
+            trainDescriptor.at<float>(0, elem) += diff;
+        }
+    }
+
+    queryGPU.upload(query);
+    trainGPU.upload(train);
+}
+
+void CV_GpuBruteForceMatcherTest::matchTest( const GpuMat& query, const GpuMat& train )
+{
+    dmatcher.clear();
+
+    // test const version of match()
+    {
+        vector<DMatch> matches;
+        dmatcher.match( query, train, matches );
+
+        if( (int)matches.size() != queryDescCount )
+        {
+            ts->printf(CvTS::LOG, "Incorrect matches count while test match() function (1).\n");
+            ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+        }
+        else
+        {
+            int badCount = 0;
+            for( size_t i = 0; i < matches.size(); i++ )
+            {
+                DMatch match = matches[i];
+                if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor) || (match.imgIdx != 0) )
+                    badCount++;
+            }
+            if( (float)badCount > (float)queryDescCount*badPart )
+            {
+                ts->printf( CvTS::LOG, "%f - too large bad matches part while test match() function (1).\n",
+                            (float)badCount/(float)queryDescCount );
+                ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+            }
+        }
+    }
+
+    // test version of match() with add()
+    {
+        vector<DMatch> matches;
+        // make add() twice to test such case
+        dmatcher.add( vector<GpuMat>(1,train.rowRange(0, train.rows/2)) );
+        dmatcher.add( vector<GpuMat>(1,train.rowRange(train.rows/2, train.rows)) );
+        // prepare masks (make first nearest match illegal)
+        vector<GpuMat> masks(2);
+        for(int mi = 0; mi < 2; mi++ )
+        {
+            masks[mi] = GpuMat(query.rows, train.rows/2, CV_8UC1, Scalar::all(1));
+            for( int di = 0; di < queryDescCount/2; di++ )
+                masks[mi].col(di*countFactor).setTo(Scalar::all(0));
+        }
+
+        dmatcher.match( query, matches, masks );
+
+        if( (int)matches.size() != queryDescCount )
+        {
+            ts->printf(CvTS::LOG, "Incorrect matches count while test match() function (2).\n");
+            ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+        }
+        else
+        {
+            int badCount = 0;
+            for( size_t i = 0; i < matches.size(); i++ )
+            {
+                DMatch match = matches[i];
+                int shift = dmatcher.isMaskSupported() ? 1 : 0;
+                {
+                    if( i < queryDescCount/2 )
+                    {
+                        if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor + shift) || (match.imgIdx != 0) )
+                            badCount++;
+                    }
+                    else
+                    {
+                        if( (match.queryIdx != (int)i) || (match.trainIdx != ((int)i-queryDescCount/2)*countFactor + shift) || (match.imgIdx != 1) )
+                            badCount++;
+                    }
+                }
+            }
+            if( (float)badCount > (float)queryDescCount*badPart )
+            {
+                ts->printf( CvTS::LOG, "%f - too large bad matches part while test match() function (2).\n",
+                            (float)badCount/(float)queryDescCount );
+                ts->set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
+            }
+        }
+    }
+}
+
+void CV_GpuBruteForceMatcherTest::knnMatchTest( const GpuMat& query, const GpuMat& train )
+{
+    dmatcher.clear();
+
+    // test const version of knnMatch()
+    {
+        const int knn = 3;
+
+        vector< vector<DMatch> > matches;
+        dmatcher.knnMatch( query, train, matches, knn );
+
+        if( (int)matches.size() != queryDescCount )
+        {
+            ts->printf(CvTS::LOG, "Incorrect matches count while test knnMatch() function (1).\n");
+            ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+        }
+        else
+        {
+            int badCount = 0;
+            for( size_t i = 0; i < matches.size(); i++ )
+            {
+                if( (int)matches[i].size() != knn )
+                    badCount++;
+                else
+                {
+                    int localBadCount = 0;
+                    for( int k = 0; k < knn; k++ )
+                    {
+                        DMatch match = matches[i][k];
+                        if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor+k) || (match.imgIdx != 0) )
+                            localBadCount++;
+                    }
+                    badCount += localBadCount > 0 ? 1 : 0;
+                }
+            }
+            if( (float)badCount > (float)queryDescCount*badPart )
+            {
+                ts->printf( CvTS::LOG, "%f - too large bad matches part while test knnMatch() function (1).\n",
+                            (float)badCount/(float)queryDescCount );
+                ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+            }
+        }
+    }
+
+    // test version of knnMatch() with add()
+    {
+        const int knn = 2;
+        vector<vector<DMatch> > matches;
+        // make add() twice to test such case
+        dmatcher.add( vector<GpuMat>(1,train.rowRange(0, train.rows/2)) );
+        dmatcher.add( vector<GpuMat>(1,train.rowRange(train.rows/2, train.rows)) );
+        // prepare masks (make first nearest match illegal)
+        vector<GpuMat> masks(2);
+        for(int mi = 0; mi < 2; mi++ )
+        {
+            masks[mi] = GpuMat(query.rows, train.rows/2, CV_8UC1, Scalar::all(1));
+            for( int di = 0; di < queryDescCount/2; di++ )
+                masks[mi].col(di*countFactor).setTo(Scalar::all(0));
+        }
+
+        dmatcher.knnMatch( query, matches, knn, masks );
+
+        if( (int)matches.size() != queryDescCount )
+        {
+            ts->printf(CvTS::LOG, "Incorrect matches count while test knnMatch() function (2).\n");
+            ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+        }
+        else
+        {
+            int badCount = 0;
+            int shift = dmatcher.isMaskSupported() ? 1 : 0;
+            for( size_t i = 0; i < matches.size(); i++ )
+            {
+                if( (int)matches[i].size() != knn )
+                    badCount++;
+                else
+                {
+                    int localBadCount = 0;
+                    for( int k = 0; k < knn; k++ )
+                    {
+                        DMatch match = matches[i][k];
+                        {
+                            if( i < queryDescCount/2 )
+                            {
+                                if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor + k + shift) ||
+                                    (match.imgIdx != 0) )
+                                    localBadCount++;
+                            }
+                            else
+                            {
+                                if( (match.queryIdx != (int)i) || (match.trainIdx != ((int)i-queryDescCount/2)*countFactor + k + shift) ||
+                                    (match.imgIdx != 1) )
+                                    localBadCount++;
+                            }
+                        }
+                    }
+                    badCount += localBadCount > 0 ? 1 : 0;
+                }
+            }
+            if( (float)badCount > (float)queryDescCount*badPart )
+            {
+                ts->printf( CvTS::LOG, "%f - too large bad matches part while test knnMatch() function (2).\n",
+                            (float)badCount/(float)queryDescCount );
+                ts->set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
+            }
+        }
+    }
+}
+
+void CV_GpuBruteForceMatcherTest::radiusMatchTest( const GpuMat& query, const GpuMat& train )
+{
+    dmatcher.clear();
+    // test const version of match()
+    {
+        const float radius = 1.f/countFactor;
+        vector< vector<DMatch> > matches;
+        dmatcher.radiusMatch( query, train, matches, radius );
+
+        if( (int)matches.size() != queryDescCount )
+        {
+            ts->printf(CvTS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n");
+            ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+        }
+        else
+        {
+            int badCount = 0;
+            for( size_t i = 0; i < matches.size(); i++ )
+            {
+                if( (int)matches[i].size() != 1 )
+                    badCount++;
+                else
+                {
+                    DMatch match = matches[i][0];
+                    if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor) || (match.imgIdx != 0) )
+                        badCount++;
+                }
+            }
+            if( (float)badCount > (float)queryDescCount*badPart )
+            {
+                ts->printf( CvTS::LOG, "%f - too large bad matches part while test radiusMatch() function (1).\n",
+                            (float)badCount/(float)queryDescCount );
+                ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+            }
+        }
+    }
+
+    // test version of match() with add()
+    {
+        int n = 3;
+        const float radius = 1.f/countFactor * n;
+        vector< vector<DMatch> > matches;
+        // make add() twice to test such case
+        dmatcher.add( vector<GpuMat>(1,train.rowRange(0, train.rows/2)) );
+        dmatcher.add( vector<GpuMat>(1,train.rowRange(train.rows/2, train.rows)) );
+        // prepare masks (make first nearest match illegal)
+        vector<GpuMat> masks(2);
+        for(int mi = 0; mi < 2; mi++ )
+        {
+            masks[mi] = GpuMat(query.rows, train.rows/2, CV_8UC1, Scalar::all(1));
+            for( int di = 0; di < queryDescCount/2; di++ )
+                masks[mi].col(di*countFactor).setTo(Scalar::all(0));
+        }
+
+        dmatcher.radiusMatch( query, matches, radius, masks );
+
+        int curRes = CvTS::OK;
+        if( (int)matches.size() != queryDescCount )
+        {
+            ts->printf(CvTS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n");
+            ts->set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
+        }
+
+        int badCount = 0;
+        int shift = dmatcher.isMaskSupported() ? 1 : 0;
+        int needMatchCount = dmatcher.isMaskSupported() ? n-1 : n;
+        for( size_t i = 0; i < matches.size(); i++ )
+        {
+            if( (int)matches[i].size() != needMatchCount )
+                badCount++;
+            else
+            {
+                int localBadCount = 0;
+                for( int k = 0; k < needMatchCount; k++ )
+                {
+                    DMatch match = matches[i][k];
+                    {
+                        if( i < queryDescCount/2 )
+                        {
+                            if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor + k + shift) ||
+                                (match.imgIdx != 0) )
+                                localBadCount++;
+                        }
+                        else
+                        {
+                            if( (match.queryIdx != (int)i) || (match.trainIdx != ((int)i-queryDescCount/2)*countFactor + k + shift) ||
+                                (match.imgIdx != 1) )
+                                localBadCount++;
+                        }
+                    }
+                }
+                badCount += localBadCount > 0 ? 1 : 0;
+            }
+        }
+        if( (float)badCount > (float)queryDescCount*badPart )
+        {
+            curRes = CvTS::FAIL_INVALID_OUTPUT;
+            ts->printf( CvTS::LOG, "%f - too large bad matches part while test radiusMatch() function (2).\n",
+                        (float)badCount/(float)queryDescCount );
+            ts->set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
+        }
+    }
+}
+
+void CV_GpuBruteForceMatcherTest::run( int )
+{
+    emptyDataTest();
+
+    GpuMat query, train;
+    generateData( query, train );
+
+    matchTest( query, train );
+
+    knnMatchTest( query, train );
+
+    radiusMatchTest( query, train );
+
+    dmatcher.clear();
+}
+
+CV_GpuBruteForceMatcherTest CV_GpuBruteForceMatcher_test;
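As a worked example of the fixture the rewritten test relies on (see generateData above): train row qIdx*countFactor + c is a copy of query row qIdx with a single element perturbed by roughly c/countFactor, so the expected best match of query i is train index i*countFactor, and masking out that column shifts the expectation by one. A hedged, standalone sketch of the same construction (the helper name and default arguments are assumptions):

    #include <opencv2/core/core.hpp>

    // Hypothetical sketch of the synthetic descriptor layout used by the new test:
    // the nearest train descriptor to query row q is train row q*countFactor.
    void buildSyntheticDescriptors(cv::Mat& query, cv::Mat& train,
                                   int queryCount = 300, int dim = 500, int countFactor = 4)
    {
        cv::RNG rng;
        cv::Mat buf(queryCount, dim, CV_32SC1);
        rng.fill(buf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar(3));
        buf.convertTo(query, CV_32FC1);

        train.create(queryCount * countFactor, dim, CV_32FC1);
        const float step = 1.f / countFactor;
        for (int q = 0; q < queryCount; ++q)
        {
            for (int c = 0; c < countFactor; ++c)
            {
                cv::Mat row = train.row(q * countFactor + c);
                query.row(q).copyTo(row);
                row.at<float>(0, rng(dim)) += rng.uniform(step * c, step * (c + 1));
            }
        }
    }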