Mirror of https://github.com/opencv/opencv.git (synced 2024-11-24 19:20:28 +08:00)

Commit d3c4e90760 (parent 89be84a396): new optimized implementation of BruteForceMatcher_GPU (~2-3x faster)
@@ -1221,25 +1221,23 @@ namespace cv
     explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist);

-    // Add descriptors to train descriptor collection.
+    // Add descriptors to train descriptor collection
     void add(const std::vector<GpuMat>& descCollection);

-    // Get train descriptors collection.
+    // Get train descriptors collection
     const std::vector<GpuMat>& getTrainDescriptors() const;

-    // Clear train descriptors collection.
+    // Clear train descriptors collection
     void clear();

-    // Return true if there are not train descriptors in collection.
+    // Return true if there are not train descriptors in collection
     bool empty() const;

-    // Return true if the matcher supports mask in match methods.
+    // Return true if the matcher supports mask in match methods
     bool isMaskSupported() const;

-    // Find one best match for each query descriptor.
-    // trainIdx.at<int>(0, queryIdx) will contain best train index for queryIdx
-    // distance.at<float>(0, queryIdx) will contain distance
-    void matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
+    // Find one best match for each query descriptor
+    void matchSingle(const GpuMat& query, const GpuMat& train,
         GpuMat& trainIdx, GpuMat& distance,
         const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
@@ -1248,21 +1246,16 @@ namespace cv
     // Convert trainIdx and distance to vector with DMatch
     static void matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches);

-    // Find one best match for each query descriptor.
-    void match(const GpuMat& queryDescs, const GpuMat& trainDescs, std::vector<DMatch>& matches,
-        const GpuMat& mask = GpuMat());
+    // Find one best match for each query descriptor
+    void match(const GpuMat& query, const GpuMat& train, std::vector<DMatch>& matches, const GpuMat& mask = GpuMat());

     // Make gpu collection of trains and masks in suitable format for matchCollection function
-    void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
-        const vector<GpuMat>& masks = std::vector<GpuMat>());
+    void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection, const std::vector<GpuMat>& masks = std::vector<GpuMat>());

-    // Find one best match from train collection for each query descriptor.
-    // trainIdx.at<int>(0, queryIdx) will contain best train index for queryIdx
-    // imgIdx.at<int>(0, queryIdx) will contain best image index for queryIdx
-    // distance.at<float>(0, queryIdx) will contain distance
-    void matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,
+    // Find one best match from train collection for each query descriptor
+    void matchCollection(const GpuMat& query, const GpuMat& trainCollection,
         GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
-        const GpuMat& maskCollection, Stream& stream = Stream::Null());
+        const GpuMat& masks = GpuMat(), Stream& stream = Stream::Null());

     // Download trainIdx, imgIdx and distance and convert it to vector with DMatch
     static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector<DMatch>& matches);
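The hunk above splits the old one-shot match into a GPU stage (matchSingle) plus explicit download/convert steps. A minimal sketch of how that split path could be driven, assuming the gpu module of this era; the local variable names and matcher object are illustrative, not taken from the commit:

```cpp
// Hedged sketch: GPU match on a stream, then CPU-side conversion.
cv::gpu::Stream stream;
cv::gpu::GpuMat d_trainIdx, d_distance;
matcher.matchSingle(query, train, d_trainIdx, d_distance, cv::gpu::GpuMat(), stream);
stream.waitForCompletion();                          // make results visible on the host
cv::Mat trainIdx(d_trainIdx), distance(d_distance);  // implicit download to host Mats
std::vector<cv::DMatch> matches;
matcher.matchConvert(trainIdx, distance, matches);   // pure CPU step, no GPU sync
```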
@@ -1270,17 +1263,12 @@ namespace cv
     static void matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector<DMatch>& matches);

     // Find one best match from train collection for each query descriptor.
-    void match(const GpuMat& queryDescs, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks = std::vector<GpuMat>());
+    void match(const GpuMat& query, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks = std::vector<GpuMat>());

-    // Find k best matches for each query descriptor (in increasing order of distances).
-    // trainIdx.at<int>(queryIdx, i) will contain index of i'th best trains (i < k).
-    // distance.at<float>(queryIdx, i) will contain distance.
-    // allDist is a buffer to store all distance between query descriptors and train descriptors
-    // it have size (nQuery,nTrain) and CV_32F type
-    // allDist.at<float>(queryIdx, trainIdx) will contain FLT_MAX, if trainIdx is one from k best,
-    // otherwise it will contain distance between queryIdx and trainIdx descriptors
-    void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
-        GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
+    // Find k best matches for each query descriptor (in increasing order of distances)
+    void knnMatchSingle(const GpuMat& query, const GpuMat& train,
+        GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
+        const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

     // Download trainIdx and distance and convert it to vector with DMatch
     // compactResult is used when mask is not empty. If compactResult is false matches
@@ -1296,27 +1284,40 @@ namespace cv
     // compactResult is used when mask is not empty. If compactResult is false matches
     // vector will have the same size as queryDescriptors rows. If compactResult is true
     // matches vector will not contain matches for fully masked out query descriptors.
-    void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
+    void knnMatch(const GpuMat& query, const GpuMat& train,
         std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),
         bool compactResult = false);

+    // Find k best matches from train collection for each query descriptor (in increasing order of distances)
+    void knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
+        GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
+        const GpuMat& maskCollection = GpuMat(), Stream& stream = Stream::Null());
+
+    // Download trainIdx and distance and convert it to vector with DMatch
+    // compactResult is used when mask is not empty. If compactResult is false matches
+    // vector will have the same size as queryDescriptors rows. If compactResult is true
+    // matches vector will not contain matches for fully masked out query descriptors.
+    static void knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
+        std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+    // Convert trainIdx and distance to vector with DMatch
+    static void knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
+        std::vector< std::vector<DMatch> >& matches, bool compactResult = false);

     // Find k best matches for each query descriptor (in increasing order of distances).
     // compactResult is used when mask is not empty. If compactResult is false matches
     // vector will have the same size as queryDescriptors rows. If compactResult is true
     // matches vector will not contain matches for fully masked out query descriptors.
-    void knnMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, int knn,
-        const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false );
+    void knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,
+        const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);

     // Find best matches for each query descriptor which have distance less than maxDistance.
     // nMatches.at<int>(0, queryIdx) will contain matches count for queryIdx.
     // carefully nMatches can be greater than trainIdx.cols - it means that matcher didn't find all matches,
     // because it didn't have enough memory.
     // trainIdx.at<int>(queruIdx, i) will contain ith train index (i < min(nMatches.at<int>(0, queruIdx), trainIdx.cols))
     // distance.at<int>(queruIdx, i) will contain ith distance (i < min(nMatches.at<int>(0, queruIdx), trainIdx.cols))
-    // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x (nTrain / 2),
+    // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),
     // otherwize user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches
     // Matches doesn't sorted.
-    void radiusMatchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
+    void radiusMatchSingle(const GpuMat& query, const GpuMat& train,
         GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
         const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
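The notable semantic change in this hunk is the default result-buffer width for radiusMatchSingle: nTrain / 2 columns shrink to max(nTrain / 100, 10). Illustrative arithmetic only, under assumed sizes (these numbers are not from the commit):

```cpp
#include <algorithm>
#include <cstdio>

int main()
{
    const int nQuery = 512, nTrain = 3000;          // hypothetical descriptor counts
    const int oldCols = nTrain / 2;                 // 1500 result slots per query row
    const int newCols = std::max(nTrain / 100, 10); // 30 result slots per query row
    // CV_32SC1 trainIdx + CV_32FC1 distance = 8 bytes per slot
    std::printf("old: %d KB, new: %d KB\n",
                nQuery * oldCols * 8 / 1024, nQuery * newCols * 8 / 1024);
    return 0;
}
```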
@@ -1333,15 +1334,16 @@ namespace cv
     // Find best matches for each query descriptor which have distance less than maxDistance
     // in increasing order of distances).
-    void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
+    void radiusMatch(const GpuMat& query, const GpuMat& train,
         std::vector< std::vector<DMatch> >& matches, float maxDistance,
         const GpuMat& mask = GpuMat(), bool compactResult = false);

     // Find best matches for each query descriptor which have distance less than maxDistance.
+    // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),
+    // otherwize user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches
+    // Matches doesn't sorted.
-    void radiusMatchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,
-        GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
-        const GpuMat& maskCollection, Stream& stream = Stream::Null());
+    void radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
+        const std::vector<GpuMat>& masks = std::vector<GpuMat>(), Stream& stream = Stream::Null());

     // Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch.
     // matches will be sorted in increasing order of distances.
@@ -1356,7 +1358,7 @@ namespace cv
     // Find best matches from train collection for each query descriptor which have distance less than
     // maxDistance (in increasing order of distances).
-    void radiusMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, float maxDistance,
+    void radiusMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, float maxDistance,
         const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);

     DistType distType;
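Taken together, the renamed header now reads much like the CPU DescriptorMatcher. A minimal end-to-end sketch against this API (the descriptor matrices and their contents are assumed, not taken from the commit; the matcher type matches the one instantiated in the perf tests below):

```cpp
#include <opencv2/gpu/gpu.hpp>
#include <vector>

void matchDescriptors(const cv::Mat& queryCPU, const cv::Mat& trainCPU)
{
    // Upload CV_32F descriptors (one row per keypoint) to the device.
    cv::gpu::GpuMat query(queryCPU), train(trainCPU);

    // L2 brute-force matcher, as in the perf tests of this commit.
    cv::gpu::BruteForceMatcher_GPU< cv::gpu::L2<float> > matcher;

    std::vector<cv::DMatch> matches;
    matcher.match(query, train, matches);   // one best match per query row

    std::vector< std::vector<cv::DMatch> > knn;
    matcher.knnMatch(query, train, knn, 2); // two best matches per query row
}
```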
@@ -1,7 +1,7 @@
 #include "perf_precomp.hpp"

 PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_match, testing::Combine(testing::ValuesIn(devices()),
-    testing::Values(64, 128)))
+    testing::Values(64, 128, 256)))
 {
     DeviceInfo devInfo = std::tr1::get<0>(GetParam());
     int desc_size = std::tr1::get<1>(GetParam());
@@ -19,7 +19,7 @@ PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_match, testing::Combine(testing:
     BruteForceMatcher_GPU< L2<float> > matcher;

-    declare.time(0.5).iterations(100);
+    declare.time(3.0);

     SIMPLE_TEST_CYCLE()
     {
@@ -35,7 +35,7 @@ PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_match, testing::Combine(testing:
 PERF_TEST_P(DevInfo_K_DescSize, BruteForceMatcher_knnMatch, testing::Combine(testing::ValuesIn(devices()),
     testing::Values(2, 3),
-    testing::Values(64, 128)))
+    testing::Values(64, 128, 256)))
 {
     DeviceInfo devInfo = std::tr1::get<0>(GetParam());
     int k = std::tr1::get<1>(GetParam());
@@ -54,11 +54,11 @@ PERF_TEST_P(DevInfo_K_DescSize, BruteForceMatcher_knnMatch, testing::Combine(tes
     BruteForceMatcher_GPU< L2<float> > matcher;

-    declare.time(0.5).iterations(100);
+    declare.time(3.0);

     SIMPLE_TEST_CYCLE()
     {
-        matcher.knnMatch(query, train, trainIdx, distance, allDist, k);
+        matcher.knnMatchSingle(query, train, trainIdx, distance, allDist, k);
     }

     Mat trainIdx_host(trainIdx);
@@ -69,7 +69,7 @@ PERF_TEST_P(DevInfo_K_DescSize, BruteForceMatcher_knnMatch, testing::Combine(tes
 }

 PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_radiusMatch, testing::Combine(testing::ValuesIn(devices(SHARED_ATOMICS)),
-    testing::Values(64, 128)))
+    testing::Values(64, 128, 256)))
 {
     DeviceInfo devInfo = std::tr1::get<0>(GetParam());
     int desc_size = std::tr1::get<1>(GetParam());
@@ -85,7 +85,7 @@ PERF_TEST_P(DevInfo_DescSize, BruteForceMatcher_radiusMatch, testing::Combine(te
     BruteForceMatcher_GPU< L2<float> > matcher;

-    declare.time(0.5).iterations(100);
+    declare.time(3.0);

     SIMPLE_TEST_CYCLE()
     {
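The radiusMatch perf test above restricts itself to devices with atomics support, and the rewritten implementation asserts global atomics at run time (see the RadiusMatch section further down). A sketch of the corresponding pre-flight check an application could perform, using only names that appear in this diff:

```cpp
// Hedged sketch of a capability guard before calling radiusMatch.
cv::gpu::DeviceInfo info;
bool radiusMatchOk =
    cv::gpu::TargetArchs::builtWith(cv::gpu::GLOBAL_ATOMICS) &&
    info.supports(cv::gpu::GLOBAL_ATOMICS);
```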
@@ -56,86 +56,101 @@ bool cv::gpu::BruteForceMatcher_GPU_base::empty() const { throw_nogpu(); return
 bool cv::gpu::BruteForceMatcher_GPU_base::isMaskSupported() const { throw_nogpu(); return true; }
 void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, vector<DMatch>&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, std::vector<DMatch>&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, vector<DMatch>&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, const GpuMat&, vector<DMatch>&, const GpuMat&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat&, GpuMat&, const vector<GpuMat>&) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector<DMatch>&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, const Mat&, std::vector<DMatch>&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, std::vector<DMatch>&, const std::vector<GpuMat>&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&, Stream&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, int, const GpuMat&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, int, const std::vector<GpuMat>&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat&, const GpuMat&, const GpuMat&, vector<DMatch>&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat&, const Mat&, const Mat&, vector<DMatch>&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat&, vector<DMatch>&, const vector<GpuMat>&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, const GpuMat&, Stream&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, int, const GpuMat&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Collection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Download(const GpuMat&, const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Convert(const Mat&, const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat&, vector< vector<DMatch> >&, int, const vector<GpuMat>&, bool) { throw_nogpu(); }
 void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchSingle(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const GpuMat&, Stream&) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat&, const Mat&, const Mat&, std::vector< std::vector<DMatch> >&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, const GpuMat&, std::vector< std::vector<DMatch> >&, float, const GpuMat&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const GpuMat&, Stream&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat&, const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, float, const GpuMat&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, float, const vector<GpuMat>&, Stream&) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat&, const GpuMat&, const GpuMat&, const GpuMat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat&, const Mat&, const Mat&, const Mat&, vector< vector<DMatch> >&, bool) { throw_nogpu(); }
-void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, std::vector< std::vector<DMatch> >&, float, const std::vector<GpuMat>&, bool) { throw_nogpu(); }
+void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat&, vector< vector<DMatch> >&, float, const vector<GpuMat>&, bool) { throw_nogpu(); }

 #else /* !defined (HAVE_CUDA) */
 namespace cv { namespace gpu { namespace bf_match
 {
-    template <typename T> void matchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance,
+    template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance,
         int cc, cudaStream_t stream);
-    template <typename T> void matchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance,
+    template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance,
         int cc, cudaStream_t stream);
-    template <typename T> void matchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance,
+    template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance,
         int cc, cudaStream_t stream);

-    template <typename T> void matchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+    template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
         int cc, cudaStream_t stream);
-    template <typename T> void matchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+    template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
         int cc, cudaStream_t stream);
-    template <typename T> void matchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+    template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
         int cc, cudaStream_t stream);
 }}}
 namespace cv { namespace gpu { namespace bf_knnmatch
 {
-    template <typename T> void knnMatchL1_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist,
+    template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
+        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist,
         int cc, cudaStream_t stream);
-    template <typename T> void knnMatchL2_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist,
+    template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
+        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist,
         int cc, cudaStream_t stream);
-    template <typename T> void knnMatchHamming_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist,
+    template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
+        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist,
         int cc, cudaStream_t stream);

+    template <typename T> void match2L1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+        int cc, cudaStream_t stream);
+    template <typename T> void match2L2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+        int cc, cudaStream_t stream);
+    template <typename T> void match2Hamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+        int cc, cudaStream_t stream);
 }}}
 namespace cv { namespace gpu { namespace bf_radius_match
 {
-    template <typename T> void radiusMatchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches,
-        cudaStream_t stream);
-    template <typename T> void radiusMatchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches,
-        cudaStream_t stream);
-    template <typename T> void radiusMatchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches,
-        cudaStream_t stream);
+    template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
+        int cc, cudaStream_t stream);
+    template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
+        int cc, cudaStream_t stream);
+    template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
+        int cc, cudaStream_t stream);

-    template <typename T> void radiusMatchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
-        cudaStream_t stream);
-    template <typename T> void radiusMatchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
-        cudaStream_t stream);
-    template <typename T> void radiusMatchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
-        cudaStream_t stream);
+    template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
+        int cc, cudaStream_t stream);
+
+    template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
+        int cc, cudaStream_t stream);
+
+    template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
+        int cc, cudaStream_t stream);
 }}}

 cv::gpu::BruteForceMatcher_GPU_base::BruteForceMatcher_GPU_base(DistType distType_) : distType(distType_)
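Every kernel launcher above gained an int cc parameter. In the host code that follows, it is the device compute capability packed as major*10 + minor, derived exactly like this:

```cpp
// Mirrors the derivation used repeatedly in this file.
cv::gpu::DeviceInfo info;
int cc = info.majorVersion() * 10 + info.minorVersion(); // e.g. 20 for sm_20
```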
@@ -173,52 +188,53 @@ bool cv::gpu::BruteForceMatcher_GPU_base::isMaskSupported() const
 ////////////////////////////////////////////////////////////////////
 // Match

-void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
-    GpuMat& trainIdx, GpuMat& distance, const GpuMat& mask, Stream& stream)
+void cv::gpu::BruteForceMatcher_GPU_base::matchSingle(const GpuMat& query, const GpuMat& train,
+    GpuMat& trainIdx, GpuMat& distance,
+    const GpuMat& mask, Stream& stream)
 {
-    if (queryDescs.empty() || trainDescs.empty())
+    if (query.empty() || train.empty())
         return;

     using namespace cv::gpu::bf_match;

-    typedef void (*match_caller_t)(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance,
-        int cc, cudaStream_t stream);
+    typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
+        const DevMem2Di& trainIdx, const DevMem2Df& distance,
+        int cc, cudaStream_t stream);

-    static const match_caller_t match_callers[3][8] =
+    static const caller_t callers[3][6] =
     {
         {
-            matchSingleL1_gpu<unsigned char>, 0/*matchSingleL1_gpu<signed char>*/,
-            matchSingleL1_gpu<unsigned short>, matchSingleL1_gpu<short>,
-            matchSingleL1_gpu<int>, matchSingleL1_gpu<float>, 0, 0
+            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
+            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
+            matchL1_gpu<int>, matchL1_gpu<float>
         },
         {
-            0/*matchSingleL2_gpu<unsigned char>*/, 0/*matchSingleL2_gpu<signed char>*/,
-            0/*matchSingleL2_gpu<unsigned short>*/, 0/*matchSingleL2_gpu<short>*/,
-            0/*matchSingleL2_gpu<int>*/, matchSingleL2_gpu<float>, 0, 0
+            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
+            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
+            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
         },
         {
-            matchSingleHamming_gpu<unsigned char>, 0/*matchSingleHamming_gpu<signed char>*/,
-            matchSingleHamming_gpu<unsigned short>, 0/*matchSingleHamming_gpu<short>*/,
-            matchSingleHamming_gpu<int>, 0, 0, 0
+            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
+            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
+            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
         }
     };

-    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);
-    CV_Assert(trainDescs.cols == queryDescs.cols && trainDescs.type() == queryDescs.type());
+    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
+    CV_Assert(train.cols == query.cols && train.type() == query.type());

-    const int nQuery = queryDescs.rows;
+    const int nQuery = query.rows;

     ensureSizeIsEnough(1, nQuery, CV_32S, trainIdx);
     ensureSizeIsEnough(1, nQuery, CV_32F, distance);

-    match_caller_t func = match_callers[distType][queryDescs.depth()];
+    caller_t func = callers[distType][query.depth()];
     CV_Assert(func != 0);

     DeviceInfo info;
     int cc = info.majorVersion() * 10 + info.minorVersion();

-    func(queryDescs, trainDescs, mask, trainIdx, distance, cc, StreamAccessor::getStream(stream));
+    func(query, train, mask, trainIdx, distance, cc, StreamAccessor::getStream(stream));
 }
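The rewritten matchSingle keeps the dispatch idiom but trims the launcher table from [3][8] to [3][6], since only depths CV_8U through CV_32F are dispatchable. A self-contained miniature of the same idiom (all names here are illustrative, not from the module):

```cpp
#include <cassert>

typedef void (*caller_t)(int);          // stand-in for the kernel-launcher type
void l1Kernel(int) {}
void hammingKernel(int) {}

// rows: distance type, columns: descriptor depth; 0 = unsupported pairing
static const caller_t callers[2][3] = {
    { l1Kernel,      0, l1Kernel },
    { hammingKernel, 0, 0        }
};

void dispatch(int distType, int depth)
{
    caller_t func = callers[distType][depth];
    assert(func != 0);                  // mirrors CV_Assert(func != 0)
    func(depth);
}
```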
 void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& distance, vector<DMatch>& matches)
@@ -232,13 +248,13 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx,
     matchConvert(trainIdxCPU, distanceCPU, matches);
 }

-void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches)
+void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, const Mat& distance, vector<DMatch>& matches)
 {
     if (trainIdx.empty() || distance.empty())
         return;

-    CV_Assert(trainIdx.type() == CV_32SC1 && trainIdx.isContinuous());
-    CV_Assert(distance.type() == CV_32FC1 && distance.isContinuous() && distance.cols == trainIdx.cols);
+    CV_Assert(trainIdx.type() == CV_32SC1);
+    CV_Assert(distance.type() == CV_32FC1 && distance.cols == trainIdx.cols);

     const int nQuery = trainIdx.cols;

@@ -250,6 +266,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, cons
     for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx, ++trainIdx_ptr, ++distance_ptr)
     {
         int trainIdx = *trainIdx_ptr;
+
         if (trainIdx == -1)
             continue;
@@ -261,11 +278,11 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, cons
     }
 }

-void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, const GpuMat& trainDescs,
+void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& query, const GpuMat& train,
     vector<DMatch>& matches, const GpuMat& mask)
 {
     GpuMat trainIdx, distance;
-    matchSingle(queryDescs, trainDescs, trainIdx, distance, mask);
+    matchSingle(query, train, trainIdx, distance, mask);
     matchDownload(trainIdx, distance, matches);
 }
@@ -279,14 +296,13 @@ void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollect
     {
         Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2D)));

-        for (size_t i = 0; i < trainDescCollection.size(); ++i)
-        {
-            const GpuMat& trainDescs = trainDescCollection[i];
+        DevMem2D* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2D>();

-            trainCollectionCPU.ptr<DevMem2D>(0)[i] = trainDescs;
-        }
+        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr)
+            *trainCollectionCPU_ptr = trainDescCollection[i];

         trainCollection.upload(trainCollectionCPU);
         maskCollection.release();
     }
     else
     {
@@ -295,16 +311,18 @@ void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollect
         Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2D)));
         Mat maskCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStep)));

-        for (size_t i = 0; i < trainDescCollection.size(); ++i)
+        DevMem2D* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2D>();
+        PtrStep* maskCollectionCPU_ptr = maskCollectionCPU.ptr<PtrStep>();
+
+        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr, ++maskCollectionCPU_ptr)
         {
-            const GpuMat& trainDescs = trainDescCollection[i];
+            const GpuMat& train = trainDescCollection[i];
             const GpuMat& mask = masks[i];

-            CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == trainDescs.rows));
+            CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == train.rows));

-            trainCollectionCPU.ptr<DevMem2D>(0)[i] = trainDescs;
-
-            maskCollectionCPU.ptr<PtrStep>(0)[i] = mask;
+            *trainCollectionCPU_ptr = train;
+            *maskCollectionCPU_ptr = mask;
         }

         trainCollection.upload(trainCollectionCPU);
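Both branches of makeGpuCollection now fill the CPU-side header array through a raw pointer instead of re-fetching ptr<DevMem2D>(0)[i] each iteration. The underlying packing idea, sketched with assumed names ("trains" is a hypothetical std::vector<cv::gpu::GpuMat>):

```cpp
// Each GpuMat collapses to a small POD header (data pointer, step, size)
// stored contiguously in a CPU Mat, then uploaded once so a single kernel
// launch can walk an arbitrary number of train images.
cv::Mat headersCPU(1, static_cast<int>(trains.size()), CV_8UC(sizeof(cv::gpu::DevMem2D)));
cv::gpu::DevMem2D* p = headersCPU.ptr<cv::gpu::DevMem2D>();
for (size_t i = 0; i < trains.size(); ++i, ++p)
    *p = trains[i];                 // GpuMat converts to its DevMem2D header
cv::gpu::GpuMat headersGPU;
headersGPU.upload(headersCPU);      // one transfer for the whole collection
```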
@@ -312,52 +330,53 @@ void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollect
     }
 }

-void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,
-    GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, const GpuMat& maskCollection, Stream& stream)
+void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& query, const GpuMat& trainCollection,
+    GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
+    const GpuMat& masks, Stream& stream)
 {
-    if (queryDescs.empty() || trainCollection.empty())
+    if (query.empty() || trainCollection.empty())
         return;

     using namespace cv::gpu::bf_match;

-    typedef void (*match_caller_t)(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
-        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
-        int cc, cudaStream_t stream);
+    typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
+        int cc, cudaStream_t stream);

-    static const match_caller_t match_callers[3][8] =
+    static const caller_t callers[3][6] =
     {
         {
-            matchCollectionL1_gpu<unsigned char>, 0/*matchCollectionL1_gpu<signed char>*/,
-            matchCollectionL1_gpu<unsigned short>, matchCollectionL1_gpu<short>,
-            matchCollectionL1_gpu<int>, matchCollectionL1_gpu<float>, 0, 0
+            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
+            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
+            matchL1_gpu<int>, matchL1_gpu<float>
         },
         {
-            0/*matchCollectionL2_gpu<unsigned char>*/, 0/*matchCollectionL2_gpu<signed char>*/,
-            0/*matchCollectionL2_gpu<unsigned short>*/, 0/*matchCollectionL2_gpu<short>*/,
-            0/*matchCollectionL2_gpu<int>*/, matchCollectionL2_gpu<float>, 0, 0
+            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
+            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
+            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
         },
         {
-            matchCollectionHamming_gpu<unsigned char>, 0/*matchCollectionHamming_gpu<signed char>*/,
-            matchCollectionHamming_gpu<unsigned short>, 0/*matchCollectionHamming_gpu<short>*/,
-            matchCollectionHamming_gpu<int>, 0, 0, 0
+            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
+            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
+            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
         }
     };

-    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);
+    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);

-    const int nQuery = queryDescs.rows;
+    const int nQuery = query.rows;

     ensureSizeIsEnough(1, nQuery, CV_32S, trainIdx);
     ensureSizeIsEnough(1, nQuery, CV_32S, imgIdx);
     ensureSizeIsEnough(1, nQuery, CV_32F, distance);

-    match_caller_t func = match_callers[distType][queryDescs.depth()];
+    caller_t func = callers[distType][query.depth()];
     CV_Assert(func != 0);

     DeviceInfo info;
     int cc = info.majorVersion() * 10 + info.minorVersion();

-    func(queryDescs, trainCollection, maskCollection, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));
+    func(query, trainCollection, masks, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));
 }

 void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, vector<DMatch>& matches)
@@ -377,9 +396,9 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, cons
     if (trainIdx.empty() || imgIdx.empty() || distance.empty())
         return;

-    CV_Assert(trainIdx.type() == CV_32SC1 && trainIdx.isContinuous());
-    CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.isContinuous() && imgIdx.cols == trainIdx.cols);
-    CV_Assert(distance.type() == CV_32FC1 && distance.isContinuous() && imgIdx.cols == trainIdx.cols);
+    CV_Assert(trainIdx.type() == CV_32SC1);
+    CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.cols == trainIdx.cols);
+    CV_Assert(distance.type() == CV_32FC1 && distance.cols == trainIdx.cols);

     const int nQuery = trainIdx.cols;

@@ -392,6 +411,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, cons
     for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)
     {
         int trainIdx = *trainIdx_ptr;
+
         if (trainIdx == -1)
             continue;
@@ -405,7 +425,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::matchConvert(const Mat& trainIdx, cons
     }
 }

-void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, vector<DMatch>& matches, const vector<GpuMat>& masks)
+void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& query, vector<DMatch>& matches, const vector<GpuMat>& masks)
 {
     GpuMat trainCollection;
     GpuMat maskCollection;
@@ -414,46 +434,50 @@ void cv::gpu::BruteForceMatcher_GPU_base::match(const GpuMat& queryDescs, vector

     GpuMat trainIdx, imgIdx, distance;

-    matchCollection(queryDescs, trainCollection, trainIdx, imgIdx, distance, maskCollection);
+    matchCollection(query, trainCollection, trainIdx, imgIdx, distance, maskCollection);
     matchDownload(trainIdx, imgIdx, distance, matches);
 }
 ////////////////////////////////////////////////////////////////////
 // KnnMatch

-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
-    GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask, Stream& stream)
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatchSingle(const GpuMat& query, const GpuMat& train,
+    GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
+    const GpuMat& mask, Stream& stream)
 {
-    if (queryDescs.empty() || trainDescs.empty())
+    if (query.empty() || train.empty())
         return;

     using namespace cv::gpu::bf_knnmatch;

-    typedef void (*match_caller_t)(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
-        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& allDist,
-        int cc, cudaStream_t stream);
+    typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& train, int k, const DevMem2D& mask,
+        const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2Df& allDist,
+        int cc, cudaStream_t stream);

-    static const match_caller_t match_callers[3][8] =
+    static const caller_t callers[3][6] =
     {
         {
-            knnMatchL1_gpu<unsigned char>, 0/*knnMatchL1_gpu<signed char>*/, knnMatchL1_gpu<unsigned short>,
-            knnMatchL1_gpu<short>, knnMatchL1_gpu<int>, knnMatchL1_gpu<float>, 0, 0
+            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
+            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
+            matchL1_gpu<int>, matchL1_gpu<float>
         },
         {
-            0/*knnMatchL2_gpu<unsigned char>*/, 0/*knnMatchL2_gpu<signed char>*/, 0/*knnMatchL2_gpu<unsigned short>*/,
-            0/*knnMatchL2_gpu<short>*/, 0/*knnMatchL2_gpu<int>*/, knnMatchL2_gpu<float>, 0, 0
+            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
+            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
+            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
         },
         {
-            knnMatchHamming_gpu<unsigned char>, 0/*knnMatchHamming_gpu<signed char>*/, knnMatchHamming_gpu<unsigned short>,
-            0/*knnMatchHamming_gpu<short>*/, knnMatchHamming_gpu<int>, 0, 0, 0
+            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
+            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
+            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
         }
     };

-    CV_Assert(queryDescs.channels() == 1 && queryDescs.depth() < CV_64F);
-    CV_Assert(trainDescs.type() == queryDescs.type() && trainDescs.cols == queryDescs.cols);
+    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
+    CV_Assert(train.type() == query.type() && train.cols == query.cols);

-    const int nQuery = queryDescs.rows;
-    const int nTrain = trainDescs.rows;
+    const int nQuery = query.rows;
+    const int nTrain = train.rows;

     if (k == 2)
     {
@@ -468,25 +492,17 @@ void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, con
     }

     if (stream)
     {
         stream.enqueueMemSet(trainIdx, Scalar::all(-1));
         if (k != 2)
             stream.enqueueMemSet(allDist, Scalar::all(numeric_limits<float>::max()));
     }
     else
     {
         trainIdx.setTo(Scalar::all(-1));
         if (k != 2)
             allDist.setTo(Scalar::all(numeric_limits<float>::max()));
     }

-    match_caller_t func = match_callers[distType][queryDescs.depth()];
+    caller_t func = callers[distType][query.depth()];
     CV_Assert(func != 0);

     DeviceInfo info;
     int cc = info.majorVersion() * 10 + info.minorVersion();

-    func(queryDescs, trainDescs, k, mask, trainIdx, distance, allDist, cc, StreamAccessor::getStream(stream));
+    func(query, train, k, mask, trainIdx, distance, allDist, cc, StreamAccessor::getStream(stream));
 }
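The raw entry point is now knnMatchSingle, with the old knnMatch(query, train, ...) overload reduced to a wrapper (shown a few hunks below). A minimal synchronous use, with illustrative buffer and matcher names:

```cpp
cv::gpu::GpuMat trainIdx, distance, allDist;   // reusable scratch buffers
matcher.knnMatchSingle(query, train, trainIdx, distance, allDist, 2);

std::vector< std::vector<cv::DMatch> > knn;    // two best matches per query row
matcher.knnMatchDownload(trainIdx, distance, knn);
```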
 void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
@@ -502,7 +518,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::knnMatchDownload(const GpuMat& trainId
 }

 void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat& trainIdx, const Mat& distance,
-    std::vector< std::vector<DMatch> >& matches, bool compactResult)
+    vector< vector<DMatch> >& matches, bool compactResult)
 {
     if (trainIdx.empty() || distance.empty())
         return;
@@ -546,14 +562,127 @@ void cv::gpu::BruteForceMatcher_GPU_base::knnMatchConvert(const Mat& trainIdx, c
     }
 }

-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& query, const GpuMat& train,
     vector< vector<DMatch> >& matches, int k, const GpuMat& mask, bool compactResult)
 {
     GpuMat trainIdx, distance, allDist;
-    knnMatch(queryDescs, trainDescs, trainIdx, distance, allDist, k, mask);
+    knnMatchSingle(query, train, trainIdx, distance, allDist, k, mask);
     knnMatchDownload(trainIdx, distance, matches, compactResult);
 }

+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
+    GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
+    const GpuMat& maskCollection, Stream& stream)
+{
+    if (query.empty() || trainCollection.empty())
+        return;
+
+    using namespace cv::gpu::bf_knnmatch;
+
+    typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
+        const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
+        int cc, cudaStream_t stream);
+
+    static const caller_t callers[3][6] =
+    {
+        {
+            match2L1_gpu<unsigned char>, 0/*match2L1_gpu<signed char>*/,
+            match2L1_gpu<unsigned short>, match2L1_gpu<short>,
+            match2L1_gpu<int>, match2L1_gpu<float>
+        },
+        {
+            0/*match2L2_gpu<unsigned char>*/, 0/*match2L2_gpu<signed char>*/,
+            0/*match2L2_gpu<unsigned short>*/, 0/*match2L2_gpu<short>*/,
+            0/*match2L2_gpu<int>*/, match2L2_gpu<float>
+        },
+        {
+            match2Hamming_gpu<unsigned char>, 0/*match2Hamming_gpu<signed char>*/,
+            match2Hamming_gpu<unsigned short>, 0/*match2Hamming_gpu<short>*/,
+            match2Hamming_gpu<int>, 0/*match2Hamming_gpu<float>*/
+        }
+    };
+
+    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
+
+    const int nQuery = query.rows;
+
+    ensureSizeIsEnough(1, nQuery, CV_32SC2, trainIdx);
+    ensureSizeIsEnough(1, nQuery, CV_32SC2, imgIdx);
+    ensureSizeIsEnough(1, nQuery, CV_32FC2, distance);
+
+    if (stream)
+        stream.enqueueMemSet(trainIdx, Scalar::all(-1));
+    else
+        trainIdx.setTo(Scalar::all(-1));
+
+    caller_t func = callers[distType][query.depth()];
+    CV_Assert(func != 0);
+
+    DeviceInfo info;
+    int cc = info.majorVersion() * 10 + info.minorVersion();
+
+    func(query, trainCollection, maskCollection, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));
+}
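knnMatch2Collection packs both neighbours of every query descriptor into one element of a 2-channel row (CV_32SC2 / CV_32FC2, allocated just above). A hedged sketch of reading those packed results after download; the *CPU matrices and queryIdx are assumed locals:

```cpp
cv::Vec2i bestTrains = trainIdxCPU.at<cv::Vec2i>(0, queryIdx); // train indices, -1 = none
cv::Vec2i bestImages = imgIdxCPU.at<cv::Vec2i>(0, queryIdx);   // source image indices
cv::Vec2f bestDists  = distanceCPU.at<cv::Vec2f>(0, queryIdx); // matching distances
```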
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
+    vector< vector<DMatch> >& matches, bool compactResult)
+{
+    if (trainIdx.empty() || imgIdx.empty() || distance.empty())
+        return;
+
+    Mat trainIdxCPU = trainIdx;
+    Mat imgIdxCPU = imgIdx;
+    Mat distanceCPU = distance;
+
+    knnMatch2Convert(trainIdxCPU, imgIdxCPU, distanceCPU, matches, compactResult);
+}
+
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
+    vector< vector<DMatch> >& matches, bool compactResult)
+{
+    if (trainIdx.empty() || imgIdx.empty() || distance.empty())
+        return;
+
+    CV_Assert(trainIdx.type() == CV_32SC2);
+    CV_Assert(imgIdx.type() == CV_32SC2 && imgIdx.cols == trainIdx.cols);
+    CV_Assert(distance.type() == CV_32FC2 && distance.cols == trainIdx.cols);
+
+    const int nQuery = trainIdx.cols;
+
+    matches.clear();
+    matches.reserve(nQuery);
+
+    const int* trainIdx_ptr = trainIdx.ptr<int>();
+    const int* imgIdx_ptr = imgIdx.ptr<int>();
+    const float* distance_ptr = distance.ptr<float>();
+
+    for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)
+    {
+        matches.push_back(vector<DMatch>());
+        vector<DMatch>& curMatches = matches.back();
+        curMatches.reserve(2);
+
+        for (int i = 0; i < 2; ++i, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)
+        {
+            int trainIdx = *trainIdx_ptr;
+
+            if (trainIdx != -1)
+            {
+                int imgIdx = *imgIdx_ptr;
+
+                float distance = *distance_ptr;
+
+                DMatch m(queryIdx, trainIdx, imgIdx, distance);
+
+                curMatches.push_back(m);
+            }
+        }
+
+        if (compactResult && curMatches.empty())
+            matches.pop_back();
+    }
+}
 namespace
 {
     struct ImgIdxSetter
@@ -564,103 +693,123 @@ namespace
     };
 }
-void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& queryDescs,
-    vector< vector<DMatch> >& matches, int knn, const vector<GpuMat>& masks, bool compactResult)
+void cv::gpu::BruteForceMatcher_GPU_base::knnMatch(const GpuMat& query, vector< vector<DMatch> >& matches, int k,
+    const vector<GpuMat>& masks, bool compactResult)
 {
-    if (queryDescs.empty() || empty())
-        return;
-
-    vector< vector<DMatch> > curMatches;
-    vector<DMatch> temp;
-    temp.reserve(2 * knn);
-
-    matches.resize(queryDescs.rows);
-    for_each(matches.begin(), matches.end(), bind2nd(mem_fun_ref(&vector<DMatch>::reserve), knn));
-
-    for (size_t imgIdx = 0; imgIdx < trainDescCollection.size(); ++imgIdx)
+    if (k == 2)
     {
-        knnMatch(queryDescs, trainDescCollection[imgIdx], curMatches, knn,
-            masks.empty() ? GpuMat() : masks[imgIdx]);
+        GpuMat trainCollection;
+        GpuMat maskCollection;

-        for (int queryIdx = 0; queryIdx < queryDescs.rows; ++queryIdx)
-        {
-            vector<DMatch>& localMatch = curMatches[queryIdx];
-            vector<DMatch>& globalMatch = matches[queryIdx];
+        makeGpuCollection(trainCollection, maskCollection, masks);

-            for_each(localMatch.begin(), localMatch.end(), ImgIdxSetter(static_cast<int>(imgIdx)));
+        GpuMat trainIdx, imgIdx, distance;

-            temp.clear();
-            merge(globalMatch.begin(), globalMatch.end(), localMatch.begin(), localMatch.end(), back_inserter(temp));
-
-            globalMatch.clear();
-            const size_t count = std::min((size_t)knn, temp.size());
-            copy(temp.begin(), temp.begin() + count, back_inserter(globalMatch));
-        }
+        knnMatch2Collection(query, trainCollection, trainIdx, imgIdx, distance, maskCollection);
+        knnMatch2Download(trainIdx, imgIdx, distance, matches);
     }

-    if (compactResult)
+    else
     {
-        vector< vector<DMatch> >::iterator new_end = remove_if(matches.begin(), matches.end(),
-            mem_fun_ref(&vector<DMatch>::empty));
-        matches.erase(new_end, matches.end());
+        if (query.empty() || empty())
+            return;
+
+        vector< vector<DMatch> > curMatches;
+        vector<DMatch> temp;
+        temp.reserve(2 * k);
+
+        matches.resize(query.rows);
+        for_each(matches.begin(), matches.end(), bind2nd(mem_fun_ref(&vector<DMatch>::reserve), k));
+
+        for (size_t imgIdx = 0, size = trainDescCollection.size(); imgIdx < size; ++imgIdx)
+        {
+            knnMatch(query, trainDescCollection[imgIdx], curMatches, k, masks.empty() ? GpuMat() : masks[imgIdx]);
+
+            for (int queryIdx = 0; queryIdx < query.rows; ++queryIdx)
+            {
+                vector<DMatch>& localMatch = curMatches[queryIdx];
+                vector<DMatch>& globalMatch = matches[queryIdx];
+
+                for_each(localMatch.begin(), localMatch.end(), ImgIdxSetter(static_cast<int>(imgIdx)));
+
+                temp.clear();
+                merge(globalMatch.begin(), globalMatch.end(), localMatch.begin(), localMatch.end(), back_inserter(temp));
+
+                globalMatch.clear();
+                const size_t count = std::min((size_t)k, temp.size());
+                copy(temp.begin(), temp.begin() + count, back_inserter(globalMatch));
+            }
+        }
+
+        if (compactResult)
+        {
+            vector< vector<DMatch> >::iterator new_end = remove_if(matches.begin(), matches.end(), mem_fun_ref(&vector<DMatch>::empty));
+            matches.erase(new_end, matches.end());
+        }
     }
 }
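For k != 2 the per-image results are folded into a global top-k with std::merge, which relies on both input ranges being sorted by distance. A self-contained miniature of that reduction (the module applies it to DMatch ordered by distance; float is used here for brevity):

```cpp
#include <algorithm>
#include <vector>

// Keep the k smallest values after merging two ascending-sorted lists.
std::vector<float> mergeTopK(const std::vector<float>& global,
                             const std::vector<float>& local, size_t k)
{
    std::vector<float> temp;
    temp.reserve(global.size() + local.size());
    std::merge(global.begin(), global.end(),
               local.begin(), local.end(), std::back_inserter(temp));
    temp.resize(std::min(k, temp.size()));  // the k best overall survive
    return temp;
}
```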
////////////////////////////////////////////////////////////////////
|
||||
// RadiusMatch
|
||||
|
||||
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchSingle(const GpuMat& query, const GpuMat& train,
    GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
    const GpuMat& mask, Stream& stream)
{
    if (query.empty() || train.empty())
        return;

    using namespace cv::gpu::bf_radius_match;

    typedef void (*caller_t)(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
        const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
        int cc, cudaStream_t stream);

    static const caller_t callers[3][6] =
    {
        {
            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
            matchL1_gpu<int>, matchL1_gpu<float>
        },
        {
            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
        },
        {
            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
        }
    };

    DeviceInfo info;
    int cc = info.majorVersion() * 10 + info.minorVersion();

    CV_Assert(TargetArchs::builtWith(GLOBAL_ATOMICS) && info.supports(GLOBAL_ATOMICS));

    const int nQuery = query.rows;
    const int nTrain = train.rows;

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(train.type() == query.type() && train.cols == query.cols);
    CV_Assert(trainIdx.empty() || (trainIdx.rows == nQuery && trainIdx.size() == distance.size()));

    ensureSizeIsEnough(1, nQuery, CV_32SC1, nMatches);
    if (trainIdx.empty())
    {
        ensureSizeIsEnough(nQuery, std::max((nTrain / 100), 10), CV_32SC1, trainIdx);
        ensureSizeIsEnough(nQuery, std::max((nTrain / 100), 10), CV_32FC1, distance);
    }

    caller_t func = callers[distType][query.depth()];
    CV_Assert(func != 0);

    func(query, train, maxDistance, mask, trainIdx, distance, nMatches, cc, StreamAccessor::getStream(stream));
}

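The preallocation above keeps the per-query candidate buffers deliberately small; a minimal sketch of that sizing rule (illustrative helper, not part of the library):

#include <algorithm>

// Each query row gets roughly 1% of the train rows as result slots, never fewer
// than 10, mirroring the ensureSizeIsEnough calls above.
inline int radiusResultWidth(int nTrain) { return std::max(nTrain / 100, 10); }
// e.g. nTrain = 5000 -> 50 slots per query; nTrain = 300 -> 10 slots.
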
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
    vector< vector<DMatch> >& matches, bool compactResult)
{
    if (trainIdx.empty() || distance.empty() || nMatches.empty())
        return;

@@ -673,14 +822,14 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat& trai
}

void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,
    vector< vector<DMatch> >& matches, bool compactResult)
{
    if (trainIdx.empty() || distance.empty() || nMatches.empty())
        return;

    CV_Assert(trainIdx.type() == CV_32SC1);
    CV_Assert(distance.type() == CV_32FC1 && distance.size() == trainIdx.size());
    CV_Assert(nMatches.type() == CV_32SC1 && nMatches.cols == trainIdx.rows);

    const int nQuery = trainIdx.rows;

@@ -688,6 +837,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx
    matches.reserve(nQuery);

    const int* nMatches_ptr = nMatches.ptr<int>();

    for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)
    {
        const int* trainIdx_ptr = trainIdx.ptr<int>(queryIdx);
@@ -720,66 +870,75 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx
    }
}

void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& query, const GpuMat& train,
    vector< vector<DMatch> >& matches, float maxDistance, const GpuMat& mask, bool compactResult)
{
    GpuMat trainIdx, distance, nMatches;
    radiusMatchSingle(query, train, trainIdx, distance, nMatches, maxDistance, mask);
    radiusMatchDownload(trainIdx, distance, nMatches, matches, compactResult);
}

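As a usage sketch of this convenience wrapper (hedged: assumes an OpenCV 2.x gpu build; h_query and h_train are hypothetical CV_32FC1 descriptor matrices, one row per keypoint):

cv::gpu::GpuMat d_query(h_query), d_train(h_train);

cv::gpu::BruteForceMatcher_GPU_base matcher(cv::gpu::BruteForceMatcher_GPU_base::L2Dist);

std::vector< std::vector<cv::DMatch> > matches;
matcher.radiusMatch(d_query, d_train, matches, 0.2f /*maxDistance*/);

// matches[i] now lists every train descriptor within maxDistance of query descriptor i.
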
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches,
    float maxDistance, const vector<GpuMat>& masks, Stream& stream)
{
    if (query.empty() || empty())
        return;

    using namespace cv::gpu::bf_radius_match;

    typedef void (*caller_t)(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
        int cc, cudaStream_t stream);

    static const caller_t callers[3][6] =
    {
        {
            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
            matchL1_gpu<int>, matchL1_gpu<float>
        },
        {
            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
        },
        {
            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
        }
    };

    DeviceInfo info;
    int cc = info.majorVersion() * 10 + info.minorVersion();

    CV_Assert(TargetArchs::builtWith(GLOBAL_ATOMICS) && info.supports(GLOBAL_ATOMICS));

    const int nQuery = query.rows;

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(trainIdx.empty() || (trainIdx.rows == nQuery && trainIdx.size() == distance.size() && trainIdx.size() == imgIdx.size()));

    ensureSizeIsEnough(1, nQuery, CV_32SC1, nMatches);
    if (trainIdx.empty())
    {
        ensureSizeIsEnough(nQuery, std::max((nQuery / 100), 10), CV_32SC1, trainIdx);
        ensureSizeIsEnough(nQuery, std::max((nQuery / 100), 10), CV_32SC1, imgIdx);
        ensureSizeIsEnough(nQuery, std::max((nQuery / 100), 10), CV_32FC1, distance);
    }

    caller_t func = callers[distType][query.depth()];
    CV_Assert(func != 0);

    vector<DevMem2D> trains_(trainDescCollection.begin(), trainDescCollection.end());
    vector<DevMem2D> masks_(masks.begin(), masks.end());

    func(query, &trains_[0], static_cast<int>(trains_.size()), maxDistance, masks_.size() == 0 ? 0 : &masks_[0],
        trainIdx, imgIdx, distance, nMatches, cc, StreamAccessor::getStream(stream));
}

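A sketch of the multi-image path under the same assumptions (d_query is as before; d_train1/d_train2 are placeholder GpuMat descriptor sets):

cv::gpu::BruteForceMatcher_GPU_base matcher;  // L2Dist by default

std::vector<cv::gpu::GpuMat> trains;
trains.push_back(d_train1);
trains.push_back(d_train2);
matcher.add(trains);  // fills the trainDescCollection consumed above

std::vector< std::vector<cv::DMatch> > matches;
matcher.radiusMatch(d_query, matches, 0.2f);

// DMatch::imgIdx identifies which train image each match came from.
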
void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,
    vector< vector<DMatch> >& matches, bool compactResult)
{
    if (trainIdx.empty() || imgIdx.empty() || distance.empty() || nMatches.empty())
        return;

@@ -801,7 +960,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx
    CV_Assert(trainIdx.type() == CV_32SC1);
    CV_Assert(imgIdx.type() == CV_32SC1 && imgIdx.size() == trainIdx.size());
    CV_Assert(distance.type() == CV_32FC1 && distance.size() == trainIdx.size());
    CV_Assert(nMatches.type() == CV_32SC1 && nMatches.cols == trainIdx.rows);

    const int nQuery = trainIdx.rows;

@@ -809,6 +968,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx
    matches.reserve(nQuery);

    const int* nMatches_ptr = nMatches.ptr<int>();

    for (int queryIdx = 0; queryIdx < nQuery; ++queryIdx)
    {
        const int* trainIdx_ptr = trainIdx.ptr<int>(queryIdx);
@@ -843,18 +1003,11 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatchConvert(const Mat& trainIdx
    }
}

void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& query, vector< vector<DMatch> >& matches,
    float maxDistance, const vector<GpuMat>& masks, bool compactResult)
{
    GpuMat trainIdx, imgIdx, distance, nMatches;
    radiusMatchCollection(query, trainIdx, imgIdx, distance, nMatches, maxDistance, masks);
    radiusMatchDownload(trainIdx, imgIdx, distance, nMatches, matches, compactResult);
}

File diff suppressed because it is too large
@@ -49,184 +49,192 @@ using namespace cv::gpu::device;

namespace cv { namespace gpu { namespace bf_match
{
    ///////////////////////////////////////////////////////////////////////////////
    // Reduction

    template <int BLOCK_SIZE>
    __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
    {
        s_distance += threadIdx.y * BLOCK_SIZE;
        s_trainIdx += threadIdx.y * BLOCK_SIZE;

        s_distance[threadIdx.x] = bestDistance;
        s_trainIdx[threadIdx.x] = bestTrainIdx;

        __syncthreads();

        reducePredVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<volatile float>());
    }

    template <int BLOCK_SIZE>
    __device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
    {
        s_distance += threadIdx.y * BLOCK_SIZE;
        s_trainIdx += threadIdx.y * BLOCK_SIZE;
        s_imgIdx   += threadIdx.y * BLOCK_SIZE;

        s_distance[threadIdx.x] = bestDistance;
        s_trainIdx[threadIdx.x] = bestTrainIdx;
        s_imgIdx  [threadIdx.x] = bestImgIdx;

        __syncthreads();

        reducePredVal2<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, s_imgIdx, bestImgIdx, threadIdx.x, less<volatile float>());
    }

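    reducePredVal / reducePredVal2 come from the gpu device utilities; as a standalone illustration of the kind of predicated reduction they are assumed to perform (not the library code), one BLOCK_SIZE-thread row can find its arg-min like this:

    // Illustrative block-row arg-min (assumes BLOCK_SIZE is a power of two and
    // all BLOCK_SIZE threads of the row participate).
    template <int BLOCK_SIZE>
    __device__ void blockArgMin(float* s_val, int* s_idx, float& val, int& idx)
    {
        s_val[threadIdx.x] = val;
        s_idx[threadIdx.x] = idx;
        __syncthreads();

        for (int offset = BLOCK_SIZE / 2; offset > 0; offset /= 2)
        {
            if (threadIdx.x < offset && s_val[threadIdx.x + offset] < s_val[threadIdx.x])
            {
                s_val[threadIdx.x] = s_val[threadIdx.x + offset];
                s_idx[threadIdx.x] = s_idx[threadIdx.x + offset];
            }
            __syncthreads();
        }

        val = s_val[0];  // every thread reads back the row-wide minimum
        idx = s_idx[0];
    }
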
    ///////////////////////////////////////////////////////////////////////////////
    // Match Unrolled Cached

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
    __device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)
    {
        #pragma unroll
        for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
        {
            const int loadX = threadIdx.x + i * BLOCK_SIZE;
            s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(min(queryIdx, query.rows - 1))[loadX] : 0;
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
        typename Dist::value_type* s_query, typename Dist::value_type* s_train,
        float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
    {
        for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
        {
            Dist dist;

            #pragma unroll
            for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
            {
                const int loadX = threadIdx.x + i * BLOCK_SIZE;

                s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = loadX < train.cols ? train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX] : 0;

                __syncthreads();

                #pragma unroll
                for (int j = 0; j < BLOCK_SIZE; ++j)
                    dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);

                __syncthreads();
            }

            typename Dist::result_type distVal = dist;

            const int trainIdx = t * BLOCK_SIZE + threadIdx.x;

            if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
            {
                bestImgIdx = imgIdx;
                bestDistance = distVal;
                bestTrainIdx = trainIdx;
            }
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
    {
        extern __shared__ int smem[];

        const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;

        typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
        typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);

        loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);

        float myBestDistance = numeric_limits<float>::max();
        int myBestTrainIdx = -1;

        loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);

        __syncthreads();

        float* s_distance = (float*)(smem);
        int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);

        if (queryIdx < query.rows && threadIdx.x == 0)
        {
            bestTrainIdx[queryIdx] = myBestTrainIdx;
            bestDistance[queryIdx] = myBestDistance;
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Df& distance,
        cudaStream_t stream)
    {
        const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        const dim3 grid(divUp(query.rows, BLOCK_SIZE));

        const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

        matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
        int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
    {
        extern __shared__ int smem[];

        const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;

        typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
        typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);

        loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);

        float myBestDistance = numeric_limits<float>::max();
        int myBestTrainIdx = -1;
        int myBestImgIdx = -1;

        Mask m = mask;

        for (int imgIdx = 0; imgIdx < n; ++imgIdx)
        {
            const DevMem2D_<T> train = trains[imgIdx];
            m.next();
            loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
        }

        __syncthreads();

        float* s_distance = (float*)(smem);
        int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
        int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);

        findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);

        if (queryIdx < query.rows && threadIdx.x == 0)
        {
            bestTrainIdx[queryIdx] = myBestTrainIdx;
            bestImgIdx[queryIdx] = myBestImgIdx;
            bestDistance[queryIdx] = myBestDistance;
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
        cudaStream_t stream)
    {
        const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        const dim3 grid(divUp(query.rows, BLOCK_SIZE));

        const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

        matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
@@ -234,170 +242,522 @@ namespace cv { namespace gpu { namespace bf_match
    }

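    For a feel of the shared-memory budget above: with the 16 x 16 blocks the dispatcher uses and 64-column descriptors, the cached kernel reserves (illustrative arithmetic only):

    // query cache: BLOCK_SIZE * MAX_DESC_LEN = 16 * 64 = 1024 ints
    // train tile / reduction scratch: BLOCK_SIZE * BLOCK_SIZE = 256 ints
    // total: (1024 + 256) * sizeof(int) = 5120 bytes of dynamic shared memory per block
    const int BLOCK_SIZE = 16, MAX_DESC_LEN = 64;
    const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE)
                           + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
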
    ///////////////////////////////////////////////////////////////////////////////
    // Match Unrolled

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
        typename Dist::value_type* s_query, typename Dist::value_type* s_train,
        float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
    {
        for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
        {
            Dist dist;

            #pragma unroll
            for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
            {
                const int loadX = threadIdx.x + i * BLOCK_SIZE;

                if (loadX < query.cols)
                {
                    s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];
                    s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
                }
                else
                {
                    s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
                    s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
                }

                __syncthreads();

                #pragma unroll
                for (int j = 0; j < BLOCK_SIZE; ++j)
                    dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);

                __syncthreads();
            }

            typename Dist::result_type distVal = dist;

            const int trainIdx = t * BLOCK_SIZE + threadIdx.x;

            if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
            {
                bestImgIdx = imgIdx;
                bestDistance = distVal;
                bestTrainIdx = trainIdx;
            }
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
    {
        extern __shared__ int smem[];

        const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;

        float myBestDistance = numeric_limits<float>::max();
        int myBestTrainIdx = -1;

        typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
        typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);

        __syncthreads();

        float* s_distance = (float*)(smem);
        int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);

        if (queryIdx < query.rows && threadIdx.x == 0)
        {
            bestTrainIdx[queryIdx] = myBestTrainIdx;
            bestDistance[queryIdx] = myBestDistance;
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Df& distance,
        cudaStream_t stream)
    {
        const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        const dim3 grid(divUp(query.rows, BLOCK_SIZE));

        const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

        matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
        int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
    {
        extern __shared__ int smem[];

        const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;

        float myBestDistance = numeric_limits<float>::max();
        int myBestTrainIdx = -1;
        int myBestImgIdx = -1;

        typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
        typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        Mask m = mask;

        for (int imgIdx = 0; imgIdx < n; ++imgIdx)
        {
            const DevMem2D_<T> train = trains[imgIdx];
            m.next();
            loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
        }

        __syncthreads();

        float* s_distance = (float*)(smem);
        int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
        int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);

        findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);

        if (queryIdx < query.rows && threadIdx.x == 0)
        {
            bestTrainIdx[queryIdx] = myBestTrainIdx;
            bestImgIdx[queryIdx] = myBestImgIdx;
            bestDistance[queryIdx] = myBestDistance;
        }
    }

    template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
    void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
        cudaStream_t stream)
    {
        const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        const dim3 grid(divUp(query.rows, BLOCK_SIZE));

        const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

        matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    ///////////////////////////////////////////////////////////////////////////////
    // Match

    template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
    __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
        typename Dist::value_type* s_query, typename Dist::value_type* s_train,
        float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
    {
        for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
        {
            Dist dist;

            for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
            {
                const int loadX = threadIdx.x + i * BLOCK_SIZE;

                if (loadX < query.cols)
                {
                    s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];
                    s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
                }
                else
                {
                    s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
                    s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
                }

                __syncthreads();

                #pragma unroll
                for (int j = 0; j < BLOCK_SIZE; ++j)
                    dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);

                __syncthreads();
            }

            typename Dist::result_type distVal = dist;

            const int trainIdx = t * BLOCK_SIZE + threadIdx.x;

            if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
            {
                bestImgIdx = imgIdx;
                bestDistance = distVal;
                bestTrainIdx = trainIdx;
            }
        }
    }

    template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
    __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
    {
        extern __shared__ int smem[];

        const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;

        float myBestDistance = numeric_limits<float>::max();
        int myBestTrainIdx = -1;

        typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
        typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);

        __syncthreads();

        float* s_distance = (float*)(smem);
        int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);

        if (queryIdx < query.rows && threadIdx.x == 0)
        {
            bestTrainIdx[queryIdx] = myBestTrainIdx;
            bestDistance[queryIdx] = myBestDistance;
        }
    }

    template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
    void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Df& distance,
        cudaStream_t stream)
    {
        const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        const dim3 grid(divUp(query.rows, BLOCK_SIZE));

        const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

        match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
    __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,
        int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
    {
        extern __shared__ int smem[];

        const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;

        float myBestDistance = numeric_limits<float>::max();
        int myBestTrainIdx = -1;
        int myBestImgIdx = -1;

        typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
        typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);

        Mask m = mask;
        for (int imgIdx = 0; imgIdx < n; ++imgIdx)
        {
            const DevMem2D_<T> train = trains[imgIdx];
            m.next();
            loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
        }

        __syncthreads();

        float* s_distance = (float*)(smem);
        int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
        int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);

        findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);

        if (queryIdx < query.rows && threadIdx.x == 0)
        {
            bestTrainIdx[queryIdx] = myBestTrainIdx;
            bestImgIdx[queryIdx] = myBestImgIdx;
            bestDistance[queryIdx] = myBestDistance;
        }
    }

    template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
    void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
        cudaStream_t stream)
    {
        const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        const dim3 grid(divUp(query.rows, BLOCK_SIZE));

        // the multi-image reduction writes three BLOCK_SIZE x BLOCK_SIZE scratch
        // arrays (distance, train index, image index), so reserve room for all three
        const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

        match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    ///////////////////////////////////////////////////////////////////////////////
    // Match dispatcher

    template <typename Dist, typename T, typename Mask>
    void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Df& distance,
        int cc, cudaStream_t stream)
    {
        if (query.cols <= 64)
        {
            matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
        }
        else if (query.cols <= 128)
        {
            matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
        }
        else if (query.cols <= 256)
        {
            matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
        }
        else if (query.cols <= 512)
        {
            matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
        }
        else if (query.cols <= 1024)
        {
            matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
        }
        else
        {
            match<16, Dist>(query, train, mask, trainIdx, distance, stream);
        }
    }

    template <typename Dist, typename T, typename Mask>
    void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,
        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
        int cc, cudaStream_t stream)
    {
        if (query.cols <= 64)
        {
            matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
        }
        else if (query.cols <= 128)
        {
            matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
        }
        else if (query.cols <= 256)
        {
            matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
        }
        else if (query.cols <= 512)
        {
            matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
        }
        else if (query.cols <= 1024)
        {
            matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
        }
        else
        {
            match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
        }
    }

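    The dispatchers above pick a kernel from the descriptor width alone; the mapping, spelled out (64 and 128 cover the common SURF descriptor sizes):

    // query.cols <= 64   -> matchUnrolledCached<16, 64>   (query fully cached in smem)
    // query.cols <= 128  -> matchUnrolledCached<16, 128>
    // query.cols <= 256  -> matchUnrolled<16, 256>        (unrolled, no query cache)
    // query.cols <= 512  -> matchUnrolled<16, 512>
    // query.cols <= 1024 -> matchUnrolled<16, 1024>
    // otherwise          -> match<16>                     (generic runtime loop)
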
///////////////////////////////////////////////////////////////////////////////
|
||||
// Match caller
|
||||
|
||||
template <typename T> void matchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train_, const DevMem2D& mask,
|
||||
const DevMem2D& trainIdx, const DevMem2D& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
|
||||
const DevMem2Di& trainIdx, const DevMem2Df& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
{
|
||||
SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));
|
||||
if (mask.data)
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, SingleMask(mask), trainIdx, DevMem2D(), distance, cc, stream);
|
||||
{
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
|
||||
trainIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
else
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, DevMem2D(), distance, cc, stream);
|
||||
{
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
|
||||
trainIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
}
|
||||
|
||||
template void matchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
|
||||
template <typename T> void matchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train_, const DevMem2D& mask,
|
||||
const DevMem2D& trainIdx, const DevMem2D& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
|
||||
const DevMem2Di& trainIdx, const DevMem2Df& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
{
|
||||
SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));
|
||||
if (mask.data)
|
||||
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, SingleMask(mask), trainIdx, DevMem2D(), distance, cc, stream);
|
||||
{
|
||||
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
|
||||
trainIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
else
|
||||
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, DevMem2D(), distance, cc, stream);
|
||||
{
|
||||
matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
|
||||
trainIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
}
|
||||
|
||||
//template void matchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
|
||||
template <typename T> void matchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train_, const DevMem2D& mask,
|
||||
const DevMem2D& trainIdx, const DevMem2D& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, const DevMem2D& mask,
|
||||
const DevMem2Di& trainIdx, const DevMem2Df& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
{
|
||||
SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));
|
||||
if (mask.data)
|
||||
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, SingleMask(mask), trainIdx, DevMem2D(), distance, cc, stream);
|
||||
{
|
||||
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),
|
||||
trainIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
else
|
||||
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, DevMem2D(), distance, cc, stream);
|
||||
{
|
||||
matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),
|
||||
trainIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
}
|
||||
|
||||
template void matchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
//template void matchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
template void matchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
|
||||
|
||||
template <typename T> void matchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
|
||||
const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
|
||||
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
|
||||
int cc, cudaStream_t stream)
|
||||
{
|
||||
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);
|
||||
if (maskCollection.data)
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, MaskCollection(maskCollection.data), trainIdx, imgIdx, distance, cc, stream);
|
||||
if (masks.data)
|
||||
{
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
|
||||
trainIdx, imgIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
else
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
|
||||
{
|
||||
matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
|
||||
trainIdx, imgIdx, distance,
|
||||
cc, stream);
|
||||
}
|
||||
}
|
||||
|
||||
template void matchCollectionL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
//template void matchCollectionL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchCollectionL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchCollectionL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
|
||||
template void matchCollectionL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
template void matchCollectionL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL1_gpu<float >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
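
// Illustrative sketch of the explicit-instantiation idiom behind the lists
// above (toy names, not part of the library): the template bodies live only in
// this .cu translation unit, so each element type the host wrappers can
// request must be emitted explicitly; schar/short stay commented out where the
// host API does not expose them.
namespace instantiation_sketch
{
    template <typename T> int sumPair(T a, T b) { return (int)a + (int)b; }
    template int sumPair<unsigned short>(unsigned short, unsigned short); // forces the ushort specialization
}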

template <typename T> void matchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
    int cc, cudaStream_t stream)
{
    TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);

    if (maskCollection.data)
        matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, MaskCollection(maskCollection.data), trainIdx, imgIdx, distance, cc, stream);
    else
        matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}

template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
    int cc, cudaStream_t stream)
{
    if (masks.data)
    {
        matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
            trainIdx, imgIdx, distance,
            cc, stream);
    }
    else
    {
        matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
            trainIdx, imgIdx, distance,
            cc, stream);
    }
}
//template void matchCollectionL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchCollectionL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchCollectionL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchCollectionL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchCollectionL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
template void matchCollectionL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchL2_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchL2_gpu<float >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);

template <typename T> void matchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance,
    int cc, cudaStream_t stream)
{
    TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);

    if (maskCollection.data)
        matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, MaskCollection(maskCollection.data), trainIdx, imgIdx, distance, cc, stream);
    else
        matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}

template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
    int cc, cudaStream_t stream)
{
    if (masks.data)
    {
        matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
            trainIdx, imgIdx, distance,
            cc, stream);
    }
    else
    {
        matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),
            trainIdx, imgIdx, distance,
            cc, stream);
    }
}
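
// Reference semantics for the Dist functors used above, as a sketch that
// follows the reduceIter/conversion contract visible in the kernels (toy type,
// not the library's implementation):
struct L1DistSketch
{
    typedef int value_type;
    typedef int result_type;
    __device__ __forceinline__ L1DistSketch() : acc(0) {}
    __device__ __forceinline__ void reduceIter(int a, int b) { acc += ::abs(a - b); } // L1: sum of |a - b|
    __device__ __forceinline__ operator result_type() const { return acc; }
    // L2 would accumulate (a - b) * (a - b) and apply sqrtf on conversion;
    // Hamming would accumulate __popc(a ^ b) over integer words.
    int acc;
};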
template void matchCollectionHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchCollectionHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
template void matchCollectionHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
//template void matchCollectionHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
template void matchCollectionHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
template void matchHamming_gpu<int >(const DevMem2D& query, const DevMem2D& trains, const DevMem2D_<PtrStep>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);
}}}
@ -49,466 +49,410 @@ using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace bf_radius_match
{
template <typename T> struct SingleTrain
{
    enum {USE_IMG_IDX = 0};

    explicit SingleTrain(const DevMem2D_<T>& train_) : train(train_)
    {
    }

    static __device__ __forceinline__ void store(const int* s_trainIdx, const int* s_imgIdx, const float* s_dist, unsigned int& s_count, int& s_globInd,
        int* trainIdx, int* imgIdx, float* distance, int maxCount)
    {
        const int tid = threadIdx.y * blockDim.x + threadIdx.x;

        if (tid < s_count && s_globInd + tid < maxCount)
        {
            trainIdx[s_globInd + tid] = s_trainIdx[tid];
            distance[s_globInd + tid] = s_dist[tid];
        }

        if (tid == 0)
        {
            s_globInd += s_count;
            s_count = 0;
        }
    }

    template <int BLOCK_STACK, typename Dist, typename VecDiff, typename Mask>
    __device__ __forceinline__ void loop(float maxDistance, Mask& mask, const VecDiff& vecDiff,
        int* s_trainIdx, int* s_imgIdx, float* s_dist, unsigned int& s_count, int& s_globInd,
        int* trainIdxRow, int* imgIdxRow, float* distanceRow, int maxCount,
        typename Dist::result_type* s_diffRow) const
    {
        #if __CUDA_ARCH__ >= 120

        for (int i = 0; i < train.rows; i += blockDim.y)
        {
            int trainIdx = i + threadIdx.y;

            if (trainIdx < train.rows && mask(blockIdx.x, trainIdx))
            {
                Dist dist;

                vecDiff.calc(train.ptr(trainIdx), train.cols, dist, s_diffRow, threadIdx.x);

                const typename Dist::result_type val = dist;

                if (threadIdx.x == 0 && val < maxDistance)
                {
                    unsigned int ind = atomicInc(&s_count, (unsigned int) -1);
                    s_trainIdx[ind] = trainIdx;
                    s_dist[ind] = val;
                }
            }

            __syncthreads();

            if (s_count >= BLOCK_STACK - blockDim.y)
                store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);

            __syncthreads();
        }

        store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);

        #endif
    }

    __device__ __forceinline__ int descLen() const
    {
        return train.cols;
    }

    const DevMem2D_<T> train;
};

///////////////////////////////////////////////////////////////////////////////
// Match Unrolled

template <int BLOCK_SIZE, int MAX_DESC_LEN, bool SAVE_IMG_IDX, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const DevMem2D_<T> query, int imgIdx, const DevMem2D_<T> train, float maxDistance, const Mask mask,
    PtrStepi bestTrainIdx, PtrStepi bestImgIdx, PtrStepf bestDistance, unsigned int* nMatches, int maxCount)
{
    #if __CUDA_ARCH__ >= 110

    extern __shared__ int smem[];

    const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;

    typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
    typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);

    Dist dist;

    #pragma unroll
    for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
    {
        const int loadX = threadIdx.x + i * BLOCK_SIZE;

        if (loadX < query.cols)
        {
            s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];
            s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
        }
        else
        {
            s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
            s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
        }

        __syncthreads();

        #pragma unroll
        for (int j = 0; j < BLOCK_SIZE; ++j)
            dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);

        __syncthreads();
    }

    float distVal = (typename Dist::result_type)dist;

    if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx) && distVal < maxDistance)
    {
        unsigned int ind = atomicInc(nMatches + queryIdx, (unsigned int) -1);
        if (ind < maxCount)
        {
            bestTrainIdx.ptr(queryIdx)[ind] = trainIdx;
            if (SAVE_IMG_IDX) bestImgIdx.ptr(queryIdx)[ind] = imgIdx;
            bestDistance.ptr(queryIdx)[ind] = distVal;
        }
    }

    #endif
}
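
// Worked sizing sketch for the kernel above (values assumed, matching the
// 16 x 16 launches in the callers below): BLOCK_SIZE = 16 with
// MAX_DESC_LEN = 64 runs 64 / 16 = 4 fully unrolled tiles, each staging one
// 16x16 query patch plus one 16x16 train patch in shared memory.
enum { SKETCH_BLOCK_SIZE = 16, SKETCH_MAX_DESC_LEN = 64 };
enum { SKETCH_TILES = SKETCH_MAX_DESC_LEN / SKETCH_BLOCK_SIZE };                          // 4 unrolled iterations
const size_t sketchSmemSize = (2 * SKETCH_BLOCK_SIZE * SKETCH_BLOCK_SIZE) * sizeof(int);  // 2048 bytes per block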
template <typename T> struct TrainCollection
{
    enum {USE_IMG_IDX = 1};

    TrainCollection(const DevMem2D_<T>* trainCollection_, int nImg_, int desclen_) :
        trainCollection(trainCollection_), nImg(nImg_), desclen(desclen_)
    {
    }

    static __device__ __forceinline__ void store(const int* s_trainIdx, const int* s_imgIdx, const float* s_dist, unsigned int& s_count, int& s_globInd,
        int* trainIdx, int* imgIdx, float* distance, int maxCount)
    {
        const int tid = threadIdx.y * blockDim.x + threadIdx.x;

        if (tid < s_count && s_globInd + tid < maxCount)
        {
            trainIdx[s_globInd + tid] = s_trainIdx[tid];
            imgIdx[s_globInd + tid] = s_imgIdx[tid];
            distance[s_globInd + tid] = s_dist[tid];
        }

        if (tid == 0)
        {
            s_globInd += s_count;
            s_count = 0;
        }
    }

    template <int BLOCK_STACK, typename Dist, typename VecDiff, typename Mask>
    __device__ void loop(float maxDistance, Mask& mask, const VecDiff& vecDiff,
        int* s_trainIdx, int* s_imgIdx, float* s_dist, unsigned int& s_count, int& s_globInd,
        int* trainIdxRow, int* imgIdxRow, float* distanceRow, int maxCount,
        typename Dist::result_type* s_diffRow) const
    {
        #if __CUDA_ARCH__ >= 120

        for (int imgIdx = 0; imgIdx < nImg; ++imgIdx)
        {
            const DevMem2D_<T> train = trainCollection[imgIdx];

            mask.next();

            for (int i = 0; i < train.rows; i += blockDim.y)
            {
                int trainIdx = i + threadIdx.y;

                if (trainIdx < train.rows && mask(blockIdx.x, trainIdx))
                {
                    Dist dist;

                    vecDiff.calc(train.ptr(trainIdx), desclen, dist, s_diffRow, threadIdx.x);

                    const typename Dist::result_type val = dist;

                    if (threadIdx.x == 0 && val < maxDistance)
                    {
                        unsigned int ind = atomicInc(&s_count, (unsigned int) -1);
                        s_trainIdx[ind] = trainIdx;
                        s_imgIdx[ind] = imgIdx;
                        s_dist[ind] = val;
                    }
                }

                __syncthreads();

                if (s_count >= BLOCK_STACK - blockDim.y)
                    store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);

                __syncthreads();
            }
        }

        store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount);

        #endif
    }

    __device__ __forceinline__ int descLen() const
    {
        return desclen;
    }

    const DevMem2D_<T>* trainCollection;
    const int nImg;
    const int desclen;
};
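
// Host-side sketch of building the device-resident header array that
// TrainCollection dereferences inside loop(); standard CUDA API only, and the
// per-image fill plus descriptorLen are assumptions of this illustration:
inline void packTrainCollectionSketch(const std::vector< DevMem2D_<float> >& headers, int descriptorLen)
{
    // copy the DevMem2D_ headers into GPU memory so a kernel can index
    // trainCollection[imgIdx] directly
    DevMem2D_<float>* d_headers = 0;
    cudaMalloc((void**)&d_headers, headers.size() * sizeof(DevMem2D_<float>));
    cudaMemcpy(d_headers, &headers[0], headers.size() * sizeof(DevMem2D_<float>), cudaMemcpyHostToDevice);
    TrainCollection<float> collection(d_headers, (int)headers.size(), descriptorLen);
    (void)collection; // a real caller would now launch the radiusMatch kernel with it
}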
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename VecDiff, typename Dist, typename T, typename Train, typename Mask>
__global__ void radiusMatch(const PtrStep_<T> query, const Train train, float maxDistance, const Mask mask,
    PtrStepi trainIdx, PtrStepi imgIdx, PtrStepf distance, int* nMatches, int maxCount)
{
    #if __CUDA_ARCH__ >= 110

    typedef typename Dist::result_type result_type;
    typedef typename Dist::value_type value_type;

    __shared__ result_type s_mem[BLOCK_DIM_X * BLOCK_DIM_Y];

    __shared__ int s_trainIdx[BLOCK_STACK];
    __shared__ int s_imgIdx[Train::USE_IMG_IDX ? BLOCK_STACK : 1];
    __shared__ float s_dist[BLOCK_STACK];
    __shared__ unsigned int s_count;

    __shared__ int s_globInd;

    if (threadIdx.x == 0 && threadIdx.y == 0)
    {
        s_count = 0;
        s_globInd = 0;
    }
    __syncthreads();

    const VecDiff vecDiff(query.ptr(blockIdx.x), train.descLen(), (typename Dist::value_type*)s_mem, threadIdx.y * BLOCK_DIM_X + threadIdx.x, threadIdx.x);

    Mask m = mask;

    train.template loop<BLOCK_STACK, Dist>(maxDistance, m, vecDiff,
        s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd,
        trainIdx.ptr(blockIdx.x), imgIdx.ptr(blockIdx.x), distance.ptr(blockIdx.x), maxCount,
        s_mem + BLOCK_DIM_X * threadIdx.y);

    if (threadIdx.x == 0 && threadIdx.y == 0)
        nMatches[blockIdx.x] = s_globInd;

    #endif
}
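
// The architecture guards above compile the bodies only where global atomics
// exist (sm_11+ for the match kernels, sm_12+ for the shared-memory stack).
// Host-side sketch of obtaining the compute-capability value, assuming the
// cc = major * 10 + minor encoding the new callers receive:
inline int computeCapabilitySketch(int device)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return prop.major * 10 + prop.minor;   // e.g. 11 for sm_11
}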
///////////////////////////////////////////////////////////////////////////////
// Radius Match kernel caller

template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename Dist, typename T, typename Train, typename Mask>
void radiusMatchSimple_caller(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int* nMatches,
    cudaStream_t stream)
{
    StaticAssert<BLOCK_STACK >= BLOCK_DIM_Y>::check();
    StaticAssert<BLOCK_STACK <= BLOCK_DIM_X * BLOCK_DIM_Y>::check();

    const dim3 grid(query.rows, 1, 1);
    const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);

    radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_STACK, VecDiffGlobal<BLOCK_DIM_X, T>, Dist, T>
        <<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, imgIdx, distance, nMatches, trainIdx.cols);
    cudaSafeCall( cudaGetLastError() );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}

template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask,
    const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, cudaStream_t stream)
{
    const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));

    const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

    matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask,
        trainIdx, PtrStepi(), distance, nMatches.data, trainIdx.cols);
    cudaSafeCall( cudaGetLastError() );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}

template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T, typename Train, typename Mask>
void radiusMatchCached_caller(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int* nMatches,
    cudaStream_t stream)
{
    StaticAssert<BLOCK_STACK >= BLOCK_DIM_Y>::check();
    StaticAssert<BLOCK_STACK <= BLOCK_DIM_X * BLOCK_DIM_Y>::check();
    StaticAssert<BLOCK_DIM_X * BLOCK_DIM_Y >= MAX_LEN>::check();
    StaticAssert<MAX_LEN % BLOCK_DIM_X == 0>::check();

    const dim3 grid(query.rows, 1, 1);
    const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);

    radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_STACK, VecDiffCachedRegister<BLOCK_DIM_X, MAX_LEN, LEN_EQ_MAX_LEN, typename Dist::value_type>, Dist, T>
        <<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, imgIdx, distance, nMatches, trainIdx.cols);
    cudaSafeCall( cudaGetLastError() );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}

template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T>
void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2D* masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    cudaStream_t stream)
{
    const dim3 block(BLOCK_SIZE, BLOCK_SIZE);

    const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

    for (int i = 0; i < n; ++i)
    {
        const DevMem2D_<T> train = trains[i];

        const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));

        if (masks != 0 && masks[i].data)
        {
            matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]),
                trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);
        }
        else
        {
            matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(),
                trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);
        }
        cudaSafeCall( cudaGetLastError() );
    }

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
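
// The grids above assign one BLOCK_SIZE x BLOCK_SIZE tile to every
// (query row, train row) pair; divUp is the usual round-up division.
// Sketch of the helper these callers assume:
inline int divUpSketch(int total, int grain)
{
    return (total + grain - 1) / grain;
}
// e.g. query.rows = 1000 with BLOCK_SIZE = 16 gives grid.y = divUpSketch(1000, 16) = 63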
///////////////////////////////////////////////////////////////////////////////
// Match

template <int BLOCK_SIZE, bool SAVE_IMG_IDX, typename Dist, typename T, typename Mask>
__global__ void match(const DevMem2D_<T> query, int imgIdx, const DevMem2D_<T> train, float maxDistance, const Mask mask,
    PtrStepi bestTrainIdx, PtrStepi bestImgIdx, PtrStepf bestDistance, unsigned int* nMatches, int maxCount)
{
    #if __CUDA_ARCH__ >= 110

    extern __shared__ int smem[];

    const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;

    typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
    typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);

    Dist dist;

    for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
    {
        const int loadX = threadIdx.x + i * BLOCK_SIZE;

        if (loadX < query.cols)
        {
            s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(min(queryIdx, query.rows - 1))[loadX];
            s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
        }
        else
        {
            s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
            s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
        }

        __syncthreads();

        #pragma unroll
        for (int j = 0; j < BLOCK_SIZE; ++j)
            dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);

        __syncthreads();
    }

    float distVal = (typename Dist::result_type)dist;

    if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx) && distVal < maxDistance)
    {
        unsigned int ind = atomicInc(nMatches + queryIdx, (unsigned int) -1);
        if (ind < maxCount)
        {
            bestTrainIdx.ptr(queryIdx)[ind] = trainIdx;
            if (SAVE_IMG_IDX) bestImgIdx.ptr(queryIdx)[ind] = imgIdx;
            bestDistance.ptr(queryIdx)[ind] = distVal;
        }
    }

    #endif
}
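
// atomicInc keeps counting after a row of the result matrix is full, so the
// per-query counter can exceed maxCount (trainIdx.cols); readers must clamp.
// Host-side sketch with a hypothetical downloaded counter row:
inline int clampedMatchCountSketch(const unsigned int* nMatchesRow, int queryIdx, int maxCount)
{
    const int count = (int)nMatchesRow[queryIdx];
    return count < maxCount ? count : maxCount;   // only this many entries of the row are valid
}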
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask,
    const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    cudaStream_t stream)
{
    const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));

    const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

    match<BLOCK_SIZE, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask,
        trainIdx, PtrStepi(), distance, nMatches.data, trainIdx.cols);
    cudaSafeCall( cudaGetLastError() );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}

template <int BLOCK_SIZE, typename Dist, typename T>
void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2D* masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    cudaStream_t stream)
{
    const dim3 block(BLOCK_SIZE, BLOCK_SIZE);

    const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);

    for (int i = 0; i < n; ++i)
    {
        const DevMem2D_<T> train = trains[i];

        const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));

        if (masks != 0 && masks[i].data)
        {
            match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]),
                trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);
        }
        else
        {
            match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(),
                trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);
        }
        cudaSafeCall( cudaGetLastError() );
    }

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Radius Match Dispatcher

template <typename Dist, typename T, typename Train, typename Mask>
void radiusMatchDispatcher(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    if (query.cols < 64)
    {
        radiusMatchCached_caller<16, 16, 64, 64, false, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
    else if (query.cols == 64)
    {
        radiusMatchCached_caller<16, 16, 64, 64, true, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
    else if (query.cols < 128)
    {
        radiusMatchCached_caller<16, 16, 64, 128, false, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
    else if (query.cols == 128)
    {
        radiusMatchCached_caller<16, 16, 64, 128, true, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
    else if (query.cols < 256)
    {
        radiusMatchCached_caller<16, 16, 64, 256, false, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
    else if (query.cols == 256)
    {
        radiusMatchCached_caller<16, 16, 64, 256, true, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
    else
    {
        radiusMatchSimple_caller<16, 16, 64, Dist>(
            query, train, maxDistance, mask,
            static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data,
            stream);
    }
}

///////////////////////////////////////////////////////////////////////////////
// Match dispatcher

template <typename Dist, typename T, typename Mask>
void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask,
    const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    if (query.cols <= 64)
    {
        matchUnrolled<16, 64, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 128)
    {
        matchUnrolled<16, 128, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 256)
    {
        matchUnrolled<16, 256, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 512)
    {
        matchUnrolled<16, 512, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 1024)
    {
        matchUnrolled<16, 1024, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);
    }
    else
    {
        match<16, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);
    }
}
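
// Worked dispatch sketch: SURF-64 descriptors (query.cols == 64) take
// matchUnrolled<16, 64>, SURF-128 takes <16, 128>, a 300-dim descriptor rounds
// up to <16, 512>, and anything past 1024 columns falls back to the
// runtime-loop match kernel. The selection rule restated as a toy helper:
inline int descLenBucketSketch(int cols)
{
    if (cols <= 64)   return 64;
    if (cols <= 128)  return 128;
    if (cols <= 256)  return 256;
    if (cols <= 512)  return 512;
    if (cols <= 1024) return 1024;
    return -1;   // runtime-loop fallback
}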
template <typename Dist, typename T>
void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2D* masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    if (query.cols <= 64)
    {
        matchUnrolled<16, 64, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 128)
    {
        matchUnrolled<16, 128, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 256)
    {
        matchUnrolled<16, 256, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 512)
    {
        matchUnrolled<16, 512, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);
    }
    else if (query.cols <= 1024)
    {
        matchUnrolled<16, 1024, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);
    }
    else
    {
        match<16, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);
    }
}
///////////////////////////////////////////////////////////////////////////////
// Radius Match caller
template <typename T> void radiusMatchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask,
    const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));

    if (mask.data)
    {
        radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask),
            trainIdx, DevMem2D(), distance, nMatches,
            stream);
    }
    else
    {
        radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, DevMem2D(), distance, nMatches,
            stream);
    }
}

template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
    const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    if (mask.data)
    {
        matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask),
            trainIdx, distance, nMatches,
            cc, stream);
    }
    else
    {
        matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(),
            trainIdx, distance, nMatches,
            cc, stream);
    }
}
template void radiusMatchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void matchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);

template <typename T> void radiusMatchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask,
    const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));

    if (mask.data)
    {
        radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask),
            trainIdx, DevMem2D(), distance, nMatches,
            stream);
    }
    else
    {
        radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, DevMem2D(), distance, nMatches,
            stream);
    }
}

template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
    const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    if (mask.data)
    {
        matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask),
            trainIdx, distance, nMatches,
            cc, stream);
    }
    else
    {
        matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(),
            trainIdx, distance, nMatches,
            cc, stream);
    }
}
//template void radiusMatchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void matchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);

template <typename T> void radiusMatchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask,
    const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_));

    if (mask.data)
    {
        radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask),
            trainIdx, DevMem2D(), distance, nMatches,
            stream);
    }
    else
    {
        radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, DevMem2D(), distance, nMatches,
            stream);
    }
}

template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask,
    const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    if (mask.data)
    {
        matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask),
            trainIdx, distance, nMatches,
            cc, stream);
    }
    else
    {
        matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(),
            trainIdx, distance, nMatches,
            cc, stream);
    }
}
template void radiusMatchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void matchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
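
// The Hamming variants are instantiated for integer types only: the distance
// is a population count over XOR-ed words, which has no meaning for float
// descriptors. Per-word sketch using the CUDA intrinsic:
__device__ __forceinline__ int hammingWordSketch(unsigned int a, unsigned int b)
{
    return __popc(a ^ b);   // number of differing bits
}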

template <typename T> void radiusMatchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);

    if (maskCollection.data)
    {
        radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
    else
    {
        radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
}

template <typename T> void matchL1_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks,
        trainIdx, imgIdx, distance, nMatches,
        cc, stream);
}
template void radiusMatchCollectionL1_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL1_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionL1_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionL1_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionL1_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionL1_gpu<float >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void matchL1_gpu<uchar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL1_gpu<schar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<ushort>(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<short >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<int >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL1_gpu<float >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);

template <typename T> void radiusMatchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);

    if (maskCollection.data)
    {
        radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
    else
    {
        radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
}

template <typename T> void matchL2_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks,
        trainIdx, imgIdx, distance, nMatches,
        cc, stream);
}
//template void radiusMatchCollectionL2_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionL2_gpu<float >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void matchL2_gpu<uchar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<schar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<short >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchL2_gpu<int >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchL2_gpu<float >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
|
||||
template <typename T> void radiusMatchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
template <typename T> void matchHamming_gpu(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks,
    const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,
    int cc, cudaStream_t stream)
{
    TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);

    if (maskCollection.data)
    {
        radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
    else
    {
        radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
    matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks,
        trainIdx, imgIdx, distance, nMatches,
        cc, stream);
}

template void radiusMatchCollectionHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionHamming_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionHamming_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionHamming_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void matchHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
//template void matchHamming_gpu<short >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
template void matchHamming_gpu<int >(const DevMem2D& query, const DevMem2D* trains, int n, float maxDistance, const DevMem2D* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);
}}}
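
The commented-out instantiations above are deliberate: only the descriptor depths the host wrapper can actually pass get compiled (uchar, ushort and int for Hamming; float for L2), keeping binary size down. A minimal sketch of how a host-side caller might dispatch on descriptor depth; the table layout and the hamming_callers name are illustrative, not part of this file:

    // Hypothetical function-pointer table indexed by GpuMat depth; null entries
    // mark depths that were not instantiated above and are therefore unsupported.
    typedef void (*hamming_caller_t)(const DevMem2D& query, const DevMem2D* trains, int n,
        float maxDistance, const DevMem2D* masks,
        const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
        const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);

    static const hamming_caller_t hamming_callers[] =
    {
        matchHamming_gpu<uchar>, 0 /*schar*/,
        matchHamming_gpu<ushort>, 0 /*short*/,
        matchHamming_gpu<int>, 0 /*float*/
    };
    // usage: look up hamming_callers[query.depth()] and fail on a null entry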
@ -47,6 +47,9 @@ namespace cv { namespace gpu { namespace device
{
    namespace detail
    {
        ///////////////////////////////////////////////////////////////////////////////
        // Reduction

        template <int n> struct WarpReductor
        {
            template <typename T, typename Op> static __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)
@ -209,6 +212,8 @@ namespace cv { namespace gpu { namespace device
            }
        };

        ///////////////////////////////////////////////////////////////////////////////
        // PredValWarpReductor

        template <int n> struct PredValWarpReductor;
        template <> struct PredValWarpReductor<64>
@ -501,6 +506,335 @@ namespace cv { namespace gpu { namespace device
                }
            }
        };

        ///////////////////////////////////////////////////////////////////////////////
        // PredVal2WarpReductor

        template <int n> struct PredVal2WarpReductor;
        template <> struct PredVal2WarpReductor<64>
        {
            template <typename T, typename V1, typename V2, typename Pred>
            static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
            {
                if (tid < 32)
                {
                    myData = sdata[tid];
                    myVal1 = sval1[tid];
                    myVal2 = sval2[tid];

                    T reg = sdata[tid + 32];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 32];
                        sval2[tid] = myVal2 = sval2[tid + 32];
                    }

                    reg = sdata[tid + 16];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 16];
                        sval2[tid] = myVal2 = sval2[tid + 16];
                    }

                    reg = sdata[tid + 8];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 8];
                        sval2[tid] = myVal2 = sval2[tid + 8];
                    }

                    reg = sdata[tid + 4];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 4];
                        sval2[tid] = myVal2 = sval2[tid + 4];
                    }

                    reg = sdata[tid + 2];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 2];
                        sval2[tid] = myVal2 = sval2[tid + 2];
                    }

                    reg = sdata[tid + 1];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 1];
                        sval2[tid] = myVal2 = sval2[tid + 1];
                    }
                }
            }
        };
        template <> struct PredVal2WarpReductor<32>
        {
            template <typename T, typename V1, typename V2, typename Pred>
            static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
            {
                if (tid < 16)
                {
                    myData = sdata[tid];
                    myVal1 = sval1[tid];
                    myVal2 = sval2[tid];

                    T reg = sdata[tid + 16];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 16];
                        sval2[tid] = myVal2 = sval2[tid + 16];
                    }

                    reg = sdata[tid + 8];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 8];
                        sval2[tid] = myVal2 = sval2[tid + 8];
                    }

                    reg = sdata[tid + 4];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 4];
                        sval2[tid] = myVal2 = sval2[tid + 4];
                    }

                    reg = sdata[tid + 2];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 2];
                        sval2[tid] = myVal2 = sval2[tid + 2];
                    }

                    reg = sdata[tid + 1];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 1];
                        sval2[tid] = myVal2 = sval2[tid + 1];
                    }
                }
            }
        };

        template <> struct PredVal2WarpReductor<16>
        {
            template <typename T, typename V1, typename V2, typename Pred>
            static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
            {
                if (tid < 8)
                {
                    myData = sdata[tid];
                    myVal1 = sval1[tid];
                    myVal2 = sval2[tid];

                    T reg = sdata[tid + 8];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 8];
                        sval2[tid] = myVal2 = sval2[tid + 8];
                    }

                    reg = sdata[tid + 4];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 4];
                        sval2[tid] = myVal2 = sval2[tid + 4];
                    }

                    reg = sdata[tid + 2];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 2];
                        sval2[tid] = myVal2 = sval2[tid + 2];
                    }

                    reg = sdata[tid + 1];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 1];
                        sval2[tid] = myVal2 = sval2[tid + 1];
                    }
                }
            }
        };
        template <> struct PredVal2WarpReductor<8>
        {
            template <typename T, typename V1, typename V2, typename Pred>
            static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
            {
                if (tid < 4)
                {
                    myData = sdata[tid];
                    myVal1 = sval1[tid];
                    myVal2 = sval2[tid];

                    T reg = sdata[tid + 4];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 4];
                        sval2[tid] = myVal2 = sval2[tid + 4];
                    }

                    reg = sdata[tid + 2];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 2];
                        sval2[tid] = myVal2 = sval2[tid + 2];
                    }

                    reg = sdata[tid + 1];
                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 1];
                        sval2[tid] = myVal2 = sval2[tid + 1];
                    }
                }
            }
        };
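
All four specializations repeat one warp-synchronous step: compare the neighbour's key at half the remaining stride and, when the predicate prefers it, copy the key and both payload values. Because the active threads execute in lock-step within a single warp on the SIMT hardware this code targets, no __syncthreads() is needed, and the volatile qualifier on sdata stops the compiler from caching shared-memory reads in registers. A condensed sketch of that one step, for illustration only:

    // One step of the pattern above; pred(a, b) returns true when a should
    // replace b (e.g. a less-than predicate yields an arg-min reduction).
    template <typename T, typename V1, typename V2, typename Pred>
    __device__ void reduceStep(volatile T* sdata, V1* sval1, V2* sval2,
                               T& myData, V1& myVal1, V2& myVal2,
                               int tid, int offset, const Pred& pred)
    {
        T reg = sdata[tid + offset];
        if (pred(reg, myData))
        {
            sdata[tid] = myData = reg;            // keep the winning key
            sval1[tid] = myVal1 = sval1[tid + offset]; // and both payloads
            sval2[tid] = myVal2 = sval2[tid + offset];
        }
    }
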

        template <bool warp> struct PredVal2ReductionDispatcher;
        template <> struct PredVal2ReductionDispatcher<true>
        {
            template <int n, typename T, typename V1, typename V2, typename Pred>
            static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
            {
                PredVal2WarpReductor<n>::reduce(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred);
            }
        };
        template <> struct PredVal2ReductionDispatcher<false>
        {
            template <int n, typename T, typename V1, typename V2, typename Pred>
            static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
            {
                myData = sdata[tid];
                myVal1 = sval1[tid];
                myVal2 = sval2[tid];

                if (n >= 512 && tid < 256)
                {
                    T reg = sdata[tid + 256];

                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 256];
                        sval2[tid] = myVal2 = sval2[tid + 256];
                    }
                    __syncthreads();
                }
                if (n >= 256 && tid < 128)
                {
                    T reg = sdata[tid + 128];

                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 128];
                        sval2[tid] = myVal2 = sval2[tid + 128];
                    }
                    __syncthreads();
                }
                if (n >= 128 && tid < 64)
                {
                    T reg = sdata[tid + 64];

                    if (pred(reg, myData))
                    {
                        sdata[tid] = myData = reg;
                        sval1[tid] = myVal1 = sval1[tid + 64];
                        sval2[tid] = myVal2 = sval2[tid + 64];
                    }
                    __syncthreads();
                }

                if (tid < 32)
                {
                    if (n >= 64)
                    {
                        T reg = sdata[tid + 32];

                        if (pred(reg, myData))
                        {
                            sdata[tid] = myData = reg;
                            sval1[tid] = myVal1 = sval1[tid + 32];
                            sval2[tid] = myVal2 = sval2[tid + 32];
                        }
                    }
                    if (n >= 32)
                    {
                        T reg = sdata[tid + 16];

                        if (pred(reg, myData))
                        {
                            sdata[tid] = myData = reg;
                            sval1[tid] = myVal1 = sval1[tid + 16];
                            sval2[tid] = myVal2 = sval2[tid + 16];
                        }
                    }
                    if (n >= 16)
                    {
                        T reg = sdata[tid + 8];

                        if (pred(reg, myData))
                        {
                            sdata[tid] = myData = reg;
                            sval1[tid] = myVal1 = sval1[tid + 8];
                            sval2[tid] = myVal2 = sval2[tid + 8];
                        }
                    }
                    if (n >= 8)
                    {
                        T reg = sdata[tid + 4];

                        if (pred(reg, myData))
                        {
                            sdata[tid] = myData = reg;
                            sval1[tid] = myVal1 = sval1[tid + 4];
                            sval2[tid] = myVal2 = sval2[tid + 4];
                        }
                    }
                    if (n >= 4)
                    {
                        T reg = sdata[tid + 2];

                        if (pred(reg, myData))
                        {
                            sdata[tid] = myData = reg;
                            sval1[tid] = myVal1 = sval1[tid + 2];
                            sval2[tid] = myVal2 = sval2[tid + 2];
                        }
                    }
                    if (n >= 2)
                    {
                        T reg = sdata[tid + 1];

                        if (pred(reg, myData))
                        {
                            sdata[tid] = myData = reg;
                            sval1[tid] = myVal1 = sval1[tid + 1];
                            sval2[tid] = myVal2 = sval2[tid + 1];
                        }
                    }
                }
            }
        };
    }
}}}

@ -121,7 +121,6 @@ namespace cv { namespace gpu { namespace device
    ///////////////////////////////////////////////////////////////////////////////
    // Reduction

    // reduction
    template <int n, typename T, typename Op> __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)
    {
        StaticAssert<n >= 8 && n <= 512>::check();
@ -135,6 +134,13 @@ namespace cv { namespace gpu { namespace device
        detail::PredValReductionDispatcher<n <= 64>::reduce<n>(myData, myVal, sdata, sval, tid, pred);
    }

    template <int n, typename T, typename V1, typename V2, typename Pred>
    __device__ __forceinline__ void reducePredVal2(volatile T* sdata, T& myData, V1* sval1, V1& myVal1, V2* sval2, V2& myVal2, int tid, const Pred& pred)
    {
        StaticAssert<n >= 8 && n <= 512>::check();
        detail::PredVal2ReductionDispatcher<n <= 64>::reduce<n>(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred);
    }
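
reducePredVal2 is the entry point the matcher kernels use to keep a best key and two payloads together, e.g. the smallest distance along with its train index and image index. A minimal sketch of a caller, assuming a one-block launch with BLOCK_SIZE threads; the kernel and the Less functor are illustrative, not part of this header:

    struct Less
    {
        __device__ bool operator()(float a, float b) const { return a < b; }
    };

    template <int BLOCK_SIZE>
    __global__ void findBest(const float* dist, const int* trainIdx, const int* imgIdx,
                             float* bestDist, int* bestTrainIdx, int* bestImgIdx)
    {
        __shared__ float sdist[BLOCK_SIZE];
        __shared__ int strain[BLOCK_SIZE];
        __shared__ int simg[BLOCK_SIZE];

        const int tid = threadIdx.x;

        // stage one candidate per thread in shared memory
        sdist[tid] = dist[tid];
        strain[tid] = trainIdx[tid];
        simg[tid] = imgIdx[tid];
        __syncthreads();

        float myDist;
        int myTrain, myImg;
        reducePredVal2<BLOCK_SIZE>(sdist, myDist, strain, myTrain, simg, myImg, tid, Less());

        if (tid == 0)   // thread 0 holds the winning triple after the reduction
        {
            *bestDist = myDist;
            *bestTrainIdx = myTrain;
            *bestImgIdx = myImg;
        }
    }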

    ///////////////////////////////////////////////////////////////////////////////
    // Solve linear system

@ -198,7 +198,7 @@ void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &feat

    // Find 1->2 matches
    pair_matches.clear();
    matcher.knnMatch(descriptors1_, descriptors2_, train_idx_, distance_, all_dist_, 2);
    matcher.knnMatchSingle(descriptors1_, descriptors2_, train_idx_, distance_, all_dist_, 2);
    matcher.knnMatchDownload(train_idx_, distance_, pair_matches);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
@ -215,7 +215,7 @@ void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &feat

    // Find 2->1 matches
    pair_matches.clear();
    matcher.knnMatch(descriptors2_, descriptors1_, train_idx_, distance_, all_dist_, 2);
    matcher.knnMatchSingle(descriptors2_, descriptors1_, train_idx_, distance_, all_dist_, 2);
    matcher.knnMatchDownload(train_idx_, distance_, pair_matches);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
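
In both directions the one-call knnMatch is replaced by the split knnMatchSingle/knnMatchDownload pair: the first leaves the k best candidates in GPU memory, the second copies them back and converts them to DMatch vectors. Keeping the two steps separate lets callers overlap the device work with other CPU work; a hedged sketch, assuming knnMatchSingle takes the same trailing mask and Stream parameters as matchSingle:

    cv::gpu::Stream stream;
    // enqueue the device-side matching; the call returns immediately
    matcher.knnMatchSingle(descriptors1_, descriptors2_, train_idx_, distance_, all_dist_, 2,
                           cv::gpu::GpuMat(), stream);
    // ... other CPU work can run here ...
    stream.waitForCompletion();
    // host-side copy + conversion, done once the results are needed
    matcher.knnMatchDownload(train_idx_, distance_, pair_matches);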
@ -413,38 +413,55 @@ TEST(BruteForceMatcher)

    // Output
    vector< vector<DMatch> > matches(2);
    vector< vector<DMatch> > d_matches(2);
    gpu::GpuMat d_trainIdx, d_distance, d_allDist, d_nMatches;

    SUBTEST << "match";

    matcher.match(query, train, matches[0]);
    CPU_ON;
    matcher.match(query, train, matches[0]);
    CPU_OFF;

    d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);
    GPU_ON;
    d_matcher.match(d_query, d_train, d_matches[0]);
    d_matcher.matchSingle(d_query, d_train, d_trainIdx, d_distance);
    GPU_OFF;

    SUBTEST << "knnMatch";
    int knn = 2;
    SUBTEST << "knnMatch, 2";

    matcher.knnMatch(query, train, matches, 2);
    CPU_ON;
    matcher.knnMatch(query, train, matches, knn);
    matcher.knnMatch(query, train, matches, 2);
    CPU_OFF;

    d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);
    GPU_ON;
    d_matcher.knnMatch(d_query, d_train, d_matches, knn);
    d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);
    GPU_OFF;

    SUBTEST << "knnMatch, 3";

    matcher.knnMatch(query, train, matches, 3);
    CPU_ON;
    matcher.knnMatch(query, train, matches, 3);
    CPU_OFF;

    d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 3);
    GPU_ON;
    d_matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 3);
    GPU_OFF;

    SUBTEST << "radiusMatch";
    float max_distance = 2.0f;

    matcher.radiusMatch(query, train, matches, max_distance);
    CPU_ON;
    matcher.radiusMatch(query, train, matches, max_distance);
    CPU_OFF;

    d_matcher.radiusMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_nMatches, max_distance);
    GPU_ON;
    d_matcher.radiusMatch(d_query, d_train, d_matches, max_distance);
    d_matcher.radiusMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_nMatches, max_distance);
    GPU_OFF;
}
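
Note the benchmark shape: each variant is run once before CPU_ON/GPU_ON to absorb one-time CUDA initialization and allocation, and the GPU subtests time the *Single calls only, so the figures cover device-side matching without the host download. Converting to DMatch vectors is a separate, untimed step; a sketch of that step, placed after the corresponding subtest's GPU_OFF (the result GpuMats are reused across subtests, so convert before the next subtest overwrites them):

    // convert device results to DMatch vectors outside the timed region
    d_matcher.matchDownload(d_trainIdx, d_distance, d_matches[0]);   // after the "match" subtest
    d_matcher.knnMatchDownload(d_trainIdx, d_distance, d_matches);   // after a "knnMatch" subtest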