Mirror of https://github.com/opencv/opencv.git
Added fix for 0 found features in stitching matcher
parent 94b3bb154e
commit a72da12c80
@@ -604,6 +604,78 @@ namespace cv { namespace gpu { namespace device
     template void pow_caller<float>(const DevMem2D& src, float power, DevMem2D dst, cudaStream_t stream);
 
+    //////////////////////////////////////////////////////////////////////////
+    // divide
+
+    struct divide_8uc4_32f : binary_function<uchar4, float, uchar4>
+    {
+        __device__ __forceinline__ uchar4 operator ()(uchar4 a, float b) const
+        {
+            return make_uchar4(saturate_cast<uchar>(a.x / b), saturate_cast<uchar>(a.y / b),
+                               saturate_cast<uchar>(a.z / b), saturate_cast<uchar>(a.w / b));
+        }
+    };
+
+    template <> struct TransformFunctorTraits<divide_8uc4_32f> : DefaultTransformFunctorTraits<divide_8uc4_32f>
+    {
+        enum { smart_block_dim_x = 8 };
+        enum { smart_block_dim_y = 8 };
+        enum { smart_shift = 8 };
+    };
+
+    void divide_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream)
+    {
+        transform(static_cast< DevMem2D_<uchar4> >(src1), src2, static_cast< DevMem2D_<uchar4> >(dst), divide_8uc4_32f(), stream);
+    }
+
+
+    struct divide_16sc4_32f : binary_function<short4, float, short4>
+    {
+        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
+        {
+            return make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
+                               saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b));
+        }
+    };
+
+    template <> struct TransformFunctorTraits<divide_16sc4_32f> : DefaultTransformFunctorTraits<divide_16sc4_32f>
+    {
+        enum { smart_block_dim_x = 8 };
+        enum { smart_block_dim_y = 8 };
+        enum { smart_shift = 8 };
+    };
+
+    void divide_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
+    {
+        transform(static_cast< DevMem2D_<short4> >(src1), src2, static_cast< DevMem2D_<short4> >(dst), divide_16sc4_32f(), stream);
+    }
+
+
+    //////////////////////////////////////////////////////////////////////////
+    // add
+
+    struct add_16sc4 : binary_function<short4, short4, short4>
+    {
+        __device__ __forceinline__ short4 operator ()(short4 a, short4 b) const
+        {
+            return make_short4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
+        }
+    };
+
+    template <> struct TransformFunctorTraits<add_16sc4> : DefaultTransformFunctorTraits<add_16sc4>
+    {
+        enum { smart_block_dim_x = 8 };
+        enum { smart_block_dim_y = 8 };
+        enum { smart_shift = 8 };
+    };
+
+    void add_gpu(const DevMem2D_<short4>& src1, const DevMem2D_<short4>& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
+    {
+        transform(static_cast< DevMem2D_<short4> >(src1), static_cast< DevMem2D_<short4> >(src2),
+                  static_cast< DevMem2D_<short4> >(dst), add_16sc4(), stream);
+    }
+
+
     //////////////////////////////////////////////////////////////////////////
     // multiply
 
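All of the functors above rely on saturate_cast to clamp each per-channel result to the destination type's range instead of letting it wrap. A small host-side C++ sketch of that clamping behaviour, with saturate_to_short as a hypothetical stand-in for cv::saturate_cast<short>(float) (the real cast also rounds, but the point here is only the clamping):

// Host-side sketch, not part of the commit.
#include <climits>
#include <cstdio>

static short saturate_to_short(float v)
{
    if (v >= static_cast<float>(SHRT_MAX)) return SHRT_MAX;  // clamp high
    if (v <= static_cast<float>(SHRT_MIN)) return SHRT_MIN;  // clamp low
    return static_cast<short>(v);                            // in range: plain cast
}

int main()
{
    // 32767 / 0.5f = 65534, which does not fit in a short.
    std::printf("saturating: %d\n", saturate_to_short(32767 / 0.5f));                  // 32767
    std::printf("plain cast: %d\n",
                static_cast<short>(static_cast<int>(32767 / 0.5f)));                  // implementation-defined, typically wraps to -2
    return 0;
}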
@@ -634,6 +706,29 @@ namespace cv { namespace gpu { namespace device
         transform(static_cast< DevMem2D_<uint> >(src1), src2, static_cast< DevMem2D_<uint> >(dst), multiply_8uc4_32f(), stream);
     }
 
+
+    struct multiply_16sc4_32f : binary_function<short4, float, short4>
+    {
+        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
+        {
+            return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
+                               saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
+        }
+    };
+
+    template <> struct TransformFunctorTraits<multiply_16sc4_32f> : DefaultTransformFunctorTraits<multiply_16sc4_32f>
+    {
+        enum { smart_block_dim_x = 8 };
+        enum { smart_block_dim_y = 8 };
+        enum { smart_shift = 8 };
+    };
+
+    void multiply_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream)
+    {
+        transform(static_cast< DevMem2D_<short4> >(src1), src2,
+                  static_cast< DevMem2D_<short4> >(dst), multiply_16sc4_32f(), stream);
+    }
+
     //////////////////////////////////////////////////////////////////////////
     // multiply (by scalar)
 
@@ -174,9 +174,21 @@ namespace
     };
 }
 
+namespace cv { namespace gpu { namespace device
+{
+    void add_gpu(const DevMem2D_<short4>& src1, const DevMem2D_<short4>& src2, const DevMem2D_<short4>& dst, cudaStream_t stream);
+}}}
+
 void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
 {
-    nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R, StreamAccessor::getStream(stream));
+    if (src1.type() == CV_16SC4 && src2.type() == CV_16SC4)
+    {
+        CV_Assert(src1.size() == src2.size());
+        dst.create(src1.size(), src1.type());
+        device::add_gpu(src1, src2, dst, StreamAccessor::getStream(stream));
+    }
+    else
+        nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R, StreamAccessor::getStream(stream));
 }
 
 namespace cv { namespace gpu { namespace device
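With the dispatch above, two CV_16SC4 images can be added directly through the public cv::gpu::add overload shown in this hunk. A hedged usage sketch, not part of the commit; add_short4_example, the image size and the fill values are made up:

// Hedged usage sketch against the 2.x gpu API.
#include <opencv2/gpu/gpu.hpp>

void add_short4_example()
{
    cv::Mat ha(480, 640, CV_16SC4, cv::Scalar::all(100));
    cv::Mat hb(480, 640, CV_16SC4, cv::Scalar::all(23));
    cv::gpu::GpuMat a(ha), b(hb);        // upload the host images to the device

    cv::gpu::GpuMat sum;
    cv::gpu::Stream stream;              // optional asynchronous stream
    cv::gpu::add(a, b, sum, stream);     // dispatches to device::add_gpu above
    stream.waitForCompletion();          // sum now holds a + b, per channel
}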
@@ -200,6 +212,7 @@ void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
 namespace cv { namespace gpu { namespace device
 {
     void multiply_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream);
+    void multiply_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream);
 
     template <typename T, typename D>
     void multiplyScalar_gpu(const DevMem2D& src, float scale, const DevMem2D& dst, cudaStream_t stream);
@@ -213,7 +226,17 @@ void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
 
         dst.create(src1.size(), src1.type());
 
-        device::multiply_gpu(src1, src2, dst, StreamAccessor::getStream(stream));
+        device::multiply_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2),
+                             static_cast<DevMem2D_<uchar4> >(dst), StreamAccessor::getStream(stream));
+    }
+    else if (src1.type() == CV_16SC4 && src2.type() == CV_32FC1)
+    {
+        CV_Assert(src1.size() == src2.size());
+
+        dst.create(src1.size(), src1.type());
+
+        device::multiply_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2),
+                             static_cast<DevMem2D_<short4> >(dst), StreamAccessor::getStream(stream));
     }
     else
         nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R, StreamAccessor::getStream(stream));
@@ -249,9 +272,35 @@ void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
 }
 
 
+namespace cv { namespace gpu { namespace device
+{
+    void divide_gpu(const DevMem2D_<uchar4>& src1, const DevMem2Df& src2, const DevMem2D_<uchar4>& dst, cudaStream_t stream);
+    void divide_gpu(const DevMem2D_<short4>& src1, const DevMem2Df& src2, const DevMem2D_<short4>& dst, cudaStream_t stream);
+}}}
+
+
 void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
 {
-    nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R, StreamAccessor::getStream(stream));
+    if (src1.type() == CV_8UC4 && src2.type() == CV_32FC1)
+    {
+        CV_Assert(src1.size() == src2.size());
+
+        dst.create(src1.size(), src1.type());
+
+        device::divide_gpu(static_cast<DevMem2D_<uchar4> >(src1), static_cast<DevMem2Df>(src2),
+                           static_cast<DevMem2D_<uchar4> >(dst), StreamAccessor::getStream(stream));
+    }
+    else if (src1.type() == CV_16SC4 && src2.type() == CV_32FC1)
+    {
+        CV_Assert(src1.size() == src2.size());
+
+        dst.create(src1.size(), src1.type());
+
+        device::divide_gpu(static_cast<DevMem2D_<short4> >(src1), static_cast<DevMem2Df>(src2),
+                           static_cast<DevMem2D_<short4> >(dst), StreamAccessor::getStream(stream));
+    }
+    else
+        nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R, StreamAccessor::getStream(stream));
 }
 
 void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst, Stream& stream)
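The divide dispatch works the same way: a CV_8UC4 image can now be divided per pixel by a CV_32FC1 weight map, the kind of normalization a GPU blending step needs. A hedged sketch, with divide_by_weights as a made-up wrapper around the overload added above:

// Hedged sketch, not from the commit.
#include <opencv2/gpu/gpu.hpp>

void divide_by_weights(const cv::gpu::GpuMat& accum,    // CV_8UC4 accumulated color image
                       const cv::gpu::GpuMat& weights,  // CV_32FC1 weight map, same size
                       cv::gpu::GpuMat& normalized,
                       cv::gpu::Stream& stream)
{
    CV_Assert(accum.type() == CV_8UC4 && weights.type() == CV_32FC1);
    CV_Assert(accum.size() == weights.size());

    cv::gpu::divide(accum, weights, normalized, stream); // hits the uchar4 / float branch above
}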
@@ -347,7 +347,8 @@ void FeaturesMatcher::operator ()(const vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches)
     vector<pair<int,int> > near_pairs;
     for (int i = 0; i < num_images - 1; ++i)
         for (int j = i + 1; j < num_images; ++j)
-            near_pairs.push_back(make_pair(i, j));
+            if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0)
+                near_pairs.push_back(make_pair(i, j));
 
     pairwise_matches.resize(num_images * num_images);
     MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
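This last hunk is the fix named in the commit title: an image for which the detector found zero keypoints no longer contributes any pair to near_pairs, so the pairwise matcher is never run against an empty descriptor set. A standalone sketch of the same filtering idea, where Features and buildNearPairs are hypothetical stand-ins for detail::ImageFeatures and the patched loop in FeaturesMatcher::operator():

// Standalone sketch with library types replaced by stand-ins.
#include <utility>
#include <vector>

struct Features
{
    int num_keypoints;   // stand-in for features[i].keypoints.size()
};

std::vector<std::pair<int, int> > buildNearPairs(const std::vector<Features>& features)
{
    std::vector<std::pair<int, int> > near_pairs;
    const int num_images = static_cast<int>(features.size());

    for (int i = 0; i < num_images - 1; ++i)
        for (int j = i + 1; j < num_images; ++j)
            if (features[i].num_keypoints > 0 && features[j].num_keypoints > 0)
                near_pairs.push_back(std::make_pair(i, j));   // featureless images are simply skipped

    return near_pairs;
}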