implemented gpu::copyMakeBorder for all border modes
This commit is contained in:
parent 8483f9ce40
commit 64119dd924
@@ -662,8 +662,7 @@ namespace cv
CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());

//! copies 2D array to a larger destination array and pads borders with user-specifiable constant
//! supports CV_8UC1, CV_8UC4, CV_32SC1 and CV_32FC1 types
CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value = Scalar(), Stream& stream = Stream::Null());
CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value = Scalar(), Stream& stream = Stream::Null());

//! computes the integral image
//! sum will have CV_32S type, but will contain unsigned int values
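The change above replaces the constant-only overload with one that takes an explicit borderType, so callers choose the extrapolation mode themselves. A minimal host-side usage sketch, assuming the opencv2/gpu/gpu.hpp header of this revision and a CUDA-capable device (the matrix contents are synthetic):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(127)); // synthetic single-channel image

    cv::gpu::GpuMat src(host), dst;

    // New overload: the border mode is explicit; the Scalar only matters for BORDER_CONSTANT.
    cv::gpu::copyMakeBorder(src, dst, 16, 16, 16, 16, cv::BORDER_REFLECT101, cv::Scalar::all(0));

    cv::Mat result(dst); // download the padded result for inspection
    return 0;
}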
@@ -460,13 +460,15 @@ PERF_TEST_P(DevInfo_Size_MatType_Interpolation, rotate, testing::Combine(testing
SANITY_CHECK(dst_host);
}

PERF_TEST_P(DevInfo_Size_MatType, copyMakeBorder, testing::Combine(testing::ValuesIn(devices()),
testing::Values(GPU_TYPICAL_MAT_SIZES),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1)))
PERF_TEST_P(DevInfo_Size_MatType_BorderMode, copyMakeBorder, testing::Combine(testing::ValuesIn(devices()),
testing::Values(GPU_TYPICAL_MAT_SIZES),
testing::Values(CV_8UC1, CV_8UC4, CV_32FC1),
testing::Values((int)BORDER_REPLICATE, (int)BORDER_CONSTANT)))
{
DeviceInfo devInfo = std::tr1::get<0>(GetParam());
Size size = std::tr1::get<1>(GetParam());
int type = std::tr1::get<2>(GetParam());
int borderType = std::tr1::get<3>(GetParam());

setDevice(devInfo.deviceID());

@@ -481,7 +483,7 @@ PERF_TEST_P(DevInfo_Size_MatType, copyMakeBorder, testing::Combine(testing::Valu

SIMPLE_TEST_CYCLE()
{
copyMakeBorder(src, dst, 5, 5, 5, 5);
copyMakeBorder(src, dst, 5, 5, 5, 5, borderType);
}

Mat dst_host(dst);
@@ -49,6 +49,7 @@ typedef TestBaseWithParam< std::tr1::tuple<DeviceInfo, Size, NormType> > DevInfo
typedef TestBaseWithParam< std::tr1::tuple<DeviceInfo, Size, MatType, NormType> > DevInfo_Size_MatType_NormType;
typedef TestBaseWithParam< std::tr1::tuple<DeviceInfo, int> > DevInfo_DescSize;
typedef TestBaseWithParam< std::tr1::tuple<DeviceInfo, int, int> > DevInfo_K_DescSize;
typedef TestBaseWithParam< std::tr1::tuple<DeviceInfo, Size, MatType, BorderMode> > DevInfo_Size_MatType_BorderMode;

const cv::Size sz1800x1500 = cv::Size(1800, 1500);
const cv::Size sz4700x3000 = cv::Size(4700, 3000);
@@ -109,12 +109,6 @@ namespace cv { namespace gpu { namespace filters

B<T> b(src.rows);

if (!b.is_range_safe(-BLOCK_DIM_Y, (grid.y + 1) * BLOCK_DIM_Y - 1))
{
cv::gpu::error("linearColumnFilter: can't use specified border extrapolation, image is too small, "
"try bigger image or another border extrapolation mode", __FILE__, __LINE__);
}

filter_krnls_column::linearColumnFilter<ksize, T, D><<<grid, threads, 0, stream>>>(src, dst, anchor, b);
cudaSafeCall( cudaGetLastError() );
modules/gpu/src/cuda/copy_make_border.cu (new file, 127 lines)
@@ -0,0 +1,127 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"

using namespace cv::gpu;
using namespace cv::gpu::device;

namespace cv { namespace gpu { namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, DevMem2D_<T> dst, int top, int left)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;

if (x < dst.cols && y < dst.rows)
dst.ptr(y)[x] = src(y - top, x - left);
}

template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher
{
static void call(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, int top, int left,
const typename VecTraits<T>::elem_type* borderValue, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));

B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue));
BorderReader< PtrStep_<T>, B<T> > brdSrc(src, brd);

copyMakeBorder<<<grid, block, 0, stream>>>(brdSrc, dst, top, left);
cudaSafeCall( cudaGetLastError() );

if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};

template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode,
const T* borderValue, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type vec_type;

typedef void (*caller_t)(const DevMem2D_<vec_type>& src, const DevMem2D_<vec_type>& dst, int top, int left, const T* borderValue, cudaStream_t stream);

static const caller_t callers[5] =
{
CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,
CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,
CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,
CopyMakeBorderDispatcher<BrdWrap, vec_type>::call
};

callers[borderMode](DevMem2D_<vec_type>(src), DevMem2D_<vec_type>(dst), top, left, borderValue, stream);
}

template void copyMakeBorder_gpu<uchar, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<uchar, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<uchar, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream);

//template void copyMakeBorder_gpu<schar, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<schar, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream);

template void copyMakeBorder_gpu<ushort, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<ushort, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<ushort, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream);

template void copyMakeBorder_gpu<short, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<short, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<short, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream);

//template void copyMakeBorder_gpu<int, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<int, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream);

template void copyMakeBorder_gpu<float, 1>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
//template void copyMakeBorder_gpu<float, 2>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 3>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
template void copyMakeBorder_gpu<float, 4>(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream);
}}}
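In the kernel above every destination pixel (x, y) simply reads source position (y - top, x - left); the BorderReader wrapping the source resolves coordinates that fall outside the image according to the chosen policy (BrdReflect101, BrdReplicate, BrdConstant, BrdReflect or BrdWrap). A rough CPU analogue of the same idea for one policy, replicate clamping on an 8-bit single-channel image, is sketched below (not part of the commit, only an illustration):

#include <opencv2/core/core.hpp>
#include <algorithm>

// Host-side sketch of the kernel's dst -> src mapping with a replicate border:
// out-of-range source coordinates are clamped to the nearest valid row/column.
static void copyMakeBorderReplicateRef(const cv::Mat& src, cv::Mat& dst, int top, int left)
{
    CV_Assert(src.type() == CV_8UC1 && dst.type() == CV_8UC1);
    for (int y = 0; y < dst.rows; ++y)
    {
        for (int x = 0; x < dst.cols; ++x)
        {
            const int sy = std::min(std::max(y - top, 0), src.rows - 1);
            const int sx = std::min(std::max(x - left, 0), src.cols - 1);
            dst.at<uchar>(y, x) = src.at<uchar>(sy, sx);
        }
    }
}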
@@ -125,12 +125,6 @@ namespace cv { namespace gpu { namespace filters
typedef typename filter_krnls_row::SmemType<T>::smem_t smem_t;
B<smem_t> b(src.cols);

if (!b.is_range_safe(-BLOCK_DIM_X, (grid.x + 1) * BLOCK_DIM_X - 1))
{
cv::gpu::error("linearRowFilter: can't use specified border extrapolation, image is too small, "
"try bigger image or another border extrapolation mode", __FILE__, __LINE__);
}

filter_krnls_row::linearRowFilter<ksize, T, D><<<grid, threads, 0, stream>>>(src, dst, anchor, b);
cudaSafeCall( cudaGetLastError() );
@@ -53,7 +53,7 @@ void cv::gpu::meanShiftProc(const GpuMat&, GpuMat&, GpuMat&, int, int, TermCrite
void cv::gpu::drawColorDisp(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::reprojectImageTo3D(const GpuMat&, GpuMat&, const Mat&, Stream&) { throw_nogpu(); }
void cv::gpu::resize(const GpuMat&, GpuMat&, Size, double, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::warpAffine(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::warpPerspective(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
@@ -360,60 +360,99 @@ void cv::gpu::resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx, doub
////////////////////////////////////////////////////////////////////////
// copyMakeBorder

void cv::gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value, Stream& s)
namespace cv { namespace gpu { namespace imgproc
{
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1 || src.type() == CV_32FC1);
template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const T* borderValue, cudaStream_t stream);
}}}

namespace
{
template <typename T, int cn> void copyMakeBorder_caller(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream)
{
Scalar_<T> val(saturate_cast<T>(value[0]), saturate_cast<T>(value[1]), saturate_cast<T>(value[2]), saturate_cast<T>(value[3]));

imgproc::copyMakeBorder_gpu<T, cn>(src, dst, top, left, borderType, val.val, stream);
}
}

void cv::gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value, Stream& s)
{
CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);

dst.create(src.rows + top + bottom, src.cols + left + right, src.type());

NppiSize srcsz;
srcsz.width = src.cols;
srcsz.height = src.rows;
NppiSize dstsz;
dstsz.width = dst.cols;
dstsz.height = dst.rows;

cudaStream_t stream = StreamAccessor::getStream(s);

NppStreamHandler h(stream);

switch (src.type())
if (borderType == BORDER_CONSTANT && (src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1 || src.type() == CV_32FC1))
{
case CV_8UC1:
{
Npp8u nVal = static_cast<Npp8u>(value[0]);
nppSafeCall( nppiCopyConstBorder_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
case CV_8UC4:
{
Npp8u nVal[] = {static_cast<Npp8u>(value[0]), static_cast<Npp8u>(value[1]), static_cast<Npp8u>(value[2]), static_cast<Npp8u>(value[3])};
nppSafeCall( nppiCopyConstBorder_8u_C4R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
case CV_32SC1:
{
Npp32s nVal = static_cast<Npp32s>(value[0]);
nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
case CV_32FC1:
{
Npp32f val = static_cast<Npp32f>(value[0]);
Npp32s nVal = *(reinterpret_cast<Npp32s*>(&val));
nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
default:
CV_Assert(!"Unsupported source type");
}
NppiSize srcsz;
srcsz.width = src.cols;
srcsz.height = src.rows;

if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
NppiSize dstsz;
dstsz.width = dst.cols;
dstsz.height = dst.rows;

NppStreamHandler h(stream);

switch (src.type())
{
case CV_8UC1:
{
Npp8u nVal = saturate_cast<Npp8u>(value[0]);
nppSafeCall( nppiCopyConstBorder_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
case CV_8UC4:
{
Npp8u nVal[] = {saturate_cast<Npp8u>(value[0]), saturate_cast<Npp8u>(value[1]), saturate_cast<Npp8u>(value[2]), saturate_cast<Npp8u>(value[3])};
nppSafeCall( nppiCopyConstBorder_8u_C4R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
case CV_32SC1:
{
Npp32s nVal = saturate_cast<Npp32s>(value[0]);
nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
case CV_32FC1:
{
Npp32f val = saturate_cast<Npp32f>(value[0]);
Npp32s nVal = *(reinterpret_cast<Npp32s*>(&val));
nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
break;
}
}

if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
typedef void (*caller_t)(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream);
static const caller_t callers[6][4] =
{
{ copyMakeBorder_caller<uchar, 1> , 0/*copyMakeBorder_caller<uchar, 2>*/ , copyMakeBorder_caller<uchar, 3> , copyMakeBorder_caller<uchar, 4>},
{0/*copyMakeBorder_caller<schar, 1>*/, 0/*copyMakeBorder_caller<schar, 2>*/ , 0/*copyMakeBorder_caller<schar, 3>*/, 0/*copyMakeBorder_caller<schar, 4>*/},
{ copyMakeBorder_caller<ushort, 1> , 0/*copyMakeBorder_caller<ushort, 2>*/, copyMakeBorder_caller<ushort, 3> , copyMakeBorder_caller<ushort, 4>},
{ copyMakeBorder_caller<short, 1> , 0/*copyMakeBorder_caller<short, 2>*/ , copyMakeBorder_caller<short, 3> , copyMakeBorder_caller<short, 4>},
{0/*copyMakeBorder_caller<int, 1>*/ , 0/*copyMakeBorder_caller<int, 2>*/ , 0/*copyMakeBorder_caller<int, 3>*/ , 0/*copyMakeBorder_caller<int, 4>*/},
{ copyMakeBorder_caller<float, 1> , 0/*copyMakeBorder_caller<float, 2>*/ , copyMakeBorder_caller<float, 3> , copyMakeBorder_caller<float ,4>}
};

caller_t func = callers[src.depth()][src.channels() - 1];
CV_Assert(func != 0);

int gpuBorderType;
CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

func(src, dst, top, left, gpuBorderType, value, stream);
}
}

////////////////////////////////////////////////////////////////////////
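The host wrapper above keeps the NPP constant-border fast path for the four formats NPP handles and falls back to the CUDA kernel for every other type and mode, picking the instantiation from a depth-by-channels table. The gpuBorderType it passes down indexes the callers[5] array in copy_make_border.cu, so the conversion presumably amounts to the mapping sketched here; tryConvertToGpuBorderType itself is not shown in this diff, so treat this only as an assumption inferred from the array order:

#include <opencv2/imgproc/imgproc.hpp>

// Hypothetical sketch of the OpenCV-to-GPU border-type mapping implied by the
// callers[5] order in copy_make_border.cu (Reflect101, Replicate, Constant, Reflect, Wrap).
static bool tryConvertToGpuBorderType_sketch(int cpuBorderType, int& gpuBorderType)
{
    switch (cpuBorderType)
    {
    case cv::BORDER_REFLECT101: gpuBorderType = 0; return true; // BrdReflect101
    case cv::BORDER_REPLICATE:  gpuBorderType = 1; return true; // BrdReplicate
    case cv::BORDER_CONSTANT:   gpuBorderType = 2; return true; // BrdConstant
    case cv::BORDER_REFLECT:    gpuBorderType = 3; return true; // BrdReflect
    case cv::BORDER_WRAP:       gpuBorderType = 4; return true; // BrdWrap
    default:                    return false;
    }
}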
@@ -73,11 +73,6 @@ namespace cv { namespace gpu { namespace device
return (x >= 0 && x < width) ? saturate_cast<D>(data[x]) : val;
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return true;
}

const int width;
const D val;
};
@@ -103,11 +98,6 @@ namespace cv { namespace gpu { namespace device
return (y >= 0 && y < height) ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return true;
}

const int height;
const D val;
};
@@ -116,8 +106,7 @@ namespace cv { namespace gpu { namespace device
{
typedef D result_type;

__host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits<D>::all(0)) :
height(height_), width(width_), val(val_)
__host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits<D>::all(0)) : height(height_), width(width_), val(val_)
{
}
@@ -176,11 +165,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(data[idx_col(x)]);
}

bool is_range_safe(int mini, int maxi) const
{
return true;
}

const int last_col;
};
@@ -221,11 +205,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(*(const T*)((const char*)data + idx_row(y) * step));
}

bool is_range_safe(int mini, int maxi) const
{
return true;
}

const int last_row;
};
@@ -233,15 +212,8 @@ namespace cv { namespace gpu { namespace device
{
typedef D result_type;

__host__ __device__ __forceinline__ BrdReplicate(int height, int width) :
last_row(height - 1), last_col(width - 1)
{
}
template <typename U>
__host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) :
last_row(height - 1), last_col(width - 1)
{
}
__host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}

__device__ __forceinline__ int idx_row_low(int y) const
{
@@ -299,12 +271,12 @@ namespace cv { namespace gpu { namespace device

__device__ __forceinline__ int idx_col_low(int x) const
{
return ::abs(x);
return ::abs(x) % (last_col + 1);
}

__device__ __forceinline__ int idx_col_high(int x) const
{
return last_col - ::abs(last_col - x);
return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
}

__device__ __forceinline__ int idx_col(int x) const
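The recurring change in this header is the % (last_col + 1) and % (last_row + 1) added to the mirrored index helpers. For offsets within one image width the result is unchanged (the un-wrapped value is already at most last_col), but for larger offsets the computed index now always stays inside [0, last_col] instead of running past the row. That is what lets copyMakeBorder accept arbitrarily wide borders, and presumably also why the is_range_safe() guards and their callers in the row/column filters could be dropped; for such far-out coordinates the picked pixel is not necessarily the exact multi-period mirror, but it is always a valid one. A small standalone illustration, assuming a 5-pixel row (last_col = 4):

#include <cstdio>
#include <cstdlib>

int main()
{
    const int last_col = 4; // 5-pixel row, valid indices 0..4

    // Compare the old and new idx_col_low() for increasingly negative x:
    // e.g. x = -7 gave 7 (out of range) before, and 7 % 5 = 2 now.
    for (int x = 0; x >= -9; --x)
        std::printf("x=%3d  old=%d  new=%d\n", x, std::abs(x), std::abs(x) % (last_col + 1));
    return 0;
}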
@@ -327,11 +299,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(data[idx_col(x)]);
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return -last_col <= mini && maxi <= 2 * last_col;
}

const int last_col;
};
@@ -344,12 +311,12 @@ namespace cv { namespace gpu { namespace device

__device__ __forceinline__ int idx_row_low(int y) const
{
return ::abs(y);
return ::abs(y) % (last_row + 1);
}

__device__ __forceinline__ int idx_row_high(int y) const
{
return last_row - ::abs(last_row - y);
return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
}

__device__ __forceinline__ int idx_row(int y) const
@@ -372,11 +339,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return -last_row <= mini && maxi <= 2 * last_row;
}

const int last_row;
};
@@ -384,24 +346,17 @@ namespace cv { namespace gpu { namespace device
{
typedef D result_type;

__host__ __device__ __forceinline__ BrdReflect101(int height, int width) :
last_row(height - 1), last_col(width - 1)
{
}
template <typename U>
__host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) :
last_row(height - 1), last_col(width - 1)
{
}
__host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}

__device__ __forceinline__ int idx_row_low(int y) const
{
return ::abs(y);
return ::abs(y) % (last_row + 1);
}

__device__ __forceinline__ int idx_row_high(int y) const
{
return last_row - ::abs(last_row - y);
return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
}

__device__ __forceinline__ int idx_row(int y) const
@@ -411,12 +366,12 @@ namespace cv { namespace gpu { namespace device

__device__ __forceinline__ int idx_col_low(int x) const
{
return ::abs(x);
return ::abs(x) % (last_col + 1);
}

__device__ __forceinline__ int idx_col_high(int x) const
{
return last_col - ::abs(last_col - x);
return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
}

__device__ __forceinline__ int idx_col(int x) const
@@ -450,12 +405,12 @@ namespace cv { namespace gpu { namespace device

__device__ __forceinline__ int idx_col_low(int x) const
{
return ::abs(x) - (x < 0);
return (::abs(x) - (x < 0)) % (last_col + 1);
}

__device__ __forceinline__ int idx_col_high(int x) const
{
return last_col - ::abs(last_col - x) + (x > last_col);
return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1);
}

__device__ __forceinline__ int idx_col(int x) const
@@ -478,11 +433,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(data[idx_col(x)]);
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return -last_col <= mini && maxi <= 2 * last_col;
}

const int last_col;
};
@@ -495,12 +445,12 @@ namespace cv { namespace gpu { namespace device

__device__ __forceinline__ int idx_row_low(int y) const
{
return ::abs(y) - (y < 0);
return (::abs(y) - (y < 0)) % (last_row + 1);
}

__device__ __forceinline__ int idx_row_high(int y) const
{
return last_row - ::abs(last_row - y) + (y > last_row);
return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1);
}

__device__ __forceinline__ int idx_row(int y) const
@@ -523,11 +473,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return -last_row <= mini && maxi <= 2 * last_row;
}

const int last_row;
};
@@ -535,24 +480,17 @@ namespace cv { namespace gpu { namespace device
{
typedef D result_type;

__host__ __device__ __forceinline__ BrdReflect(int height, int width) :
last_row(height - 1), last_col(width - 1)
{
}
template <typename U>
__host__ __device__ __forceinline__ BrdReflect(int height, int width, U) :
last_row(height - 1), last_col(width - 1)
{
}
__host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {}
template <typename U> __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}

__device__ __forceinline__ int idx_row_low(int y) const
{
return ::abs(y) - (y < 0);
return (::abs(y) - (y < 0)) % (last_row + 1);
}

__device__ __forceinline__ int idx_row_high(int y) const
{
return last_row - ::abs(last_row - y) + (y > last_row);
return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/;
}

__device__ __forceinline__ int idx_row(int y) const
@@ -562,12 +500,12 @@ namespace cv { namespace gpu { namespace device

__device__ __forceinline__ int idx_col_low(int x) const
{
return ::abs(x) - (x < 0);
return (::abs(x) - (x < 0)) % (last_col + 1);
}

__device__ __forceinline__ int idx_col_high(int x) const
{
return last_col - ::abs(last_col - x) + (x > last_col);
return /*::abs*/(last_col - ::abs(last_col - x) + (x > last_col)) /*% (last_col + 1)*/;
}

__device__ __forceinline__ int idx_col(int x) const
@@ -629,11 +567,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(data[idx_col(x)]);
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return true;
}

const int width;
};
@@ -674,11 +607,6 @@ namespace cv { namespace gpu { namespace device
return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
}

__host__ __device__ __forceinline__ bool is_range_safe(int mini, int maxi) const
{
return true;
}

const int height;
};
@@ -262,10 +262,11 @@ INSTANTIATE_TEST_CASE_P
///////////////////////////////////////////////////////////////////////////////////////////////////////
// copyMakeBorder

struct CopyMakeBorder : testing::TestWithParam< std::tr1::tuple<cv::gpu::DeviceInfo, int> >
struct CopyMakeBorder : testing::TestWithParam< std::tr1::tuple<cv::gpu::DeviceInfo, int, int> >
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;

cv::Size size;
cv::Mat src;
@@ -281,6 +282,7 @@ struct CopyMakeBorder : testing::TestWithParam< std::tr1::tuple<cv::gpu::DeviceI
{
devInfo = std::tr1::get<0>(GetParam());
type = std::tr1::get<1>(GetParam());
borderType = std::tr1::get<2>(GetParam());

cv::gpu::setDevice(devInfo.deviceID());
@@ -296,12 +298,16 @@ struct CopyMakeBorder : testing::TestWithParam< std::tr1::tuple<cv::gpu::DeviceI
right = rng.uniform(1, 10);
val = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));

cv::copyMakeBorder(src, dst_gold, top, botton, left, right, cv::BORDER_CONSTANT, val);
cv::copyMakeBorder(src, dst_gold, top, botton, left, right, borderType, val);
}
};

TEST_P(CopyMakeBorder, Accuracy)
{
static const char* borderTypes_str[] = {"BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101"};

const char* borderTypeStr = borderTypes_str[borderType];

PRINT_PARAM(devInfo);
PRINT_TYPE(type);
PRINT_PARAM(size);
@@ -309,6 +315,7 @@ TEST_P(CopyMakeBorder, Accuracy)
PRINT_PARAM(botton);
PRINT_PARAM(left);
PRINT_PARAM(right);
PRINT_PARAM(borderTypeStr);
PRINT_PARAM(val);

cv::Mat dst;
@@ -316,7 +323,7 @@ TEST_P(CopyMakeBorder, Accuracy)
ASSERT_NO_THROW(
cv::gpu::GpuMat gpuRes;

cv::gpu::copyMakeBorder(cv::gpu::GpuMat(src), gpuRes, top, botton, left, right, val);
cv::gpu::copyMakeBorder(cv::gpu::GpuMat(src), gpuRes, top, botton, left, right, borderType, val);

gpuRes.download(dst);
);
@@ -326,7 +333,8 @@ TEST_P(CopyMakeBorder, Accuracy)

INSTANTIATE_TEST_CASE_P(ImgProc, CopyMakeBorder, testing::Combine(
testing::ValuesIn(devices()),
testing::Values(CV_8UC1, CV_8UC4, CV_32SC1)));
testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_16SC1, CV_16SC3, CV_16SC4, CV_32FC1, CV_32FC3, CV_32FC4),
testing::Values((int)cv::BORDER_REFLECT101, (int)cv::BORDER_REPLICATE, (int)cv::BORDER_CONSTANT, (int)cv::BORDER_REFLECT, (int)cv::BORDER_WRAP)));

///////////////////////////////////////////////////////////////////////////////////////////////////////
// warpAffine & warpPerspective