Merge pull request #900 from jet47:restore-gpu-modules
Commit c929547164
@@ -37,6 +37,9 @@ if(CUDA_FOUND)

  if(WITH_NVCUVID)
    find_cuda_helper_libs(nvcuvid)
+    if(WIN32)
+      find_cuda_helper_libs(nvcuvenc)
+    endif()
    set(HAVE_NVCUVID 1)
  endif()

@@ -157,6 +160,10 @@ if(CUDA_FOUND)

      # we remove -Wsign-promo as it generates warnings under linux
      string(REPLACE "-Wsign-promo" "" ${var} "${${var}}")

+      # we remove -fvisibility-inlines-hidden because it's used for C++ compiler
+      # but NVCC uses C compiler by default
+      string(REPLACE "-fvisibility-inlines-hidden" "" ${var} "${${var}}")
    endforeach()

    if(BUILD_SHARED_LIBS)
@@ -73,8 +73,8 @@ namespace cv { namespace gpu { namespace cudev
            return (x >= 0 && x < width) ? saturate_cast<D>(data[x]) : val;
        }

-        const int width;
-        const D val;
+        int width;
+        D val;
    };

    template <typename D> struct BrdColConstant
@@ -98,8 +98,8 @@ namespace cv { namespace gpu { namespace cudev
            return (y >= 0 && y < height) ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
        }

-        const int height;
-        const D val;
+        int height;
+        D val;
    };

    template <typename D> struct BrdConstant
@@ -120,9 +120,9 @@ namespace cv { namespace gpu { namespace cudev
            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
        }

-        const int height;
-        const int width;
-        const D val;
+        int height;
+        int width;
+        D val;
    };

    //////////////////////////////////////////////////////////////
@@ -165,7 +165,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(data[idx_col(x)]);
        }

-        const int last_col;
+        int last_col;
    };

    template <typename D> struct BrdColReplicate
@@ -205,7 +205,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(*(const T*)((const char*)data + idx_row(y) * step));
        }

-        const int last_row;
+        int last_row;
    };

    template <typename D> struct BrdReplicate
@@ -255,8 +255,8 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

-        const int last_row;
-        const int last_col;
+        int last_row;
+        int last_col;
    };

    //////////////////////////////////////////////////////////////
@@ -299,7 +299,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(data[idx_col(x)]);
        }

-        const int last_col;
+        int last_col;
    };

    template <typename D> struct BrdColReflect101
@@ -339,7 +339,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
        }

-        const int last_row;
+        int last_row;
    };

    template <typename D> struct BrdReflect101
@@ -389,8 +389,8 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

-        const int last_row;
-        const int last_col;
+        int last_row;
+        int last_col;
    };

    //////////////////////////////////////////////////////////////
@@ -433,7 +433,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(data[idx_col(x)]);
        }

-        const int last_col;
+        int last_col;
    };

    template <typename D> struct BrdColReflect
@@ -473,7 +473,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
        }

-        const int last_row;
+        int last_row;
    };

    template <typename D> struct BrdReflect
@@ -523,8 +523,8 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

-        const int last_row;
-        const int last_col;
+        int last_row;
+        int last_col;
    };

    //////////////////////////////////////////////////////////////
@@ -567,7 +567,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(data[idx_col(x)]);
        }

-        const int width;
+        int width;
    };

    template <typename D> struct BrdColWrap
@@ -607,7 +607,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
        }

-        const int height;
+        int height;
    };

    template <typename D> struct BrdWrap
@@ -664,8 +664,8 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

-        const int height;
-        const int width;
+        int height;
+        int width;
    };

    //////////////////////////////////////////////////////////////
@@ -683,8 +683,8 @@ namespace cv { namespace gpu { namespace cudev
            return b.at(y, x, ptr);
        }

-        const Ptr2D ptr;
-        const B b;
+        Ptr2D ptr;
+        B b;
    };

    // under win32 there is some bug with templated types that passed as kernel parameters
@@ -704,10 +704,10 @@ namespace cv { namespace gpu { namespace cudev
            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
        }

-        const Ptr2D src;
-        const int height;
-        const int width;
-        const D val;
+        Ptr2D src;
+        int height;
+        int width;
+        D val;
    };
}}} // namespace cv { namespace gpu { namespace cudev
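Note on the const-to-mutable change above: these border functors are passed to CUDA kernels by value, and const data members suppress the implicitly generated copy-assignment operator that some host compilers need when marshalling kernel arguments (the "under win32 there is some bug with templated types that passed as kernel parameters" comment kept in this diff points at the same issue). A minimal sketch of the pattern, with hypothetical names, assuming only standard CUDA:

    // Sketch: a functor with plain value members stays trivially copyable,
    // so it can be passed to a kernel by value without compiler trouble.
    template <typename D> struct BrdRowConstantSketch
    {
        __device__ D at(int x, const D* data) const
        {
            return (x >= 0 && x < width) ? data[x] : val;
        }
        int width;   // non-const, matching the change in this diff
        D val;
    };

    template <typename D>
    __global__ void copyRowKernel(const D* src, D* dst, int n, BrdRowConstantSketch<D> brd)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        if (x < n)
            dst[x] = brd.at(x, src);   // the functor arrived as a by-value copy
    }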
@@ -87,15 +87,6 @@ namespace cv { namespace gpu

namespace cv { namespace gpu
{
-    enum
-    {
-        BORDER_REFLECT101_GPU = 0,
-        BORDER_REPLICATE_GPU,
-        BORDER_CONSTANT_GPU,
-        BORDER_REFLECT_GPU,
-        BORDER_WRAP_GPU
-    };
-
    namespace cudev
    {
        __host__ __device__ __forceinline__ int divUp(int total, int grain)
@@ -43,6 +43,7 @@
#ifndef OPENCV_GPU_EMULATION_HPP_
#define OPENCV_GPU_EMULATION_HPP_

#include "common.hpp"
+#include "warp_reduce.hpp"

namespace cv { namespace gpu { namespace cudev
@@ -131,8 +132,130 @@ namespace cv { namespace gpu { namespace cudev
                return ::atomicMin(address, val);
#endif
            }
        }; // struct cmem
+
+        struct glob
+        {
+            static __device__ __forceinline__ int atomicAdd(int* address, int val)
+            {
+                return ::atomicAdd(address, val);
+            }
+            static __device__ __forceinline__ unsigned int atomicAdd(unsigned int* address, unsigned int val)
+            {
+                return ::atomicAdd(address, val);
+            }
+            static __device__ __forceinline__ float atomicAdd(float* address, float val)
+            {
+#if __CUDA_ARCH__ >= 200
+                return ::atomicAdd(address, val);
+#else
+                int* address_as_i = (int*) address;
+                int old = *address_as_i, assumed;
+                do {
+                    assumed = old;
+                    old = ::atomicCAS(address_as_i, assumed,
+                        __float_as_int(val + __int_as_float(assumed)));
+                } while (assumed != old);
+                return __int_as_float(old);
+#endif
+            }
+            static __device__ __forceinline__ double atomicAdd(double* address, double val)
+            {
+#if __CUDA_ARCH__ >= 130
+                unsigned long long int* address_as_ull = (unsigned long long int*) address;
+                unsigned long long int old = *address_as_ull, assumed;
+                do {
+                    assumed = old;
+                    old = ::atomicCAS(address_as_ull, assumed,
+                        __double_as_longlong(val + __longlong_as_double(assumed)));
+                } while (assumed != old);
+                return __longlong_as_double(old);
+#else
+                (void) address;
+                (void) val;
+                return 0.0;
+#endif
+            }
+
+            static __device__ __forceinline__ int atomicMin(int* address, int val)
+            {
+                return ::atomicMin(address, val);
+            }
+            static __device__ __forceinline__ float atomicMin(float* address, float val)
+            {
+#if __CUDA_ARCH__ >= 120
+                int* address_as_i = (int*) address;
+                int old = *address_as_i, assumed;
+                do {
+                    assumed = old;
+                    old = ::atomicCAS(address_as_i, assumed,
+                        __float_as_int(::fminf(val, __int_as_float(assumed))));
+                } while (assumed != old);
+                return __int_as_float(old);
+#else
+                (void) address;
+                (void) val;
+                return 0.0f;
+#endif
+            }
+            static __device__ __forceinline__ double atomicMin(double* address, double val)
+            {
+#if __CUDA_ARCH__ >= 130
+                unsigned long long int* address_as_ull = (unsigned long long int*) address;
+                unsigned long long int old = *address_as_ull, assumed;
+                do {
+                    assumed = old;
+                    old = ::atomicCAS(address_as_ull, assumed,
+                        __double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
+                } while (assumed != old);
+                return __longlong_as_double(old);
+#else
+                (void) address;
+                (void) val;
+                return 0.0;
+#endif
+            }
+
+            static __device__ __forceinline__ int atomicMax(int* address, int val)
+            {
+                return ::atomicMax(address, val);
+            }
+            static __device__ __forceinline__ float atomicMax(float* address, float val)
+            {
+#if __CUDA_ARCH__ >= 120
+                int* address_as_i = (int*) address;
+                int old = *address_as_i, assumed;
+                do {
+                    assumed = old;
+                    old = ::atomicCAS(address_as_i, assumed,
+                        __float_as_int(::fmaxf(val, __int_as_float(assumed))));
+                } while (assumed != old);
+                return __int_as_float(old);
+#else
+                (void) address;
+                (void) val;
+                return 0.0f;
+#endif
+            }
+            static __device__ __forceinline__ double atomicMax(double* address, double val)
+            {
+#if __CUDA_ARCH__ >= 130
+                unsigned long long int* address_as_ull = (unsigned long long int*) address;
+                unsigned long long int old = *address_as_ull, assumed;
+                do {
+                    assumed = old;
+                    old = ::atomicCAS(address_as_ull, assumed,
+                        __double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
+                } while (assumed != old);
+                return __longlong_as_double(old);
+#else
+                (void) address;
+                (void) val;
+                return 0.0;
+#endif
+            }
+        };
-    };
+    }; //struct Emulation
}}} // namespace cv { namespace gpu { namespace cudev

#endif /* OPENCV_GPU_EMULATION_HPP_ */
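The float and double fallbacks above all use the same compare-and-swap emulation: reinterpret the word as an integer, compute the desired result from the last observed value, and retry ::atomicCAS until no other thread wrote between the read and the swap. A hedged usage sketch (the kernel and buffers are hypothetical; the Emulation struct is the one added in this diff):

    // Sum n floats into *sum; Emulation::glob::atomicAdd resolves to the
    // hardware atomic on newer architectures and to the CAS loop otherwise.
    __global__ void accumulate(const float* src, float* sum, int n)
    {
        const int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            cv::gpu::cudev::Emulation::glob::atomicAdd(sum, src[i]);
    }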
@@ -67,7 +67,7 @@ namespace cv { namespace gpu { namespace cudev
            return src(__float2int_rz(y), __float2int_rz(x));
        }

-        const Ptr2D src;
+        Ptr2D src;
    };

    template <typename Ptr2D> struct LinearFilter
@@ -107,7 +107,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<elem_type>(out);
        }

-        const Ptr2D src;
+        Ptr2D src;
    };

    template <typename Ptr2D> struct CubicFilter
@@ -166,7 +166,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<elem_type>(res);
        }

-        const Ptr2D src;
+        Ptr2D src;
    };
    // for integer scaling
    template <typename Ptr2D> struct IntegerAreaFilter
@@ -203,7 +203,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<elem_type>(out);
        }

-        const Ptr2D src;
+        Ptr2D src;
        float scale_x, scale_y ,scale;
    };

@@ -269,7 +269,7 @@ namespace cv { namespace gpu { namespace cudev
            return saturate_cast<elem_type>(out);
        }

-        const Ptr2D src;
+        Ptr2D src;
        float scale_x, scale_y;
        int width, haight;
    };
@@ -554,8 +554,8 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)
            : thresh(other.thresh), maxVal(other.maxVal) {}

-        const T thresh;
-        const T maxVal;
+        T thresh;
+        T maxVal;
    };

    template <typename T> struct thresh_binary_inv_func : unary_function<T, T>
@@ -571,8 +571,8 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)
            : thresh(other.thresh), maxVal(other.maxVal) {}

-        const T thresh;
-        const T maxVal;
+        T thresh;
+        T maxVal;
    };

    template <typename T> struct thresh_trunc_func : unary_function<T, T>
@@ -588,7 +588,7 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)
            : thresh(other.thresh) {}

-        const T thresh;
+        T thresh;
    };

    template <typename T> struct thresh_to_zero_func : unary_function<T, T>
@@ -604,7 +604,7 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)
            : thresh(other.thresh) {}

-        const T thresh;
+        T thresh;
    };

    template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>
@@ -620,7 +620,7 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)
            : thresh(other.thresh) {}

-        const T thresh;
+        T thresh;
    };

    // Function Object Adaptors
@@ -636,7 +636,7 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ unary_negate() {}
        __host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {}

-        const Predicate pred;
+        Predicate pred;
    };

    template <typename Predicate> __host__ __device__ __forceinline__ unary_negate<Predicate> not1(const Predicate& pred)
@@ -657,7 +657,7 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ binary_negate() {}
        __host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {}

-        const Predicate pred;
+        Predicate pred;
    };

    template <typename BinaryPredicate> __host__ __device__ __forceinline__ binary_negate<BinaryPredicate> not2(const BinaryPredicate& pred)
@@ -677,8 +677,8 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ binder1st() {}
        __host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {}

-        const Op op;
-        const typename Op::first_argument_type arg1;
+        Op op;
+        typename Op::first_argument_type arg1;
    };

    template <typename Op, typename T> __host__ __device__ __forceinline__ binder1st<Op> bind1st(const Op& op, const T& x)
@@ -698,8 +698,8 @@ namespace cv { namespace gpu { namespace cudev
        __host__ __device__ __forceinline__ binder2nd() {}
        __host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {}

-        const Op op;
-        const typename Op::second_argument_type arg2;
+        Op op;
+        typename Op::second_argument_type arg2;
    };

    template <typename Op, typename T> __host__ __device__ __forceinline__ binder2nd<Op> bind2nd(const Op& op, const T& x)
@@ -74,10 +74,6 @@
namespace cv { namespace gpu {
    CV_EXPORTS cv::String getNppErrorMessage(int code);
    CV_EXPORTS cv::String getCudaDriverApiErrorMessage(int code);
-
-    // Converts CPU border extrapolation mode into GPU internal analogue.
-    // Returns true if the GPU analogue exists, false otherwise.
-    CV_EXPORTS bool tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType);
}}

#ifndef HAVE_CUDA
@@ -1678,33 +1678,3 @@ String cv::gpu::getCudaDriverApiErrorMessage(int code)
    return getErrorString(code, cu_errors, cu_errors_num);
#endif
}
-
-bool cv::gpu::tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType)
-{
-#ifndef HAVE_CUDA
-    (void) cpuBorderType;
-    (void) gpuBorderType;
-    return false;
-#else
-    switch (cpuBorderType)
-    {
-    case IPL_BORDER_REFLECT_101:
-        gpuBorderType = cv::gpu::BORDER_REFLECT101_GPU;
-        return true;
-    case IPL_BORDER_REPLICATE:
-        gpuBorderType = cv::gpu::BORDER_REPLICATE_GPU;
-        return true;
-    case IPL_BORDER_CONSTANT:
-        gpuBorderType = cv::gpu::BORDER_CONSTANT_GPU;
-        return true;
-    case IPL_BORDER_REFLECT:
-        gpuBorderType = cv::gpu::BORDER_REFLECT_GPU;
-        return true;
-    case IPL_BORDER_WRAP:
-        gpuBorderType = cv::gpu::BORDER_WRAP_GPU;
-        return true;
-    default:
-        return false;
-    };
-#endif
-}
@@ -3,101 +3,10 @@ if(ANDROID OR IOS)
endif()

set(the_description "GPU-accelerated Computer Vision")
-ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video opencv_photo opencv_legacy)
-
-ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/cuda")
-ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 /wd4515 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)
-
-file(GLOB lib_hdrs "include/opencv2/*.hpp" "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
-file(GLOB lib_int_hdrs "src/*.hpp" "src/*.h")
-file(GLOB lib_cuda_hdrs "src/cuda/*.hpp" "src/cuda/*.h")
-file(GLOB lib_srcs "src/*.cpp")
-file(GLOB lib_cuda "src/cuda/*.cu*")
-
-source_group("Include" FILES ${lib_hdrs})
-source_group("Src\\Host" FILES ${lib_srcs} ${lib_int_hdrs})
-source_group("Src\\Cuda" FILES ${lib_cuda} ${lib_cuda_hdrs})
-
-if(HAVE_CUDA)
-  file(GLOB_RECURSE ncv_srcs "src/nvidia/*.cpp" "src/nvidia/*.h*")
-  file(GLOB_RECURSE ncv_cuda "src/nvidia/*.cu")
-  set(ncv_files ${ncv_srcs} ${ncv_cuda})
-
-  source_group("Src\\NVidia" FILES ${ncv_files})
-  ocv_include_directories("src/nvidia" "src/nvidia/core" "src/nvidia/NPP_staging" ${CUDA_INCLUDE_DIRS})
-  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter /wd4211 /wd4201 /wd4100 /wd4505 /wd4408)
-
-  if(MSVC)
-    if(NOT ENABLE_NOISY_WARNINGS)
-      foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
-        string(REPLACE "/W4" "/W3" ${var} "${${var}}")
-      endforeach()
-
-      set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -Xcompiler /wd4251)
-    endif()
-  endif()
-
-  ocv_cuda_compile(cuda_objs ${lib_cuda} ${ncv_cuda})
-
-  set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_npp_LIBRARY})
-
-  if(WITH_NVCUVID)
-    set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvid_LIBRARY})
-  endif()
-
-  if(WIN32)
-    find_cuda_helper_libs(nvcuvenc)
-    set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvenc_LIBRARY})
-  endif()
-
-  if(WITH_FFMPEG)
-    set(cuda_link_libs ${cuda_link_libs} ${HIGHGUI_LIBRARIES})
-  endif()
-else()
-  set(lib_cuda "")
-  set(cuda_objs "")
-  set(cuda_link_libs "")
-  set(ncv_files "")
-endif()
-
-ocv_set_module_sources(
-  HEADERS ${lib_hdrs}
-  SOURCES ${lib_int_hdrs} ${lib_cuda_hdrs} ${lib_srcs} ${lib_cuda} ${ncv_files} ${cuda_objs}
-  )
-
-ocv_create_module(${cuda_link_libs})
-
-if(HAVE_CUDA)
-  if(HAVE_CUFFT)
-    CUDA_ADD_CUFFT_TO_TARGET(${the_module})
-  endif()
-
-  if(HAVE_CUBLAS)
-    CUDA_ADD_CUBLAS_TO_TARGET(${the_module})
-  endif()
-
-  install(FILES src/nvidia/NPP_staging/NPP_staging.hpp src/nvidia/core/NCV.hpp
-    DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2/${name}
-    COMPONENT main)
-endif()
-
-ocv_add_precompiled_headers(${the_module})
-
-################################################################################################################
-################################ GPU Module Tests #####################################################
-################################################################################################################
-file(GLOB test_srcs "test/*.cpp")
-file(GLOB test_hdrs "test/*.hpp" "test/*.h")
-
-set(nvidia "")
-if(HAVE_CUDA)
-  file(GLOB nvidia "test/nvidia/*.cpp" "test/nvidia/*.hpp" "test/nvidia/*.h")
-  set(nvidia FILES "Src\\\\\\\\NVidia" ${nvidia}) # 8 ugly backslashes :'(
-endif()
-
-ocv_add_accuracy_tests(FILES "Include" ${test_hdrs}
-                       FILES "Src" ${test_srcs}
-                       ${nvidia})
-ocv_add_perf_tests()
+ocv_define_module(gpu opencv_calib3d opencv_objdetect opencv_gpuarithm opencv_gpuwarping OPTIONAL opencv_gpulegacy)

if(HAVE_CUDA)
  add_subdirectory(perf4au)
@@ -1,10 +0,0 @@
cmake_minimum_required(VERSION 2.8.3)

project(nv_perf_test)

find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIR})

add_executable(${PROJECT_NAME} main.cpp)

target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS})
Binary file not shown (removed image, 140 KiB).
Binary file not shown (removed image, 140 KiB).
@@ -1,486 +0,0 @@
#include <cstdio>

#define HAVE_CUDA 1

#include <opencv2/core.hpp>
#include <opencv2/gpu.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <opencv2/ts.hpp>

static void printOsInfo()
{
#if defined _WIN32
#   if defined _WIN64
    printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x64.\n[----------]\n"); fflush(stdout);
#   else
    printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x32.\n[----------]\n"); fflush(stdout);
#   endif
#elif defined linux
#   if defined _LP64
    printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x64.\n[----------]\n"); fflush(stdout);
#   else
    printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x32.\n[----------]\n"); fflush(stdout);
#   endif
#elif defined __APPLE__
#   if defined _LP64
    printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x64.\n[----------]\n"); fflush(stdout);
#   else
    printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x32.\n[----------]\n"); fflush(stdout);
#   endif
#endif
}

static void printCudaInfo()
{
    const int deviceCount = cv::gpu::getCudaEnabledDeviceCount();

    printf("[----------]\n"); fflush(stdout);
    printf("[ GPU INFO ] \tCUDA device count:: %d.\n", deviceCount); fflush(stdout);
    printf("[----------]\n"); fflush(stdout);

    for (int i = 0; i < deviceCount; ++i)
    {
        cv::gpu::DeviceInfo info(i);

        printf("[----------]\n"); fflush(stdout);
        printf("[ DEVICE ] \t# %d %s.\n", i, info.name().c_str()); fflush(stdout);
        printf("[ ] \tCompute capability: %d.%d\n", info.majorVersion(), info.minorVersion()); fflush(stdout);
        printf("[ ] \tMulti Processor Count: %d\n", info.multiProcessorCount()); fflush(stdout);
        printf("[ ] \tTotal memory: %d Mb\n", static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0)); fflush(stdout);
        printf("[ ] \tFree memory: %d Mb\n", static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0)); fflush(stdout);
        if (!info.isCompatible())
            printf("[ GPU INFO ] \tThis device is NOT compatible with current GPU module build\n");
        printf("[----------]\n"); fflush(stdout);
    }
}

int main(int argc, char* argv[])
{
    printOsInfo();
    printCudaInfo();

    perf::Regression::Init("nv_perf_test");
    perf::TestBase::Init(argc, argv);
    testing::InitGoogleTest(&argc, argv);

    return RUN_ALL_TESTS();
}

#define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< std::tr1::tuple< __VA_ARGS__ > > name
#define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name

//////////////////////////////////////////////////////////
// HoughLinesP

DEF_PARAM_TEST_1(Image, std::string);

GPU_PERF_TEST_P(Image, HoughLinesP, testing::Values(std::string("im1_1280x800.jpg")))
{
    declare.time(30.0);

    std::string fileName = GetParam();

    const float rho = 1.f;
    const float theta = 1.f;
    const int threshold = 40;
    const int minLineLenght = 20;
    const int maxLineGap = 5;

    cv::Mat image = cv::imread(fileName, cv::IMREAD_GRAYSCALE);

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_image(image);
        cv::gpu::GpuMat d_lines;
        cv::gpu::HoughLinesBuf d_buf;

        cv::gpu::HoughLinesP(d_image, d_lines, d_buf, rho, theta, minLineLenght, maxLineGap);

        TEST_CYCLE()
        {
            cv::gpu::HoughLinesP(d_image, d_lines, d_buf, rho, theta, minLineLenght, maxLineGap);
        }
    }
    else
    {
        cv::Mat mask;
        cv::Canny(image, mask, 50, 100);

        std::vector<cv::Vec4i> lines;
        cv::HoughLinesP(mask, lines, rho, theta, threshold, minLineLenght, maxLineGap);

        TEST_CYCLE()
        {
            cv::HoughLinesP(mask, lines, rho, theta, threshold, minLineLenght, maxLineGap);
        }
    }

    SANITY_CHECK(0);
}

//////////////////////////////////////////////////////////
// GoodFeaturesToTrack

DEF_PARAM_TEST(Image_Depth, std::string, perf::MatDepth);

GPU_PERF_TEST_P(Image_Depth, GoodFeaturesToTrack,
                testing::Combine(
                testing::Values(std::string("im1_1280x800.jpg")),
                testing::Values(CV_8U, CV_16U)
                ))
{
    declare.time(60);

    const std::string fileName = std::tr1::get<0>(GetParam());
    const int depth = std::tr1::get<1>(GetParam());

    const int maxCorners = 5000;
    const double qualityLevel = 0.05;
    const int minDistance = 5;
    const int blockSize = 3;
    const bool useHarrisDetector = true;
    const double k = 0.05;

    cv::Mat src = cv::imread(fileName, cv::IMREAD_GRAYSCALE);
    if (src.empty())
        FAIL() << "Unable to load source image [" << fileName << "]";

    if (depth != CV_8U)
        src.convertTo(src, depth);

    cv::Mat mask(src.size(), CV_8UC1, cv::Scalar::all(1));
    mask(cv::Rect(0, 0, 100, 100)).setTo(cv::Scalar::all(0));

    if (PERF_RUN_GPU())
    {
        cv::gpu::GoodFeaturesToTrackDetector_GPU d_detector(maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k);

        cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_mask(mask);
        cv::gpu::GpuMat d_pts;

        d_detector(d_src, d_pts, d_mask);

        TEST_CYCLE()
        {
            d_detector(d_src, d_pts, d_mask);
        }
    }
    else
    {
        if (depth != CV_8U)
            FAIL() << "Unsupported depth";

        cv::Mat pts;

        cv::goodFeaturesToTrack(src, pts, maxCorners, qualityLevel, minDistance, mask, blockSize, useHarrisDetector, k);

        TEST_CYCLE()
        {
            cv::goodFeaturesToTrack(src, pts, maxCorners, qualityLevel, minDistance, mask, blockSize, useHarrisDetector, k);
        }
    }

    SANITY_CHECK(0);
}

//////////////////////////////////////////////////////////
// OpticalFlowPyrLKSparse

typedef std::pair<std::string, std::string> string_pair;

DEF_PARAM_TEST(ImagePair_Depth_GraySource, string_pair, perf::MatDepth, bool);

GPU_PERF_TEST_P(ImagePair_Depth_GraySource, OpticalFlowPyrLKSparse,
                testing::Combine(
                testing::Values(string_pair("im1_1280x800.jpg", "im2_1280x800.jpg")),
                testing::Values(CV_8U, CV_16U),
                testing::Bool()
                ))
{
    declare.time(60);

    const string_pair fileNames = std::tr1::get<0>(GetParam());
    const int depth = std::tr1::get<1>(GetParam());
    const bool graySource = std::tr1::get<2>(GetParam());

    // PyrLK params
    const cv::Size winSize(15, 15);
    const int maxLevel = 5;
    const cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01);

    // GoodFeaturesToTrack params
    const int maxCorners = 5000;
    const double qualityLevel = 0.05;
    const int minDistance = 5;
    const int blockSize = 3;
    const bool useHarrisDetector = true;
    const double k = 0.05;

    cv::Mat src1 = cv::imread(fileNames.first, graySource ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
    if (src1.empty())
        FAIL() << "Unable to load source image [" << fileNames.first << "]";

    cv::Mat src2 = cv::imread(fileNames.second, graySource ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
    if (src2.empty())
        FAIL() << "Unable to load source image [" << fileNames.second << "]";

    cv::Mat gray_src;
    if (graySource)
        gray_src = src1;
    else
        cv::cvtColor(src1, gray_src, cv::COLOR_BGR2GRAY);

    cv::Mat pts;
    cv::goodFeaturesToTrack(gray_src, pts, maxCorners, qualityLevel, minDistance, cv::noArray(), blockSize, useHarrisDetector, k);

    if (depth != CV_8U)
    {
        src1.convertTo(src1, depth);
        src2.convertTo(src2, depth);
    }

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_src1(src1);
        cv::gpu::GpuMat d_src2(src2);
        cv::gpu::GpuMat d_pts(pts.reshape(2, 1));
        cv::gpu::GpuMat d_nextPts;
        cv::gpu::GpuMat d_status;

        cv::gpu::PyrLKOpticalFlow d_pyrLK;
        d_pyrLK.winSize = winSize;
        d_pyrLK.maxLevel = maxLevel;
        d_pyrLK.iters = criteria.maxCount;
        d_pyrLK.useInitialFlow = false;

        d_pyrLK.sparse(d_src1, d_src2, d_pts, d_nextPts, d_status);

        TEST_CYCLE()
        {
            d_pyrLK.sparse(d_src1, d_src2, d_pts, d_nextPts, d_status);
        }
    }
    else
    {
        if (depth != CV_8U)
            FAIL() << "Unsupported depth";

        cv::Mat nextPts;
        cv::Mat status;

        cv::calcOpticalFlowPyrLK(src1, src2, pts, nextPts, status, cv::noArray(), winSize, maxLevel, criteria);

        TEST_CYCLE()
        {
            cv::calcOpticalFlowPyrLK(src1, src2, pts, nextPts, status, cv::noArray(), winSize, maxLevel, criteria);
        }
    }

    SANITY_CHECK(0);
}

//////////////////////////////////////////////////////////
// OpticalFlowFarneback

DEF_PARAM_TEST(ImagePair_Depth, string_pair, perf::MatDepth);

GPU_PERF_TEST_P(ImagePair_Depth, OpticalFlowFarneback,
                testing::Combine(
                testing::Values(string_pair("im1_1280x800.jpg", "im2_1280x800.jpg")),
                testing::Values(CV_8U, CV_16U)
                ))
{
    declare.time(500);

    const string_pair fileNames = std::tr1::get<0>(GetParam());
    const int depth = std::tr1::get<1>(GetParam());

    const double pyrScale = 0.5;
    const int numLevels = 6;
    const int winSize = 7;
    const int numIters = 15;
    const int polyN = 7;
    const double polySigma = 1.5;
    const int flags = cv::OPTFLOW_USE_INITIAL_FLOW;

    cv::Mat src1 = cv::imread(fileNames.first, cv::IMREAD_GRAYSCALE);
    if (src1.empty())
        FAIL() << "Unable to load source image [" << fileNames.first << "]";

    cv::Mat src2 = cv::imread(fileNames.second, cv::IMREAD_GRAYSCALE);
    if (src2.empty())
        FAIL() << "Unable to load source image [" << fileNames.second << "]";

    if (depth != CV_8U)
    {
        src1.convertTo(src1, depth);
        src2.convertTo(src2, depth);
    }

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_src1(src1);
        cv::gpu::GpuMat d_src2(src2);
        cv::gpu::GpuMat d_u(src1.size(), CV_32FC1, cv::Scalar::all(0));
        cv::gpu::GpuMat d_v(src1.size(), CV_32FC1, cv::Scalar::all(0));

        cv::gpu::FarnebackOpticalFlow d_farneback;
        d_farneback.pyrScale = pyrScale;
        d_farneback.numLevels = numLevels;
        d_farneback.winSize = winSize;
        d_farneback.numIters = numIters;
        d_farneback.polyN = polyN;
        d_farneback.polySigma = polySigma;
        d_farneback.flags = flags;

        d_farneback(d_src1, d_src2, d_u, d_v);

        TEST_CYCLE_N(10)
        {
            d_farneback(d_src1, d_src2, d_u, d_v);
        }
    }
    else
    {
        if (depth != CV_8U)
            FAIL() << "Unsupported depth";

        cv::Mat flow(src1.size(), CV_32FC2, cv::Scalar::all(0));

        cv::calcOpticalFlowFarneback(src1, src2, flow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags);

        TEST_CYCLE_N(10)
        {
            cv::calcOpticalFlowFarneback(src1, src2, flow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags);
        }
    }

    SANITY_CHECK(0);
}

//////////////////////////////////////////////////////////
// OpticalFlowBM

void calcOpticalFlowBM(const cv::Mat& prev, const cv::Mat& curr,
                       cv::Size bSize, cv::Size shiftSize, cv::Size maxRange, int usePrevious,
                       cv::Mat& velx, cv::Mat& vely)
{
    cv::Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);

    velx.create(sz, CV_32FC1);
    vely.create(sz, CV_32FC1);

    CvMat cvprev = prev;
    CvMat cvcurr = curr;

    CvMat cvvelx = velx;
    CvMat cvvely = vely;

    cvCalcOpticalFlowBM(&cvprev, &cvcurr, bSize, shiftSize, maxRange, usePrevious, &cvvelx, &cvvely);
}

DEF_PARAM_TEST(ImagePair_BlockSize_ShiftSize_MaxRange, string_pair, cv::Size, cv::Size, cv::Size);

GPU_PERF_TEST_P(ImagePair_BlockSize_ShiftSize_MaxRange, OpticalFlowBM,
                testing::Combine(
                testing::Values(string_pair("im1_1280x800.jpg", "im2_1280x800.jpg")),
                testing::Values(cv::Size(16, 16)),
                testing::Values(cv::Size(2, 2)),
                testing::Values(cv::Size(16, 16))
                ))
{
    declare.time(3000);

    const string_pair fileNames = std::tr1::get<0>(GetParam());
    const cv::Size block_size = std::tr1::get<1>(GetParam());
    const cv::Size shift_size = std::tr1::get<2>(GetParam());
    const cv::Size max_range = std::tr1::get<3>(GetParam());

    cv::Mat src1 = cv::imread(fileNames.first, cv::IMREAD_GRAYSCALE);
    if (src1.empty())
        FAIL() << "Unable to load source image [" << fileNames.first << "]";

    cv::Mat src2 = cv::imread(fileNames.second, cv::IMREAD_GRAYSCALE);
    if (src2.empty())
        FAIL() << "Unable to load source image [" << fileNames.second << "]";

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_src1(src1);
        cv::gpu::GpuMat d_src2(src2);
        cv::gpu::GpuMat d_velx, d_vely, buf;

        cv::gpu::calcOpticalFlowBM(d_src1, d_src2, block_size, shift_size, max_range, false, d_velx, d_vely, buf);

        TEST_CYCLE_N(10)
        {
            cv::gpu::calcOpticalFlowBM(d_src1, d_src2, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
        }
    }
    else
    {
        cv::Mat velx, vely;

        calcOpticalFlowBM(src1, src2, block_size, shift_size, max_range, false, velx, vely);

        TEST_CYCLE_N(10)
        {
            calcOpticalFlowBM(src1, src2, block_size, shift_size, max_range, false, velx, vely);
        }
    }

    SANITY_CHECK(0);
}

GPU_PERF_TEST_P(ImagePair_BlockSize_ShiftSize_MaxRange, FastOpticalFlowBM,
                testing::Combine(
                testing::Values(string_pair("im1_1280x800.jpg", "im2_1280x800.jpg")),
                testing::Values(cv::Size(16, 16)),
                testing::Values(cv::Size(1, 1)),
                testing::Values(cv::Size(16, 16))
                ))
{
    declare.time(3000);

    const string_pair fileNames = std::tr1::get<0>(GetParam());
    const cv::Size block_size = std::tr1::get<1>(GetParam());
    const cv::Size shift_size = std::tr1::get<2>(GetParam());
    const cv::Size max_range = std::tr1::get<3>(GetParam());

    cv::Mat src1 = cv::imread(fileNames.first, cv::IMREAD_GRAYSCALE);
    if (src1.empty())
        FAIL() << "Unable to load source image [" << fileNames.first << "]";

    cv::Mat src2 = cv::imread(fileNames.second, cv::IMREAD_GRAYSCALE);
    if (src2.empty())
        FAIL() << "Unable to load source image [" << fileNames.second << "]";

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_src1(src1);
        cv::gpu::GpuMat d_src2(src2);
        cv::gpu::GpuMat d_velx, d_vely;

        cv::gpu::FastOpticalFlowBM fastBM;

        fastBM(d_src1, d_src2, d_velx, d_vely, max_range.width, block_size.width);

        TEST_CYCLE_N(10)
        {
            fastBM(d_src1, d_src2, d_velx, d_vely, max_range.width, block_size.width);
        }
    }
    else
    {
        cv::Mat velx, vely;

        calcOpticalFlowBM(src1, src2, block_size, shift_size, max_range, false, velx, vely);

        TEST_CYCLE_N(10)
        {
            calcOpticalFlowBM(src1, src2, block_size, shift_size, max_range, false, velx, vely);
        }
    }

    SANITY_CHECK(0);
}
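Each benchmark in the deleted file follows one skeleton: run the operation once outside the timed region to absorb allocations and startup cost, then measure repetitions inside TEST_CYCLE / TEST_CYCLE_N, with PERF_RUN_GPU() choosing the GPU or CPU branch. A condensed sketch of that skeleton (someGpuOp/someCpuOp are hypothetical stand-ins):

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_src(src), d_dst;
        someGpuOp(d_src, d_dst);                // warm-up, not timed
        TEST_CYCLE() someGpuOp(d_src, d_dst);   // timed iterations
    }
    else
    {
        cv::Mat dst;
        someCpuOp(src, dst);
        TEST_CYCLE() someCpuOp(src, dst);
    }
    SANITY_CHECK(0);                            // no output regression tracked here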
modules/gpu/doc/calib3d.rst (new file, 36 lines)
@@ -0,0 +1,36 @@
Camera Calibration and 3D Reconstruction
========================================

.. highlight:: cpp



gpu::solvePnPRansac
-------------------
Finds the object pose from 3D-2D point correspondences.

.. ocv:function:: void gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat, const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false, int num_iters=100, float max_dist=8.0, int min_inlier_count=100, vector<int>* inliers=NULL)

    :param object: Single-row matrix of object points.

    :param image: Single-row matrix of image points.

    :param camera_mat: 3x3 matrix of intrinsic camera parameters.

    :param dist_coef: Distortion coefficients. See :ocv:func:`undistortPoints` for details.

    :param rvec: Output 3D rotation vector.

    :param tvec: Output 3D translation vector.

    :param use_extrinsic_guess: Flag to indicate that the function must use ``rvec`` and ``tvec`` as an initial transformation guess. It is not supported for now.

    :param num_iters: Maximum number of RANSAC iterations.

    :param max_dist: Euclidean distance threshold to detect whether a point is an inlier or not.

    :param min_inlier_count: Indicates that the function must stop if a greater or equal number of inliers is achieved. It is not supported for now.

    :param inliers: Output vector of inlier indices.

.. seealso:: :ocv:func:`solvePnPRansac`
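A hedged usage sketch for the signature documented above (the point data is a hypothetical placeholder and would come from a real correspondence set):

    const int N = 100;
    cv::Mat object(1, N, CV_32FC3);            // object points, single row
    cv::Mat image(1, N, CV_32FC2);             // their image projections
    cv::Mat camera_mat = cv::Mat::eye(3, 3, CV_32F);
    cv::Mat dist_coef;                         // empty: no distortion
    cv::Mat rvec, tvec;
    std::vector<int> inliers;
    cv::gpu::solvePnPRansac(object, image, camera_mat, dist_coef, rvec, tvec,
                            false, 200, 8.0f, 100, &inliers);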
@@ -8,12 +8,5 @@ gpu. GPU-accelerated Computer Vision
    introduction
    initalization_and_information
    data_structures
-    operations_on_matrices
-    per_element_operations
-    image_processing
-    matrix_reductions
    object_detection
-    feature_detection_and_description
-    image_filtering
-    camera_calibration_and_3d_reconstruction
-    video
+    calib3d
File diff suppressed because it is too large.
@@ -1,274 +0,0 @@
Operations on Matrices
======================

.. highlight:: cpp



gpu::gemm
------------------
Performs generalized matrix multiplication.

.. ocv:function:: void gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())

    :param src1: First multiplied input matrix that should have ``CV_32FC1`` , ``CV_64FC1`` , ``CV_32FC2`` , or ``CV_64FC2`` type.

    :param src2: Second multiplied input matrix of the same type as ``src1`` .

    :param alpha: Weight of the matrix product.

    :param src3: Third optional delta matrix added to the matrix product. It should have the same type as ``src1`` and ``src2`` .

    :param beta: Weight of ``src3`` .

    :param dst: Destination matrix. It has the proper size and the same type as input matrices.

    :param flags: Operation flags:

            * **GEMM_1_T** transpose ``src1``
            * **GEMM_2_T** transpose ``src2``
            * **GEMM_3_T** transpose ``src3``

    :param stream: Stream for the asynchronous version.

The function performs generalized matrix multiplication similar to the ``gemm`` functions in BLAS level 3. For example, ``gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`` corresponds to

.. math::

    \texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T

.. note:: Transposition operation doesn't support ``CV_64FC2`` input type.

.. seealso:: :ocv:func:`gemm`



gpu::transpose
------------------
Transposes a matrix.

.. ocv:function:: void gpu::transpose( const GpuMat& src1, GpuMat& dst, Stream& stream=Stream::Null() )

    :param src1: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).

    :param dst: Destination matrix.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`transpose`



gpu::flip
-------------
Flips a 2D matrix around vertical, horizontal, or both axes.

.. ocv:function:: void gpu::flip( const GpuMat& a, GpuMat& b, int flipCode, Stream& stream=Stream::Null() )

    :param a: Source matrix. Supports 1, 3 and 4 channels images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.

    :param b: Destination matrix.

    :param flipCode: Flip mode for the source:

        * ``0`` Flips around x-axis.

        * ``>0`` Flips around y-axis.

        * ``<0`` Flips around both axes.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`flip`



gpu::LUT
------------
Transforms the source matrix into the destination matrix using the given look-up table: ``dst(I) = lut(src(I))``

.. ocv:function:: void gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null())

    :param src: Source matrix. ``CV_8UC1`` and ``CV_8UC3`` matrices are supported for now.

    :param lut: Look-up table of 256 elements. It is a continuous ``CV_8U`` matrix.

    :param dst: Destination matrix with the same depth as ``lut`` and the same number of channels as ``src`` .

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`LUT`



gpu::merge
--------------
Makes a multi-channel matrix out of several single-channel matrices.

.. ocv:function:: void gpu::merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null())

.. ocv:function:: void gpu::merge(const vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null())

    :param src: Array/vector of source matrices.

    :param n: Number of source matrices.

    :param dst: Destination matrix.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`merge`



gpu::split
--------------
Copies each plane of a multi-channel matrix into an array.

.. ocv:function:: void gpu::split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null())

.. ocv:function:: void gpu::split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = Stream::Null())

    :param src: Source matrix.

    :param dst: Destination array/vector of single-channel matrices.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`split`



gpu::magnitude
------------------
Computes magnitudes of complex matrix elements.

.. ocv:function:: void gpu::magnitude( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )

.. ocv:function:: void gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())

    :param xy: Source complex matrix in the interleaved format ( ``CV_32FC2`` ).

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param magnitude: Destination matrix of float magnitudes ( ``CV_32FC1`` ).

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`magnitude`



gpu::magnitudeSqr
---------------------
Computes squared magnitudes of complex matrix elements.

.. ocv:function:: void gpu::magnitudeSqr( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )

.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())

    :param xy: Source complex matrix in the interleaved format ( ``CV_32FC2`` ).

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param magnitude: Destination matrix of float magnitude squares ( ``CV_32FC1`` ).

    :param stream: Stream for the asynchronous version.



gpu::phase
--------------
Computes polar angles of complex matrix elements.

.. ocv:function:: void gpu::phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees=false, Stream& stream = Stream::Null())

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param angle: Destination matrix of angles ( ``CV_32FC1`` ).

    :param angleInDegrees: Flag for angles that must be evaluated in degrees.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`phase`



gpu::cartToPolar
--------------------
Converts Cartesian coordinates into polar.

.. ocv:function:: void gpu::cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees=false, Stream& stream = Stream::Null())

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param magnitude: Destination matrix of float magnitudes ( ``CV_32FC1`` ).

    :param angle: Destination matrix of angles ( ``CV_32FC1`` ).

    :param angleInDegrees: Flag for angles that must be evaluated in degrees.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`cartToPolar`



gpu::polarToCart
--------------------
Converts polar coordinates into Cartesian.

.. ocv:function:: void gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees=false, Stream& stream = Stream::Null())

    :param magnitude: Source matrix containing magnitudes ( ``CV_32FC1`` ).

    :param angle: Source matrix containing angles ( ``CV_32FC1`` ).

    :param x: Destination matrix of real components ( ``CV_32FC1`` ).

    :param y: Destination matrix of imaginary components ( ``CV_32FC1`` ).

    :param angleInDegrees: Flag that indicates angles in degrees.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`polarToCart`



gpu::normalize
--------------
Normalizes the norm or value range of an array.

.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat())

.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)

    :param src: input array.

    :param dst: output array of the same size as ``src`` .

    :param alpha: norm value to normalize to or the lower range boundary in case of the range normalization.

    :param beta: upper range boundary in case of the range normalization; it is not used for the norm normalization.

    :param normType: normalization type (see the details below).

    :param dtype: when negative, the output array has the same type as ``src``; otherwise, it has the same number of channels as ``src`` and the depth ``=CV_MAT_DEPTH(dtype)``.

    :param mask: optional operation mask.

    :param norm_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.

    :param cvt_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.

.. seealso:: :ocv:func:`normalize`
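The flag arithmetic in the gemm entry above is easiest to see in a call; a hedged sketch (matrix sizes are hypothetical):

    // dst = 1.0 * src1^T * src2 + 0.5 * src3^T
    cv::gpu::GpuMat src1(64, 32, CV_32FC1);    // src1^T is 32x64
    cv::gpu::GpuMat src2(64, 48, CV_32FC1);
    cv::gpu::GpuMat src3(48, 32, CV_32FC1);    // src3^T is 32x48
    cv::gpu::GpuMat dst;                       // becomes 32x48
    cv::gpu::gemm(src1, src2, 1.0, src3, 0.5, dst, cv::GEMM_1_T + cv::GEMM_3_T);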
File diff suppressed because it is too large.
File diff suppressed because it is too large.
@ -46,181 +46,8 @@ using namespace std;
|
||||
using namespace testing;
|
||||
using namespace perf;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// StereoBM
|
||||
|
||||
typedef std::tr1::tuple<string, string> pair_string;
|
||||
DEF_PARAM_TEST_1(ImagePair, pair_string);
|
||||
|
||||
PERF_TEST_P(ImagePair, Calib3D_StereoBM,
|
||||
Values(pair_string("gpu/perf/aloe.png", "gpu/perf/aloeR.png")))
|
||||
{
|
||||
declare.time(300.0);
|
||||
|
||||
const cv::Mat imgLeft = readImage(GET_PARAM(0), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(imgLeft.empty());
|
||||
|
||||
const cv::Mat imgRight = readImage(GET_PARAM(1), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(imgRight.empty());
|
||||
|
||||
const int preset = 0;
|
||||
const int ndisp = 256;
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::gpu::StereoBM_GPU d_bm(preset, ndisp);
|
||||
|
||||
const cv::gpu::GpuMat d_imgLeft(imgLeft);
|
||||
const cv::gpu::GpuMat d_imgRight(imgRight);
|
||||
cv::gpu::GpuMat dst;
|
||||
|
||||
TEST_CYCLE() d_bm(d_imgLeft, d_imgRight, dst);
|
||||
|
||||
GPU_SANITY_CHECK(dst);
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(ndisp);
|
||||
|
||||
cv::Mat dst;
|
||||
|
||||
TEST_CYCLE() bm->compute(imgLeft, imgRight, dst);
|
||||
|
||||
CPU_SANITY_CHECK(dst);
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// StereoBeliefPropagation
|
||||
|
||||
PERF_TEST_P(ImagePair, Calib3D_StereoBeliefPropagation,
|
||||
Values(pair_string("gpu/stereobp/aloe-L.png", "gpu/stereobp/aloe-R.png")))
|
||||
{
|
||||
declare.time(300.0);
|
||||
|
||||
const cv::Mat imgLeft = readImage(GET_PARAM(0));
|
||||
ASSERT_FALSE(imgLeft.empty());
|
||||
|
||||
const cv::Mat imgRight = readImage(GET_PARAM(1));
|
||||
ASSERT_FALSE(imgRight.empty());
|
||||
|
||||
const int ndisp = 64;
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::gpu::StereoBeliefPropagation d_bp(ndisp);
|
||||
|
||||
const cv::gpu::GpuMat d_imgLeft(imgLeft);
|
||||
const cv::gpu::GpuMat d_imgRight(imgRight);
|
||||
cv::gpu::GpuMat dst;
|
||||
|
||||
TEST_CYCLE() d_bp(d_imgLeft, d_imgRight, dst);
|
||||
|
||||
GPU_SANITY_CHECK(dst);
|
||||
}
|
||||
else
|
||||
{
|
||||
FAIL_NO_CPU();
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// StereoConstantSpaceBP
|
||||
|
||||
PERF_TEST_P(ImagePair, Calib3D_StereoConstantSpaceBP,
|
||||
Values(pair_string("gpu/stereobm/aloe-L.png", "gpu/stereobm/aloe-R.png")))
|
||||
{
|
||||
declare.time(300.0);
|
||||
|
||||
const cv::Mat imgLeft = readImage(GET_PARAM(0), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(imgLeft.empty());
|
||||
|
||||
const cv::Mat imgRight = readImage(GET_PARAM(1), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(imgRight.empty());
|
||||
|
||||
const int ndisp = 128;
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::gpu::StereoConstantSpaceBP d_csbp(ndisp);
|
||||
|
||||
const cv::gpu::GpuMat d_imgLeft(imgLeft);
|
||||
const cv::gpu::GpuMat d_imgRight(imgRight);
|
||||
cv::gpu::GpuMat dst;
|
||||
|
||||
TEST_CYCLE() d_csbp(d_imgLeft, d_imgRight, dst);
|
||||
|
||||
GPU_SANITY_CHECK(dst);
|
||||
}
|
||||
else
|
||||
{
|
||||
FAIL_NO_CPU();
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// DisparityBilateralFilter
|
||||
|
||||
PERF_TEST_P(ImagePair, Calib3D_DisparityBilateralFilter,
|
||||
Values(pair_string("gpu/stereobm/aloe-L.png", "gpu/stereobm/aloe-disp.png")))
|
||||
{
|
||||
const cv::Mat img = readImage(GET_PARAM(0), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(img.empty());
|
||||
|
||||
const cv::Mat disp = readImage(GET_PARAM(1), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(disp.empty());
|
||||
|
||||
const int ndisp = 128;
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
cv::gpu::DisparityBilateralFilter d_filter(ndisp);
|
||||
|
||||
const cv::gpu::GpuMat d_img(img);
|
||||
const cv::gpu::GpuMat d_disp(disp);
|
||||
cv::gpu::GpuMat dst;
|
||||
|
||||
TEST_CYCLE() d_filter(d_disp, d_img, dst);
|
||||
|
||||
GPU_SANITY_CHECK(dst);
|
||||
}
|
||||
else
|
||||
{
|
||||
FAIL_NO_CPU();
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// TransformPoints
|
||||
|
||||
DEF_PARAM_TEST_1(Count, int);
|
||||
|
||||
PERF_TEST_P(Count, Calib3D_TransformPoints,
|
||||
Values(5000, 10000, 20000))
|
||||
{
|
||||
const int count = GetParam();
|
||||
|
||||
cv::Mat src(1, count, CV_32FC3);
|
||||
declare.in(src, WARMUP_RNG);
|
||||
|
||||
const cv::Mat rvec = cv::Mat::ones(1, 3, CV_32FC1);
|
||||
const cv::Mat tvec = cv::Mat::ones(1, 3, CV_32FC1);
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
const cv::gpu::GpuMat d_src(src);
|
||||
cv::gpu::GpuMat dst;
|
||||
|
||||
TEST_CYCLE() cv::gpu::transformPoints(d_src, rvec, tvec, dst);
|
||||
|
||||
GPU_SANITY_CHECK(dst);
|
||||
}
|
||||
else
|
||||
{
|
||||
FAIL_NO_CPU();
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
// ProjectPoints

@ -306,66 +133,3 @@ PERF_TEST_P(Count, Calib3D_SolvePnPRansac,
        CPU_SANITY_CHECK(tvec, 1e-6);
    }
}

//////////////////////////////////////////////////////////////////////
// ReprojectImageTo3D

PERF_TEST_P(Sz_Depth, Calib3D_ReprojectImageTo3D,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16S)))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat src(size, depth);
    declare.in(src, WARMUP_RNG);

    cv::Mat Q(4, 4, CV_32FC1);
    cv::randu(Q, 0.1, 1.0);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::reprojectImageTo3D(d_src, dst, Q);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::reprojectImageTo3D(src, dst, Q);

        CPU_SANITY_CHECK(dst);
    }
}
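
Both branches exercise the same geometry: reprojectImageTo3D lifts each pixel (x, y) with disparity d through the 4x4 perspective transform Q from stereo rectification, (X, Y, Z, W)^T = Q * (x, y, d, 1)^T, and writes the de-homogenized point (X/W, Y/W, Z/W). A scalar sketch of that per-pixel mapping (reprojectPixel is illustrative only):

    #include "opencv2/core.hpp"

    // Per-pixel math behind reprojectImageTo3D (illustration).
    static cv::Point3f reprojectPixel(const cv::Matx44f& Q, float x, float y, float d)
    {
        const cv::Vec4f h = Q * cv::Vec4f(x, y, d, 1.f);  // homogeneous 3D point
        return cv::Point3f(h[0] / h[3], h[1] / h[3], h[2] / h[3]);
    }
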
//////////////////////////////////////////////////////////////////////
// DrawColorDisp

PERF_TEST_P(Sz_Depth, Calib3D_DrawColorDisp,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16S)))
{
    const cv::Size size = GET_PARAM(0);
    const int type = GET_PARAM(1);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::drawColorDisp(d_src, dst, 255);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        FAIL_NO_CPU();
    }
}
File diff suppressed because it is too large
@ -51,21 +51,12 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include <cstdio>
#include <iostream>

#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_perf.hpp"

#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/gpu.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
#include "opencv2/photo.hpp"

#include "opencv2/core/gpu_private.hpp"
#include "opencv2/objdetect.hpp"

#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
File diff suppressed because it is too large
@ -1,4 +1,4 @@
set(PERF4AU_REQUIRED_DEPS opencv_core opencv_imgproc opencv_highgui opencv_video opencv_legacy opencv_gpu opencv_ts)
set(PERF4AU_REQUIRED_DEPS opencv_core opencv_imgproc opencv_highgui opencv_video opencv_legacy opencv_ml opencv_ts opencv_gpufilters opencv_gpuimgproc opencv_gpuoptflow)

ocv_check_dependencies(${PERF4AU_REQUIRED_DEPS})

@ -25,4 +25,3 @@ if(WIN32)
    set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
  endif()
endif()
@ -44,7 +44,8 @@

#include "cvconfig.h"
#include "opencv2/core.hpp"
#include "opencv2/gpu.hpp"
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/gpuoptflow.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/video.hpp"
#include "opencv2/legacy.hpp"
@ -1,565 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

void cv::gpu::gemm(const GpuMat&, const GpuMat&, double, const GpuMat&, double, GpuMat&, int, Stream&) { throw_no_cuda(); }
void cv::gpu::transpose(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::flip(const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
void cv::gpu::LUT(const GpuMat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::magnitude(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::magnitudeSqr(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::magnitude(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::magnitudeSqr(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::phase(const GpuMat&, const GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&) { throw_no_cuda(); }
void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }

#else /* !defined (HAVE_CUDA) */

////////////////////////////////////////////////////////////////////////
// gemm

void cv::gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags, Stream& stream)
{
#ifndef HAVE_CUBLAS
    (void)src1;
    (void)src2;
    (void)alpha;
    (void)src3;
    (void)beta;
    (void)dst;
    (void)flags;
    (void)stream;
    CV_Error(cv::Error::StsNotImplemented, "The library was built without CUBLAS");
#else
    // CUBLAS works with column-major matrices

    CV_Assert(src1.type() == CV_32FC1 || src1.type() == CV_32FC2 || src1.type() == CV_64FC1 || src1.type() == CV_64FC2);
    CV_Assert(src2.type() == src1.type() && (src3.empty() || src3.type() == src1.type()));

    if (src1.depth() == CV_64F)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
    }

    bool tr1 = (flags & GEMM_1_T) != 0;
    bool tr2 = (flags & GEMM_2_T) != 0;
    bool tr3 = (flags & GEMM_3_T) != 0;

    if (src1.type() == CV_64FC2)
    {
        if (tr1 || tr2 || tr3)
            CV_Error(cv::Error::StsNotImplemented, "the transpose operation is not implemented for the CV_64FC2 type");
    }

    Size src1Size = tr1 ? Size(src1.rows, src1.cols) : src1.size();
    Size src2Size = tr2 ? Size(src2.rows, src2.cols) : src2.size();
    Size src3Size = tr3 ? Size(src3.rows, src3.cols) : src3.size();
    Size dstSize(src2Size.width, src1Size.height);

    CV_Assert(src1Size.width == src2Size.height);
    CV_Assert(src3.empty() || src3Size == dstSize);

    dst.create(dstSize, src1.type());

    if (beta != 0)
    {
        if (src3.empty())
        {
            if (stream)
                stream.enqueueMemSet(dst, Scalar::all(0));
            else
                dst.setTo(Scalar::all(0));
        }
        else
        {
            if (tr3)
            {
                transpose(src3, dst, stream);
            }
            else
            {
                if (stream)
                    stream.enqueueCopy(src3, dst);
                else
                    src3.copyTo(dst);
            }
        }
    }

    cublasHandle_t handle;
    cublasSafeCall( cublasCreate_v2(&handle) );

    cublasSafeCall( cublasSetStream_v2(handle, StreamAccessor::getStream(stream)) );

    cublasSafeCall( cublasSetPointerMode_v2(handle, CUBLAS_POINTER_MODE_HOST) );

    const float alphaf = static_cast<float>(alpha);
    const float betaf = static_cast<float>(beta);

    const cuComplex alphacf = make_cuComplex(alphaf, 0);
    const cuComplex betacf = make_cuComplex(betaf, 0);

    const cuDoubleComplex alphac = make_cuDoubleComplex(alpha, 0);
    const cuDoubleComplex betac = make_cuDoubleComplex(beta, 0);

    cublasOperation_t transa = tr2 ? CUBLAS_OP_T : CUBLAS_OP_N;
    cublasOperation_t transb = tr1 ? CUBLAS_OP_T : CUBLAS_OP_N;

    switch (src1.type())
    {
    case CV_32FC1:
        cublasSafeCall( cublasSgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alphaf,
            src2.ptr<float>(), static_cast<int>(src2.step / sizeof(float)),
            src1.ptr<float>(), static_cast<int>(src1.step / sizeof(float)),
            &betaf,
            dst.ptr<float>(), static_cast<int>(dst.step / sizeof(float))) );
        break;

    case CV_64FC1:
        cublasSafeCall( cublasDgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alpha,
            src2.ptr<double>(), static_cast<int>(src2.step / sizeof(double)),
            src1.ptr<double>(), static_cast<int>(src1.step / sizeof(double)),
            &beta,
            dst.ptr<double>(), static_cast<int>(dst.step / sizeof(double))) );
        break;

    case CV_32FC2:
        cublasSafeCall( cublasCgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alphacf,
            src2.ptr<cuComplex>(), static_cast<int>(src2.step / sizeof(cuComplex)),
            src1.ptr<cuComplex>(), static_cast<int>(src1.step / sizeof(cuComplex)),
            &betacf,
            dst.ptr<cuComplex>(), static_cast<int>(dst.step / sizeof(cuComplex))) );
        break;

    case CV_64FC2:
        cublasSafeCall( cublasZgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alphac,
            src2.ptr<cuDoubleComplex>(), static_cast<int>(src2.step / sizeof(cuDoubleComplex)),
            src1.ptr<cuDoubleComplex>(), static_cast<int>(src1.step / sizeof(cuDoubleComplex)),
            &betac,
            dst.ptr<cuDoubleComplex>(), static_cast<int>(dst.step / sizeof(cuDoubleComplex))) );
        break;
    }

    cublasSafeCall( cublasDestroy_v2(handle) );
#endif
}
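
Note the operand order in the cublas*gemm_v2 calls: CUBLAS assumes column-major storage while GpuMat is row-major, so passing src2 first and src1 second computes dst^T = src2^T * src1^T in CUBLAS terms, which is exactly the row-major dst = src1 * src2 without any explicit transposition. A minimal usage sketch, assuming a CUBLAS-enabled build (gemmExample is hypothetical):

    #include "opencv2/core.hpp"
    #include "opencv2/gpu.hpp"

    // result = 1.0 * A * B + 0.5 * C on the GPU (sketch).
    void gemmExample(const cv::Mat& A, const cv::Mat& B, const cv::Mat& C, cv::Mat& result)
    {
        CV_Assert(A.type() == CV_32FC1 && B.type() == A.type() && C.type() == A.type());

        cv::gpu::GpuMat d_A(A), d_B(B), d_C(C), d_dst;
        cv::gpu::gemm(d_A, d_B, 1.0, d_C, 0.5, d_dst, 0);  // flags = 0: no transpose
        d_dst.download(result);
    }
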
////////////////////////////////////////////////////////////////////////
// transpose

void cv::gpu::transpose(const GpuMat& src, GpuMat& dst, Stream& s)
{
    CV_Assert(src.elemSize() == 1 || src.elemSize() == 4 || src.elemSize() == 8);

    dst.create( src.cols, src.rows, src.type() );

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (src.elemSize() == 1)
    {
        NppStreamHandler h(stream);

        NppiSize sz;
        sz.width = src.cols;
        sz.height = src.rows;

        nppSafeCall( nppiTranspose_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz) );
    }
    else if (src.elemSize() == 4)
    {
        NppStStreamHandler h(stream);

        NcvSize32u sz;
        sz.width = src.cols;
        sz.height = src.rows;

        ncvSafeCall( nppiStTranspose_32u_C1R(const_cast<Ncv32u*>(src.ptr<Ncv32u>()), static_cast<int>(src.step),
            dst.ptr<Ncv32u>(), static_cast<int>(dst.step), sz) );
    }
    else // if (src.elemSize() == 8)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");

        NppStStreamHandler h(stream);

        NcvSize32u sz;
        sz.width = src.cols;
        sz.height = src.rows;

        ncvSafeCall( nppiStTranspose_64u_C1R(const_cast<Ncv64u*>(src.ptr<Ncv64u>()), static_cast<int>(src.step),
            dst.ptr<Ncv64u>(), static_cast<int>(dst.step), sz) );
    }

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
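
Dispatch here is purely by element size, not by type: 1-byte elements go through NPP's 8u transpose, while 4- and 8-byte elements of any layout (e.g. CV_32FC1, CV_8UC4, CV_64FC1) are reinterpreted as 32u/64u and routed to the NPPST transposes. A usage sketch, assuming the default null stream (transposeExample is illustrative):

    #include "opencv2/gpu.hpp"

    // Transpose a single-channel float matrix on the GPU (sketch).
    void transposeExample()
    {
        cv::gpu::GpuMat src(64, 128, CV_32FC1);
        src.setTo(cv::Scalar::all(1));

        cv::gpu::GpuMat dst;
        cv::gpu::transpose(src, dst);   // 4-byte elements take the NPPST 32u path
        CV_Assert(dst.rows == 128 && dst.cols == 64);
    }
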
////////////////////////////////////////////////////////////////////////
// flip

namespace
{
    template<int DEPTH> struct NppTypeTraits;
    template<> struct NppTypeTraits<CV_8U>  { typedef Npp8u  npp_t; };
    template<> struct NppTypeTraits<CV_8S>  { typedef Npp8s  npp_t; };
    template<> struct NppTypeTraits<CV_16U> { typedef Npp16u npp_t; };
    template<> struct NppTypeTraits<CV_16S> { typedef Npp16s npp_t; };
    template<> struct NppTypeTraits<CV_32S> { typedef Npp32s npp_t; };
    template<> struct NppTypeTraits<CV_32F> { typedef Npp32f npp_t; };
    template<> struct NppTypeTraits<CV_64F> { typedef Npp64f npp_t; };

    template <int DEPTH> struct NppMirrorFunc
    {
        typedef typename NppTypeTraits<DEPTH>::npp_t npp_t;

        typedef NppStatus (*func_t)(const npp_t* pSrc, int nSrcStep, npp_t* pDst, int nDstStep, NppiSize oROI, NppiAxis flip);
    };

    template <int DEPTH, typename NppMirrorFunc<DEPTH>::func_t func> struct NppMirror
    {
        typedef typename NppMirrorFunc<DEPTH>::npp_t npp_t;

        static void call(const GpuMat& src, GpuMat& dst, int flipCode, cudaStream_t stream)
        {
            NppStreamHandler h(stream);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            nppSafeCall( func(src.ptr<npp_t>(), static_cast<int>(src.step),
                dst.ptr<npp_t>(), static_cast<int>(dst.step), sz,
                (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS))) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
}

void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, GpuMat& dst, int flipCode, cudaStream_t stream);
    static const func_t funcs[6][4] =
    {
        {NppMirror<CV_8U, nppiMirror_8u_C1R>::call, 0, NppMirror<CV_8U, nppiMirror_8u_C3R>::call, NppMirror<CV_8U, nppiMirror_8u_C4R>::call},
        {0,0,0,0},
        {NppMirror<CV_16U, nppiMirror_16u_C1R>::call, 0, NppMirror<CV_16U, nppiMirror_16u_C3R>::call, NppMirror<CV_16U, nppiMirror_16u_C4R>::call},
        {0,0,0,0},
        {NppMirror<CV_32S, nppiMirror_32s_C1R>::call, 0, NppMirror<CV_32S, nppiMirror_32s_C3R>::call, NppMirror<CV_32S, nppiMirror_32s_C4R>::call},
        {NppMirror<CV_32F, nppiMirror_32f_C1R>::call, 0, NppMirror<CV_32F, nppiMirror_32f_C3R>::call, NppMirror<CV_32F, nppiMirror_32f_C4R>::call}
    };

    CV_Assert(src.depth() == CV_8U || src.depth() == CV_16U || src.depth() == CV_32S || src.depth() == CV_32F);
    CV_Assert(src.channels() == 1 || src.channels() == 3 || src.channels() == 4);

    dst.create(src.size(), src.type());

    funcs[src.depth()][src.channels() - 1](src, dst, flipCode, StreamAccessor::getStream(stream));
}
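
The funcs table is indexed by depth and channel count, and flipCode keeps the usual OpenCV convention mapped onto NPP axes: 0 mirrors around the x-axis, a positive value around the y-axis, a negative value around both. A usage sketch (flipExample is illustrative):

    #include "opencv2/gpu.hpp"

    // The three flip modes on one image (sketch).
    void flipExample(const cv::gpu::GpuMat& src)
    {
        cv::gpu::GpuMat upDown, leftRight, both;
        cv::gpu::flip(src, upDown, 0);       // around the x-axis
        cv::gpu::flip(src, leftRight, 1);    // around the y-axis
        cv::gpu::flip(src, both, -1);        // around both axes
    }
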
////////////////////////////////////////////////////////////////////////
// LUT

void cv::gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& s)
{
    const int cn = src.channels();

    CV_Assert( src.type() == CV_8UC1 || src.type() == CV_8UC3 );
    CV_Assert( lut.depth() == CV_8U );
    CV_Assert( lut.channels() == 1 || lut.channels() == cn );
    CV_Assert( lut.rows * lut.cols == 256 && lut.isContinuous() );

    dst.create(src.size(), CV_MAKE_TYPE(lut.depth(), cn));

    NppiSize sz;
    sz.height = src.rows;
    sz.width = src.cols;

    Mat nppLut;
    lut.convertTo(nppLut, CV_32S);

    int nValues3[] = {256, 256, 256};

    Npp32s pLevels[256];
    for (int i = 0; i < 256; ++i)
        pLevels[i] = i;

    const Npp32s* pLevels3[3];

#if (CUDA_VERSION <= 4020)
    pLevels3[0] = pLevels3[1] = pLevels3[2] = pLevels;
#else
    GpuMat d_pLevels;
    d_pLevels.upload(Mat(1, 256, CV_32S, pLevels));
    pLevels3[0] = pLevels3[1] = pLevels3[2] = d_pLevels.ptr<Npp32s>();
#endif

    cudaStream_t stream = StreamAccessor::getStream(s);
    NppStreamHandler h(stream);

    if (src.type() == CV_8UC1)
    {
#if (CUDA_VERSION <= 4020)
        nppSafeCall( nppiLUT_Linear_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, nppLut.ptr<Npp32s>(), pLevels, 256) );
#else
        GpuMat d_nppLut(Mat(1, 256, CV_32S, nppLut.data));
        nppSafeCall( nppiLUT_Linear_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, d_nppLut.ptr<Npp32s>(), d_pLevels.ptr<Npp32s>(), 256) );
#endif
    }
    else
    {
        const Npp32s* pValues3[3];

        Mat nppLut3[3];
        if (nppLut.channels() == 1)
        {
#if (CUDA_VERSION <= 4020)
            pValues3[0] = pValues3[1] = pValues3[2] = nppLut.ptr<Npp32s>();
#else
            GpuMat d_nppLut(Mat(1, 256, CV_32S, nppLut.data));
            pValues3[0] = pValues3[1] = pValues3[2] = d_nppLut.ptr<Npp32s>();
#endif
        }
        else
        {
            cv::split(nppLut, nppLut3);

#if (CUDA_VERSION <= 4020)
            pValues3[0] = nppLut3[0].ptr<Npp32s>();
            pValues3[1] = nppLut3[1].ptr<Npp32s>();
            pValues3[2] = nppLut3[2].ptr<Npp32s>();
#else
            GpuMat d_nppLut0(Mat(1, 256, CV_32S, nppLut3[0].data));
            GpuMat d_nppLut1(Mat(1, 256, CV_32S, nppLut3[1].data));
            GpuMat d_nppLut2(Mat(1, 256, CV_32S, nppLut3[2].data));

            pValues3[0] = d_nppLut0.ptr<Npp32s>();
            pValues3[1] = d_nppLut1.ptr<Npp32s>();
            pValues3[2] = d_nppLut2.ptr<Npp32s>();
#endif
        }

        nppSafeCall( nppiLUT_Linear_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, pValues3, pLevels3, nValues3) );
    }

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
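
A usage sketch: any 256-entry 8-bit mapping, such as gamma correction, fits this interface (gammaExample is illustrative):

    #include <cmath>
    #include "opencv2/core.hpp"
    #include "opencv2/gpu.hpp"

    // Apply a gamma curve to an 8-bit image through the GPU LUT (sketch).
    void gammaExample(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, double gamma)
    {
        cv::Mat lut(1, 256, CV_8UC1);
        for (int i = 0; i < 256; ++i)
            lut.at<uchar>(i) = cv::saturate_cast<uchar>(255.0 * std::pow(i / 255.0, gamma));

        cv::gpu::LUT(src, lut, dst);   // src must be CV_8UC1 or CV_8UC3
    }
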
////////////////////////////////////////////////////////////////////////
// NPP magnitude

namespace
{
    typedef NppStatus (*nppMagnitude_t)(const Npp32fc* pSrc, int nSrcStep, Npp32f* pDst, int nDstStep, NppiSize oSizeROI);

    inline void npp_magnitude(const GpuMat& src, GpuMat& dst, nppMagnitude_t func, cudaStream_t stream)
    {
        CV_Assert(src.type() == CV_32FC2);

        dst.create(src.size(), CV_32FC1);

        NppiSize sz;
        sz.width = src.cols;
        sz.height = src.rows;

        NppStreamHandler h(stream);

        nppSafeCall( func(src.ptr<Npp32fc>(), static_cast<int>(src.step), dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
}

void cv::gpu::magnitude(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    npp_magnitude(src, dst, nppiMagnitude_32fc32f_C1R, StreamAccessor::getStream(stream));
}

void cv::gpu::magnitudeSqr(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    npp_magnitude(src, dst, nppiMagnitudeSqr_32fc32f_C1R, StreamAccessor::getStream(stream));
}

////////////////////////////////////////////////////////////////////////
// Polar <-> Cart

namespace cv { namespace gpu { namespace cudev
{
    namespace mathfunc
    {
        void cartToPolar_gpu(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, bool magSqr, PtrStepSzf angle, bool angleInDegrees, cudaStream_t stream);
        void polarToCart_gpu(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, cudaStream_t stream);
    }
}}}

namespace
{
    inline void cartToPolar_caller(const GpuMat& x, const GpuMat& y, GpuMat* mag, bool magSqr, GpuMat* angle, bool angleInDegrees, cudaStream_t stream)
    {
        using namespace ::cv::gpu::cudev::mathfunc;

        CV_Assert(x.size() == y.size() && x.type() == y.type());
        CV_Assert(x.depth() == CV_32F);

        if (mag)
            mag->create(x.size(), x.type());
        if (angle)
            angle->create(x.size(), x.type());

        GpuMat x1cn = x.reshape(1);
        GpuMat y1cn = y.reshape(1);
        GpuMat mag1cn = mag ? mag->reshape(1) : GpuMat();
        GpuMat angle1cn = angle ? angle->reshape(1) : GpuMat();

        cartToPolar_gpu(x1cn, y1cn, mag1cn, magSqr, angle1cn, angleInDegrees, stream);
    }

    inline void polarToCart_caller(const GpuMat& mag, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, cudaStream_t stream)
    {
        using namespace ::cv::gpu::cudev::mathfunc;

        CV_Assert((mag.empty() || mag.size() == angle.size()) && mag.type() == angle.type());
        CV_Assert(mag.depth() == CV_32F);

        x.create(mag.size(), mag.type());
        y.create(mag.size(), mag.type());

        GpuMat mag1cn = mag.reshape(1);
        GpuMat angle1cn = angle.reshape(1);
        GpuMat x1cn = x.reshape(1);
        GpuMat y1cn = y.reshape(1);

        polarToCart_gpu(mag1cn, angle1cn, x1cn, y1cn, angleInDegrees, stream);
    }
}

void cv::gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& dst, Stream& stream)
{
    cartToPolar_caller(x, y, &dst, false, 0, false, StreamAccessor::getStream(stream));
}

void cv::gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& dst, Stream& stream)
{
    cartToPolar_caller(x, y, &dst, true, 0, false, StreamAccessor::getStream(stream));
}

void cv::gpu::phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees, Stream& stream)
{
    cartToPolar_caller(x, y, 0, false, &angle, angleInDegrees, StreamAccessor::getStream(stream));
}

void cv::gpu::cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& mag, GpuMat& angle, bool angleInDegrees, Stream& stream)
{
    cartToPolar_caller(x, y, &mag, false, &angle, angleInDegrees, StreamAccessor::getStream(stream));
}

void cv::gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, Stream& stream)
{
    polarToCart_caller(magnitude, angle, x, y, angleInDegrees, StreamAccessor::getStream(stream));
}
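
All five wrappers funnel into the same two kernels: magnitudeSqr merely sets the magSqr flag, and phase requests the angle plane alone. A round-trip usage sketch for CV_32F gradient planes (polarExample is illustrative):

    #include "opencv2/gpu.hpp"

    // cart -> polar -> cart round trip (sketch).
    void polarExample(const cv::gpu::GpuMat& dx, const cv::gpu::GpuMat& dy)
    {
        cv::gpu::GpuMat mag, angle, x2, y2;
        cv::gpu::cartToPolar(dx, dy, mag, angle, true);  // angles in degrees
        cv::gpu::polarToCart(mag, angle, x2, y2, true);  // x2/y2 match dx/dy up to rounding
    }
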
////////////////////////////////////////////////////////////////////////
// normalize

void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask)
{
    GpuMat norm_buf;
    GpuMat cvt_buf;
    normalize(src, dst, a, b, norm_type, dtype, mask, norm_buf, cvt_buf);
}

void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
{
    double scale = 1, shift = 0;
    if (norm_type == NORM_MINMAX)
    {
        double smin = 0, smax = 0;
        double dmin = std::min(a, b), dmax = std::max(a, b);
        minMax(src, &smin, &smax, mask, norm_buf);
        scale = (dmax - dmin) * (smax - smin > std::numeric_limits<double>::epsilon() ? 1.0 / (smax - smin) : 0.0);
        shift = dmin - smin * scale;
    }
    else if (norm_type == NORM_L2 || norm_type == NORM_L1 || norm_type == NORM_INF)
    {
        scale = norm(src, norm_type, mask, norm_buf);
        scale = scale > std::numeric_limits<double>::epsilon() ? a / scale : 0.0;
        shift = 0;
    }
    else
    {
        CV_Error(cv::Error::StsBadArg, "Unknown/unsupported norm type");
    }

    if (mask.empty())
    {
        src.convertTo(dst, dtype, scale, shift);
    }
    else
    {
        src.convertTo(cvt_buf, dtype, scale, shift);
        cvt_buf.copyTo(dst, mask);
    }
}
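
Both branches reduce to a single convertTo with a linear map dst = scale * src + shift. For NORM_MINMAX with a source range [10, 50] stretched to [0, 255]: scale = (255 - 0) / (50 - 10) = 6.375 and shift = 0 - 10 * 6.375 = -63.75. A usage sketch:

    #include "opencv2/gpu.hpp"

    // Stretch an image to the full 8-bit range on the GPU (sketch).
    void normalizeExample(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst)
    {
        cv::gpu::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8U, cv::gpu::GpuMat());
    }
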
#endif /* !defined (HAVE_CUDA) */
@ -252,7 +252,7 @@ void cv::gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& cam
    // Find the best hypothesis index
    Point best_idx;
    double best_score;
    minMaxLoc(d_hypothesis_scores, NULL, &best_score, NULL, &best_idx);
    gpu::minMaxLoc(d_hypothesis_scores, NULL, &best_score, NULL, &best_idx);
    int num_inliers = static_cast<int>(best_score);

    // Extract the best hypothesis data
@ -41,8 +41,6 @@
//M*/

#include "precomp.hpp"
#include <vector>
#include <iostream>
#include "opencv2/objdetect/objdetect_c.h"

using namespace cv;
@ -75,6 +73,37 @@ public:
    virtual bool read(const String& classifierAsXml) = 0;
};

#ifndef HAVE_OPENCV_GPULEGACY

struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
    HaarCascade()
    {
        throw_no_cuda();
    }

    unsigned int process(const GpuMat&, GpuMat&, float, int, bool, bool, cv::Size, cv::Size)
    {
        throw_no_cuda();
        return 0;
    }

    cv::Size getClassifierCvSize() const
    {
        throw_no_cuda();
        return cv::Size();
    }

    bool read(const String&)
    {
        throw_no_cuda();
        return false;
    }
};

#else

struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
@ -284,6 +313,8 @@ private:
    virtual ~HaarCascade(){}
};

#endif

cv::Size operator -(const cv::Size& a, const cv::Size& b)
{
    return cv::Size(a.width - b.width, a.height - b.height);
@ -477,6 +508,8 @@ private:
    resuzeBuffer.create(frame, CV_8UC1);

    integral.create(frame.height + 1, integralFactor * (frame.width + 1), CV_32SC1);

#ifdef HAVE_OPENCV_GPULEGACY
    NcvSize32u roiSize;
    roiSize.width = frame.width;
    roiSize.height = frame.height;
@ -487,6 +520,7 @@ private:
    Ncv32u bufSize;
    ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
    integralBuffer.create(1, bufSize, CV_8UC1);
#endif

    candidates.create(1, frame.width >> 1, CV_32SC4);
}
@ -722,240 +756,3 @@ bool cv::gpu::CascadeClassifier_GPU::load(const String& filename)
}

#endif

//////////////////////////////////////////////////////////////////////////////////////////////////////

#if defined (HAVE_CUDA)

struct RectConvert
{
    Rect operator()(const NcvRect32u& nr) const { return Rect(nr.x, nr.y, nr.width, nr.height); }
    NcvRect32u operator()(const Rect& nr) const
    {
        NcvRect32u rect;
        rect.x = nr.x;
        rect.y = nr.y;
        rect.width = nr.width;
        rect.height = nr.height;
        return rect;
    }
};

void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights)
{
    std::vector<Rect> rects(hypotheses.size());
    std::transform(hypotheses.begin(), hypotheses.end(), rects.begin(), RectConvert());

    if (weights)
    {
        std::vector<int> weights_int;
        weights_int.assign(weights->begin(), weights->end());
        cv::groupRectangles(rects, weights_int, groupThreshold, eps);
    }
    else
    {
        cv::groupRectangles(rects, groupThreshold, eps);
    }
    std::transform(rects.begin(), rects.end(), hypotheses.begin(), RectConvert());
    hypotheses.resize(rects.size());
}

NCVStatus loadFromXML(const String &filename,
                      HaarClassifierCascadeDescriptor &haar,
                      std::vector<HaarStage64> &haarStages,
                      std::vector<HaarClassifierNode128> &haarClassifierNodes,
                      std::vector<HaarFeature64> &haarFeatures)
{
    NCVStatus ncvStat;

    haar.NumStages = 0;
    haar.NumClassifierRootNodes = 0;
    haar.NumClassifierTotalNodes = 0;
    haar.NumFeatures = 0;
    haar.ClassifierSize.width = 0;
    haar.ClassifierSize.height = 0;
    haar.bHasStumpsOnly = true;
    haar.bNeedsTiltedII = false;
    Ncv32u curMaxTreeDepth;

    std::vector<char> xmlFileCont;

    std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
    haarStages.resize(0);
    haarClassifierNodes.resize(0);
    haarFeatures.resize(0);

    Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
    if (oldCascade.empty())
    {
        return NCV_HAAR_XML_LOADING_EXCEPTION;
    }

    haar.ClassifierSize.width = oldCascade->orig_window_size.width;
    haar.ClassifierSize.height = oldCascade->orig_window_size.height;

    int stagesCount = oldCascade->count;
    for(int s = 0; s < stagesCount; ++s) // by stages
    {
        HaarStage64 curStage;
        curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));

        curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);

        int treesCount = oldCascade->stage_classifier[s].count;
        for(int t = 0; t < treesCount; ++t) // by trees
        {
            Ncv32u nodeId = 0;
            CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];

            int nodesCount = tree->count;
            for(int n = 0; n < nodesCount; ++n) // by features
            {
                CvHaarFeature* feature = &tree->haar_feature[n];

                HaarClassifierNode128 curNode;
                curNode.setThreshold(tree->threshold[n]);

                NcvBool bIsLeftNodeLeaf = false;
                NcvBool bIsRightNodeLeaf = false;

                HaarClassifierNodeDescriptor32 nodeLeft;
                if ( tree->left[n] <= 0 )
                {
                    Ncv32f leftVal = tree->alpha[-tree->left[n]];
                    ncvStat = nodeLeft.create(leftVal);
                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
                    bIsLeftNodeLeaf = true;
                }
                else
                {
                    Ncv32u leftNodeOffset = tree->left[n];
                    nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
                    haar.bHasStumpsOnly = false;
                }
                curNode.setLeftNodeDesc(nodeLeft);

                HaarClassifierNodeDescriptor32 nodeRight;
                if ( tree->right[n] <= 0 )
                {
                    Ncv32f rightVal = tree->alpha[-tree->right[n]];
                    ncvStat = nodeRight.create(rightVal);
                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
                    bIsRightNodeLeaf = true;
                }
                else
                {
                    Ncv32u rightNodeOffset = tree->right[n];
                    nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
                    haar.bHasStumpsOnly = false;
                }
                curNode.setRightNodeDesc(nodeRight);

                Ncv32u tiltedVal = feature->tilted;
                haar.bNeedsTiltedII = (tiltedVal != 0);

                Ncv32u featureId = 0;
                for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) // by rects
                {
                    Ncv32u rectX = feature->rect[l].r.x;
                    Ncv32u rectY = feature->rect[l].r.y;
                    Ncv32u rectWidth = feature->rect[l].r.width;
                    Ncv32u rectHeight = feature->rect[l].r.height;

                    Ncv32f rectWeight = feature->rect[l].weight;

                    if (rectWeight == 0/* && rectX == 0 && rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
                        break;

                    HaarFeature64 curFeature;
                    ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
                    curFeature.setWeight(rectWeight);
                    ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
                    haarFeatures.push_back(curFeature);

                    featureId++;
                }

                HaarFeatureDescriptor32 tmpFeatureDesc;
                ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
                    featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
                ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
                curNode.setFeatureDesc(tmpFeatureDesc);

                if (!nodeId)
                {
                    // root node
                    haarClassifierNodes.push_back(curNode);
                    curMaxTreeDepth = 1;
                }
                else
                {
                    // other node
                    h_TmpClassifierNotRootNodes.push_back(curNode);
                    curMaxTreeDepth++;
                }

                nodeId++;
            }
        }

        curStage.setNumClassifierRootNodes(treesCount);
        haarStages.push_back(curStage);
    }

    // fill in cascade stats
    haar.NumStages = static_cast<Ncv32u>(haarStages.size());
    haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
    haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
    haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());

    // merge root and leaf nodes in one classifiers array
    Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
    for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
    {
        HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();

        HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
        if (!featureDesc.isLeftNodeLeaf())
        {
            Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
            nodeLeft.create(newOffset);
        }
        haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);

        HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
        if (!featureDesc.isRightNodeLeaf())
        {
            Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
            nodeRight.create(newOffset);
        }
        haarClassifierNodes[i].setRightNodeDesc(nodeRight);
    }

    for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
    {
        HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();

        HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
        if (!featureDesc.isLeftNodeLeaf())
        {
            Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
            nodeLeft.create(newOffset);
        }
        h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);

        HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
        if (!featureDesc.isRightNodeLeaf())
        {
            Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
            nodeRight.create(newOffset);
        }
        h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);

        haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
    }

    return NCV_SUCCESS;
}

#endif /* HAVE_CUDA */
@ -1,201 +0,0 @@
/*
 * NV12ToARGB color space conversion CUDA kernel
 *
 * This sample uses CUDA to convert a simple NV12 (YUV 4:2:0 planar)
 * source into output in ARGB format
 */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"

namespace cv { namespace gpu { namespace cudev {
    namespace video_decoding
    {
        __constant__ uint constAlpha = ((uint)0xff << 24);

        __constant__ float constHueColorSpaceMat[9];

        void loadHueCSC(float hueCSC[9])
        {
            cudaSafeCall( cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, 9 * sizeof(float)) );
        }

        __device__ void YUV2RGB(const uint* yuvi, float* red, float* green, float* blue)
        {
            float luma, chromaCb, chromaCr;

            // Prepare for hue adjustment
            luma     = (float)yuvi[0];
            chromaCb = (float)((int)yuvi[1] - 512.0f);
            chromaCr = (float)((int)yuvi[2] - 512.0f);

            // Convert YUV to RGB with hue adjustment
            *red   = (luma * constHueColorSpaceMat[0]) +
                     (chromaCb * constHueColorSpaceMat[1]) +
                     (chromaCr * constHueColorSpaceMat[2]);

            *green = (luma * constHueColorSpaceMat[3]) +
                     (chromaCb * constHueColorSpaceMat[4]) +
                     (chromaCr * constHueColorSpaceMat[5]);

            *blue  = (luma * constHueColorSpaceMat[6]) +
                     (chromaCb * constHueColorSpaceMat[7]) +
                     (chromaCr * constHueColorSpaceMat[8]);
        }

        __device__ uint RGBAPACK_10bit(float red, float green, float blue, uint alpha)
        {
            uint ARGBpixel = 0;

            // Clamp final 10 bit results
            red   = ::fmin(::fmax(red,   0.0f), 1023.f);
            green = ::fmin(::fmax(green, 0.0f), 1023.f);
            blue  = ::fmin(::fmax(blue,  0.0f), 1023.f);

            // Convert to 8 bit unsigned integers per color component
            ARGBpixel = (((uint)blue  >> 2) |
                        (((uint)green >> 2) << 8)  |
                        (((uint)red   >> 2) << 16) |
                        (uint)alpha);

            return ARGBpixel;
        }

        // CUDA kernel for outputting the final ARGB output from NV12

        #define COLOR_COMPONENT_BIT_SIZE 10
        #define COLOR_COMPONENT_MASK     0x3FF

        __global__ void NV12ToARGB(uchar* srcImage, size_t nSourcePitch,
                                   uint* dstImage, size_t nDestPitch,
                                   uint width, uint height)
        {
            // Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
            const int x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x >= width || y >= height)
                return;

            // Read 2 luma components at a time, so we don't waste processing since CbCr are decimated this way.
            // If we moved to texture memory we could read 4 luminance values.

            uint yuv101010Pel[2];

            yuv101010Pel[0] = (srcImage[y * nSourcePitch + x    ]) << 2;
            yuv101010Pel[1] = (srcImage[y * nSourcePitch + x + 1]) << 2;

            const size_t chromaOffset = nSourcePitch * height;

            const int y_chroma = y >> 1;

            if (y & 1)  // odd scanline ?
            {
                uint chromaCb = srcImage[chromaOffset + y_chroma * nSourcePitch + x    ];
                uint chromaCr = srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1];

                if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
                {
                    chromaCb = (chromaCb + srcImage[chromaOffset + (y_chroma + 1) * nSourcePitch + x    ] + 1) >> 1;
                    chromaCr = (chromaCr + srcImage[chromaOffset + (y_chroma + 1) * nSourcePitch + x + 1] + 1) >> 1;
                }

                yuv101010Pel[0] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE       + 2));
                yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));

                yuv101010Pel[1] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE       + 2));
                yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
            }
            else
            {
                yuv101010Pel[0] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x    ] << ( COLOR_COMPONENT_BIT_SIZE       + 2));
                yuv101010Pel[0] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));

                yuv101010Pel[1] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x    ] << ( COLOR_COMPONENT_BIT_SIZE       + 2));
                yuv101010Pel[1] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
            }

            // this step performs the color conversion
            uint yuvi[6];
            float red[2], green[2], blue[2];

            yuvi[0] =  (yuv101010Pel[0] &   COLOR_COMPONENT_MASK    );
            yuvi[1] = ((yuv101010Pel[0] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
            yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

            yuvi[3] =  (yuv101010Pel[1] &   COLOR_COMPONENT_MASK    );
            yuvi[4] = ((yuv101010Pel[1] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
            yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

            // YUV to RGB transformation
            YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
            YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);

            // Clamp the results to RGBA

            const size_t dstImagePitch = nDestPitch >> 2;

            dstImage[y * dstImagePitch + x    ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
            dstImage[y * dstImagePitch + x + 1] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
        }

        void NV12ToARGB_gpu(const PtrStepb decodedFrame, PtrStepSz<uint> interopFrame, cudaStream_t stream)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(interopFrame.cols, 2 * block.x), divUp(interopFrame.rows, block.y));

            NV12ToARGB<<<grid, block, 0, stream>>>(decodedFrame.data, decodedFrame.step, interopFrame.data, interopFrame.step,
                                                   interopFrame.cols, interopFrame.rows);

            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    }
}}}

#endif /* CUDA_DISABLER */
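
For orientation: each 8-bit component is promoted to 10 bits (value << 2) and three components are packed per pixel into one 32-bit word, Y at bits 0-9, Cb at bits 10-19, Cr at bits 20-29; YUV2RGB then treats 512 as the 10-bit chroma midpoint and RGBAPACK_10bit clamps to [0, 1023] before dropping back to 8 bits per channel with >> 2. A host-side sketch of the same pack/unpack arithmetic:

    #include <cassert>
    #include <cstdint>

    // Pack/unpack of the 10:10:10 word built by the NV12ToARGB kernel (illustration).
    int main()
    {
        const uint32_t y = 100, cb = 120, cr = 240;            // 8-bit inputs
        const uint32_t pel = (y << 2) | ((cb << 2) << 10) | ((cr << 2) << 20);

        assert(( pel        & 0x3FF) == (y  << 2));            // 10-bit luma
        assert(((pel >> 10) & 0x3FF) == (cb << 2));            // 10-bit Cb
        assert(((pel >> 20) & 0x3FF) == (cr << 2));            // 10-bit Cr
        return 0;
    }
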
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,175 +0,0 @@

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"

namespace cv { namespace gpu { namespace cudev
{
    namespace video_encoding
    {
        __device__ __forceinline__ void rgbtoy(const uchar b, const uchar g, const uchar r, uchar& y)
        {
            y = static_cast<uchar>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
        }

        __device__ __forceinline__ void rgbtoyuv(const uchar b, const uchar g, const uchar r, uchar& y, uchar& u, uchar& v)
        {
            rgbtoy(b, g, r, y);
            u = static_cast<uchar>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
            v = static_cast<uchar>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
        }

        __global__ void Gray_to_YV12(const PtrStepSzb src, PtrStepb dst)
        {
            const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
            const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;

            if (x + 1 >= src.cols || y + 1 >= src.rows)
                return;

            // get pointers to the data
            const size_t planeSize = src.rows * dst.step;
            PtrStepb y_plane(dst.data, dst.step);
            PtrStepb u_plane(y_plane.data + planeSize, dst.step / 2);
            PtrStepb v_plane(u_plane.data + (planeSize / 4), dst.step / 2);

            uchar pix;
            uchar y_val, u_val, v_val;

            pix = src(y, x);
            rgbtoy(pix, pix, pix, y_val);
            y_plane(y, x) = y_val;

            pix = src(y, x + 1);
            rgbtoy(pix, pix, pix, y_val);
            y_plane(y, x + 1) = y_val;

            pix = src(y + 1, x);
            rgbtoy(pix, pix, pix, y_val);
            y_plane(y + 1, x) = y_val;

            pix = src(y + 1, x + 1);
            rgbtoyuv(pix, pix, pix, y_val, u_val, v_val);
            y_plane(y + 1, x + 1) = y_val;
            u_plane(y / 2, x / 2) = u_val;
            v_plane(y / 2, x / 2) = v_val;
        }

        template <typename T>
        __global__ void BGR_to_YV12(const PtrStepSz<T> src, PtrStepb dst)
        {
            const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
            const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;

            if (x + 1 >= src.cols || y + 1 >= src.rows)
                return;

            // get pointers to the data
            const size_t planeSize = src.rows * dst.step;
            PtrStepb y_plane(dst.data, dst.step);
            PtrStepb u_plane(y_plane.data + planeSize, dst.step / 2);
            PtrStepb v_plane(u_plane.data + (planeSize / 4), dst.step / 2);

            T pix;
            uchar y_val, u_val, v_val;

            pix = src(y, x);
            rgbtoy(pix.z, pix.y, pix.x, y_val);
            y_plane(y, x) = y_val;

            pix = src(y, x + 1);
            rgbtoy(pix.z, pix.y, pix.x, y_val);
            y_plane(y, x + 1) = y_val;

            pix = src(y + 1, x);
            rgbtoy(pix.z, pix.y, pix.x, y_val);
            y_plane(y + 1, x) = y_val;

            pix = src(y + 1, x + 1);
            rgbtoyuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
            y_plane(y + 1, x + 1) = y_val;
            u_plane(y / 2, x / 2) = u_val;
            v_plane(y / 2, x / 2) = v_val;
        }

        void Gray_to_YV12_caller(const PtrStepSzb src, PtrStepb dst)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));

            Gray_to_YV12<<<grid, block>>>(src, dst);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

        template <int cn>
        void BGR_to_YV12_caller(const PtrStepSzb src, PtrStepb dst)
        {
            typedef typename TypeVec<uchar, cn>::vec_type src_t;

            dim3 block(32, 8);
            dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));

            BGR_to_YV12<<<grid, block>>>(static_cast< PtrStepSz<src_t> >(src), dst);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

        void YV12_gpu(const PtrStepSzb src, int cn, PtrStepSzb dst)
        {
            typedef void (*func_t)(const PtrStepSzb src, PtrStepb dst);

            static const func_t funcs[] =
            {
                0, Gray_to_YV12_caller, 0, BGR_to_YV12_caller<3>, BGR_to_YV12_caller<4>
            };

            funcs[cn](src, dst);
        }
    }
}}}

#endif /* CUDA_DISABLER */
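
The plane arithmetic above is the YV12 layout: a full-resolution luma plane followed by two quarter-size chroma planes at half the stride. For a 640x480 destination with dst.step == 640 that gives planeSize = 480 * 640 = 307200 bytes of Y, one chroma plane at offset 307200 and the other at 307200 + 76800 = 384000, each with a 320-byte stride. A host-side sketch of those offsets:

    #include <cassert>
    #include <cstddef>

    // YV12 buffer offsets as computed by the kernels above (illustration).
    int main()
    {
        const std::size_t rows = 480, step = 640;                 // luma stride == width here
        const std::size_t planeSize = rows * step;                // 307200
        const std::size_t chroma1 = planeSize;                    // first chroma plane
        const std::size_t chroma2 = planeSize + planeSize / 4;    // second chroma plane
        const std::size_t chromaStep = step / 2;                  // chroma stride

        assert(chroma1 == 307200 && chroma2 == 384000 && chromaStep == 320);
        return 0;
    }
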
@ -1,188 +0,0 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
#include "precomp.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::gpu;
|
||||
|
||||
#ifdef HAVE_CUDA
|
||||
|
||||
namespace
|
||||
{
|
||||
#define error_entry(entry) { entry, #entry }
|
||||
|
||||
struct ErrorEntry
|
||||
{
|
||||
int code;
|
||||
const char* str;
|
||||
};
|
||||
|
||||
struct ErrorEntryComparer
|
||||
{
|
||||
int code;
|
||||
ErrorEntryComparer(int code_) : code(code_) {}
|
||||
bool operator()(const ErrorEntry& e) const { return e.code == code; }
|
||||
};
|
||||
|
||||
String getErrorString(int code, const ErrorEntry* errors, size_t n)
|
||||
{
|
||||
size_t idx = std::find_if(errors, errors + n, ErrorEntryComparer(code)) - errors;
|
||||
|
||||
const char* msg = (idx != n) ? errors[idx].str : "Unknown error code";
|
||||
String str = cv::format("%s [Code = %d]", msg, code);
|
||||
|
||||
return str;
|
||||
}
|
||||

    //////////////////////////////////////////////////////////////////////////
    // NCV errors

    const ErrorEntry ncv_errors [] =
    {
        error_entry( NCV_SUCCESS ),
        error_entry( NCV_UNKNOWN_ERROR ),
        error_entry( NCV_CUDA_ERROR ),
        error_entry( NCV_NPP_ERROR ),
        error_entry( NCV_FILE_ERROR ),
        error_entry( NCV_NULL_PTR ),
        error_entry( NCV_INCONSISTENT_INPUT ),
        error_entry( NCV_TEXTURE_BIND_ERROR ),
        error_entry( NCV_DIMENSIONS_INVALID ),
        error_entry( NCV_INVALID_ROI ),
        error_entry( NCV_INVALID_STEP ),
        error_entry( NCV_INVALID_SCALE ),
        error_entry( NCV_ALLOCATOR_NOT_INITIALIZED ),
        error_entry( NCV_ALLOCATOR_BAD_ALLOC ),
        error_entry( NCV_ALLOCATOR_BAD_DEALLOC ),
        error_entry( NCV_ALLOCATOR_INSUFFICIENT_CAPACITY ),
        error_entry( NCV_ALLOCATOR_DEALLOC_ORDER ),
        error_entry( NCV_ALLOCATOR_BAD_REUSE ),
        error_entry( NCV_MEM_COPY_ERROR ),
        error_entry( NCV_MEM_RESIDENCE_ERROR ),
        error_entry( NCV_MEM_INSUFFICIENT_CAPACITY ),
        error_entry( NCV_HAAR_INVALID_PIXEL_STEP ),
        error_entry( NCV_HAAR_TOO_MANY_FEATURES_IN_CLASSIFIER ),
        error_entry( NCV_HAAR_TOO_MANY_FEATURES_IN_CASCADE ),
        error_entry( NCV_HAAR_TOO_LARGE_FEATURES ),
        error_entry( NCV_HAAR_XML_LOADING_EXCEPTION ),
        error_entry( NCV_NOIMPL_HAAR_TILTED_FEATURES ),
        error_entry( NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW ),
        error_entry( NPPST_SUCCESS ),
        error_entry( NPPST_ERROR ),
        error_entry( NPPST_CUDA_KERNEL_EXECUTION_ERROR ),
        error_entry( NPPST_NULL_POINTER_ERROR ),
        error_entry( NPPST_TEXTURE_BIND_ERROR ),
        error_entry( NPPST_MEMCPY_ERROR ),
        error_entry( NPPST_MEM_ALLOC_ERR ),
        error_entry( NPPST_MEMFREE_ERR ),
        error_entry( NPPST_INVALID_ROI ),
        error_entry( NPPST_INVALID_STEP ),
        error_entry( NPPST_INVALID_SCALE ),
        error_entry( NPPST_MEM_INSUFFICIENT_BUFFER ),
        error_entry( NPPST_MEM_RESIDENCE_ERROR ),
        error_entry( NPPST_MEM_INTERNAL_ERROR )
    };

    const size_t ncv_error_num = sizeof(ncv_errors) / sizeof(ncv_errors[0]);

    //////////////////////////////////////////////////////////////////////////
    // CUFFT errors

    const ErrorEntry cufft_errors[] =
    {
        error_entry( CUFFT_INVALID_PLAN ),
        error_entry( CUFFT_ALLOC_FAILED ),
        error_entry( CUFFT_INVALID_TYPE ),
        error_entry( CUFFT_INVALID_VALUE ),
        error_entry( CUFFT_INTERNAL_ERROR ),
        error_entry( CUFFT_EXEC_FAILED ),
        error_entry( CUFFT_SETUP_FAILED ),
        error_entry( CUFFT_INVALID_SIZE ),
        error_entry( CUFFT_UNALIGNED_DATA )
    };

    const int cufft_error_num = sizeof(cufft_errors) / sizeof(cufft_errors[0]);

    //////////////////////////////////////////////////////////////////////////
    // CUBLAS errors

    const ErrorEntry cublas_errors[] =
    {
        error_entry( CUBLAS_STATUS_SUCCESS ),
        error_entry( CUBLAS_STATUS_NOT_INITIALIZED ),
        error_entry( CUBLAS_STATUS_ALLOC_FAILED ),
        error_entry( CUBLAS_STATUS_INVALID_VALUE ),
        error_entry( CUBLAS_STATUS_ARCH_MISMATCH ),
        error_entry( CUBLAS_STATUS_MAPPING_ERROR ),
        error_entry( CUBLAS_STATUS_EXECUTION_FAILED ),
        error_entry( CUBLAS_STATUS_INTERNAL_ERROR )
    };

    const int cublas_error_num = sizeof(cublas_errors) / sizeof(cublas_errors[0]);
}

namespace cv
{
    namespace gpu
    {
        void ncvError(int code, const char* file, const int line, const char* func)
        {
            String msg = getErrorString(code, ncv_errors, ncv_error_num);
            cv::error(cv::Error::GpuApiCallError, msg, func, file, line);
        }

        void cufftError(int code, const char* file, const int line, const char* func)
        {
            String msg = getErrorString(code, cufft_errors, cufft_error_num);
            cv::error(cv::Error::GpuApiCallError, msg, func, file, line);
        }

        void cublasError(int code, const char* file, const int line, const char* func)
        {
            String msg = getErrorString(code, cublas_errors, cublas_error_num);
            cv::error(cv::Error::GpuApiCallError, msg, func, file, line);
        }
    }
}

#endif
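The error_entry macro above relies on the preprocessor stringizing operator, so each table row pairs a status code with its own spelled-out name, and getErrorString does a linear lookup over the table. A minimal self-contained sketch of the same pattern, using hypothetical FOO_* codes that are not part of this module:

    #include <cstdio>

    #define error_entry(entry) { entry, #entry }

    enum { FOO_OK = 0, FOO_BAD_ARG = 1 };              // hypothetical status codes
    struct ErrorEntry { int code; const char* str; };

    static const ErrorEntry foo_errors[] = { error_entry(FOO_OK), error_entry(FOO_BAD_ARG) };
    static const size_t foo_error_num = sizeof(foo_errors) / sizeof(foo_errors[0]);

    int main()
    {
        int code = FOO_BAD_ARG;
        const char* msg = "Unknown error code";
        for (size_t i = 0; i < foo_error_num; ++i)
            if (foo_errors[i].code == code) { msg = foo_errors[i].str; break; }
        std::printf("%s [Code = %d]\n", msg, code);    // prints: FOO_BAD_ARG [Code = 1]
        return 0;
    }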
File diff suppressed because it is too large
@ -43,64 +43,18 @@

#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__

#if defined _MSC_VER && _MSC_VER >= 1200
    #pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif

#include <cstring>
#include <iostream>
#include <limits>
#include <vector>
#include <algorithm>
#include <sstream>
#include <exception>
#include <iterator>
#include <functional>
#include <utility>
#include <deque>
#include <stdexcept>
#include <memory>

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/gpu.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/gpuarithm.hpp"
#include "opencv2/gpuwarping.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/video.hpp"
#include "opencv2/objdetect.hpp"

#include "opencv2/core/private.hpp"
#include "opencv2/core/gpu_private.hpp"

#ifdef HAVE_CUDA
    #ifdef HAVE_CUFFT
        #include <cufft.h>
    #endif
    #include "opencv2/opencv_modules.hpp"

    #ifdef HAVE_CUBLAS
        #include <cublas.h>
    #endif

    #ifdef HAVE_NVCUVID
        #include <nvcuvid.h>

        #ifdef WIN32
            #include <windows.h>
            #undef small
            #undef min
            #undef max
            #undef abs

            #include <NVEncoderAPI.h>
        #endif
    #endif

    #include "internal_shared.hpp"
    #include "opencv2/core/stream_accessor.hpp"

    #include "nvidia/core/NCV.hpp"
    #include "nvidia/NPP_staging/NPP_staging.hpp"
    #include "nvidia/NCVHaarObjectDetection.hpp"
    #include "nvidia/NCVBroxOpticalFlow.hpp"
#endif /* defined(HAVE_CUDA) */

#ifdef HAVE_OPENCV_GPULEGACY
#  include "opencv2/gpulegacy/private.hpp"
#endif

#endif /* __OPENCV_PRECOMP_H__ */
@ -1,168 +0,0 @@
/*M/// OpenCV license header (same text as the header reproduced in full above) //M*/

#include "precomp.hpp"

using namespace cv;

namespace cv {
    // TODO: conflicts with calib3d.hpp : filterSpeckles, should be removed ?

    //! Speckle filtering - filters small connected components on a disparity image.
    //! It sets pixel (x,y) to newVal if it corresponds to a small CC with size < maxSpeckleSize.
    //! The threshold for the border between CCs is diffThreshold.
    CV_EXPORTS void filterSpeckles(Mat& img, uchar newVal, int maxSpeckleSize, uchar diffThreshold, Mat& buf);
}

typedef Point_<short> Point2s;

void cv::filterSpeckles( Mat& img, uchar newVal, int maxSpeckleSize, uchar maxDiff, Mat& _buf)
{
    int MaxD = 1024;
    int WinSz = 64;

    int bufSize0 = (MaxD + 2)*sizeof(int) + (img.rows+WinSz+2)*MaxD*sizeof(int) +
                   (img.rows + WinSz + 2)*sizeof(int) +
                   (img.rows+WinSz+2)*MaxD*(WinSz+1)*sizeof(uchar) + 256;
    int bufSize1 = (img.cols + 9 + 2) * sizeof(int) + 256;
    int bufSz = std::max(bufSize0 * 1, bufSize1 * 2);

    _buf.create(1, bufSz, CV_8U);

    CV_Assert( img.type() == CV_8U );

    int width = img.cols, height = img.rows, npixels = width*height;
    size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
    if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
        _buf.create(1, (int)bufSize, CV_8U);

    uchar* buf = _buf.data;
    int i, j, dstep = (int)(img.step/sizeof(uchar));
    int* labels = (int*)buf;
    buf += npixels*sizeof(labels[0]);
    Point2s* wbuf = (Point2s*)buf;
    buf += npixels*sizeof(wbuf[0]);
    uchar* rtype = (uchar*)buf;
    int curlabel = 0;

    // clear out label assignments
    memset(labels, 0, npixels*sizeof(labels[0]));

    for( i = 0; i < height; i++ )
    {
        uchar* ds = img.ptr<uchar>(i);
        int* ls = labels + width*i;

        for( j = 0; j < width; j++ )
        {
            if( ds[j] != newVal )       // not a bad disparity
            {
                if( ls[j] )             // has a label, check for bad label
                {
                    if( rtype[ls[j]] )  // small region, zero out disparity
                        ds[j] = (uchar)newVal;
                }
                // no label, assign and propagate
                else
                {
                    Point2s* ws = wbuf;                 // initialize wavefront
                    Point2s p((short)j, (short)i);      // current pixel
                    curlabel++;                         // next label
                    int count = 0;                      // current region size
                    ls[j] = curlabel;

                    // wavefront propagation
                    while( ws >= wbuf )                 // wavefront not empty
                    {
                        count++;
                        // put neighbors onto wavefront
                        uchar* dpp = &img.at<uchar>(p.y, p.x);
                        uchar dp = *dpp;
                        int* lpp = labels + width*p.y + p.x;

                        if( p.x < width-1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff )
                        {
                            lpp[+1] = curlabel;
                            *ws++ = Point2s(p.x+1, p.y);
                        }

                        if( p.x > 0 && !lpp[-1] && dpp[-1] != newVal && std::abs(dp - dpp[-1]) <= maxDiff )
                        {
                            lpp[-1] = curlabel;
                            *ws++ = Point2s(p.x-1, p.y);
                        }

                        if( p.y < height-1 && !lpp[+width] && dpp[+dstep] != newVal && std::abs(dp - dpp[+dstep]) <= maxDiff )
                        {
                            lpp[+width] = curlabel;
                            *ws++ = Point2s(p.x, p.y+1);
                        }

                        if( p.y > 0 && !lpp[-width] && dpp[-dstep] != newVal && std::abs(dp - dpp[-dstep]) <= maxDiff )
                        {
                            lpp[-width] = curlabel;
                            *ws++ = Point2s(p.x, p.y-1);
                        }

                        // pop most recent and propagate
                        // NB: could try least recent, maybe better convergence
                        p = *--ws;
                    }

                    // assign label type
                    if( count <= maxSpeckleSize )   // speckle region
                    {
                        rtype[ls[j]] = 1;           // small region label
                        ds[j] = (uchar)newVal;
                    }
                    else
                    {
                        rtype[ls[j]] = 0;           // large region label
                    }
                }
            }
        }
    }
}
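A minimal usage sketch for the function defined above, assuming its declaration is visible and a CV_8U disparity map where 0 marks invalid pixels (the input file name is hypothetical):

    cv::Mat disp = cv::imread("disparity.png", cv::IMREAD_GRAYSCALE); // CV_8U disparity map
    cv::Mat buf;                                                      // scratch buffer, reused across calls

    // zero out connected components smaller than 100 px whose internal
    // disparity jumps stay within 4 levels
    cv::filterSpeckles(disp, /*newVal*/ 0, /*maxSpeckleSize*/ 100, /*diffThreshold*/ 4, buf);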
@ -1,171 +0,0 @@
/*M/// OpenCV license header (same text as the header reproduced in full above) //M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

void cv::gpu::merge(const GpuMat* /*src*/, size_t /*count*/, GpuMat& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
void cv::gpu::merge(const std::vector<GpuMat>& /*src*/, GpuMat& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
void cv::gpu::split(const GpuMat& /*src*/, GpuMat* /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
void cv::gpu::split(const GpuMat& /*src*/, std::vector<GpuMat>& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }

#else /* !defined (HAVE_CUDA) */

namespace cv { namespace gpu { namespace cudev
{
    namespace split_merge
    {
        void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst, int total_channels, size_t elem_size, const cudaStream_t& stream);
        void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream);
    }
}}}

namespace
{
    void merge(const GpuMat* src, size_t n, GpuMat& dst, const cudaStream_t& stream)
    {
        using namespace ::cv::gpu::cudev::split_merge;

        CV_Assert(src);
        CV_Assert(n > 0);

        int depth = src[0].depth();
        Size size = src[0].size();

        if (depth == CV_64F)
        {
            if (!deviceSupports(NATIVE_DOUBLE))
                CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
        }

        bool single_channel_only = true;
        int total_channels = 0;

        for (size_t i = 0; i < n; ++i)
        {
            CV_Assert(src[i].size() == size);
            CV_Assert(src[i].depth() == depth);
            single_channel_only = single_channel_only && src[i].channels() == 1;
            total_channels += src[i].channels();
        }

        CV_Assert(single_channel_only);
        CV_Assert(total_channels <= 4);

        if (total_channels == 1)
            src[0].copyTo(dst);
        else
        {
            dst.create(size, CV_MAKETYPE(depth, total_channels));

            PtrStepSzb src_as_devmem[4];
            for(size_t i = 0; i < n; ++i)
                src_as_devmem[i] = src[i];

            PtrStepSzb dst_as_devmem(dst);
            merge_caller(src_as_devmem, dst_as_devmem, total_channels, CV_ELEM_SIZE(depth), stream);
        }
    }

    void split(const GpuMat& src, GpuMat* dst, const cudaStream_t& stream)
    {
        using namespace ::cv::gpu::cudev::split_merge;

        CV_Assert(dst);

        int depth = src.depth();
        int num_channels = src.channels();

        if (depth == CV_64F)
        {
            if (!deviceSupports(NATIVE_DOUBLE))
                CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
        }

        if (num_channels == 1)
        {
            src.copyTo(dst[0]);
            return;
        }

        for (int i = 0; i < num_channels; ++i)
            dst[i].create(src.size(), depth);

        CV_Assert(num_channels <= 4);

        PtrStepSzb dst_as_devmem[4];
        for (int i = 0; i < num_channels; ++i)
            dst_as_devmem[i] = dst[i];

        PtrStepSzb src_as_devmem(src);
        split_caller(src_as_devmem, dst_as_devmem, num_channels, src.elemSize1(), stream);
    }
}

void cv::gpu::merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream)
{
    ::merge(src, n, dst, StreamAccessor::getStream(stream));
}

void cv::gpu::merge(const std::vector<GpuMat>& src, GpuMat& dst, Stream& stream)
{
    ::merge(&src[0], src.size(), dst, StreamAccessor::getStream(stream));
}

void cv::gpu::split(const GpuMat& src, GpuMat* dst, Stream& stream)
{
    ::split(src, dst, StreamAccessor::getStream(stream));
}

void cv::gpu::split(const GpuMat& src, std::vector<GpuMat>& dst, Stream& stream)
{
    dst.resize(src.channels());
    if(src.channels() > 0)
        ::split(src, &dst[0], StreamAccessor::getStream(stream));
}

#endif /* !defined (HAVE_CUDA) */
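A short host-side sketch of the API implemented above, splitting a 3-channel image into planes and merging them back (assumes a CUDA-capable device and a hypothetical input file):

    cv::Mat h_img = cv::imread("input.png");      // 8UC3 host image
    cv::gpu::GpuMat d_img(h_img), d_merged;
    std::vector<cv::gpu::GpuMat> d_planes;

    cv::gpu::split(d_img, d_planes);              // one CV_8UC1 GpuMat per channel
    cv::gpu::merge(d_planes, d_merged);           // back to CV_8UC3

    cv::Mat h_out(d_merged);                      // download the result to the host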
@ -1,254 +0,0 @@
/*M/// OpenCV license header (same text as the header reproduced in full above) //M*/

#include "thread_wrappers.h"

#if defined(HAVE_CUDA) && defined(HAVE_NVCUVID)

#ifdef WIN32
    #define NOMINMAX
    #include <windows.h>
#else
    #include <pthread.h>
    #include <unistd.h>
#endif

#ifdef WIN32
    class cv::gpu::detail::CriticalSection::Impl
    {
    public:
        Impl()
        {
            InitializeCriticalSection(&criticalSection_);
        }

        ~Impl()
        {
            DeleteCriticalSection(&criticalSection_);
        }

        void enter()
        {
            EnterCriticalSection(&criticalSection_);
        }

        void leave()
        {
            LeaveCriticalSection(&criticalSection_);
        }

    private:
        CRITICAL_SECTION criticalSection_;
    };
#else
    class cv::gpu::detail::CriticalSection::Impl
    {
    public:
        Impl()
        {
            pthread_mutexattr_t mutex_attribute;
            pthread_mutexattr_init(&mutex_attribute);
            pthread_mutexattr_settype(&mutex_attribute, PTHREAD_MUTEX_RECURSIVE);
            pthread_mutex_init(&mutex_, &mutex_attribute); // initialize with the recursive attribute
            pthread_mutexattr_destroy(&mutex_attribute);
        }

        ~Impl()
        {
            pthread_mutex_destroy(&mutex_);
        }

        void enter()
        {
            pthread_mutex_lock(&mutex_);
        }

        void leave()
        {
            pthread_mutex_unlock(&mutex_);
        }

    private:
        pthread_mutex_t mutex_;
    };
#endif

cv::gpu::detail::CriticalSection::CriticalSection() :
    impl_(new Impl)
{
}

cv::gpu::detail::CriticalSection::~CriticalSection()
{
}

void cv::gpu::detail::CriticalSection::enter()
{
    impl_->enter();
}

void cv::gpu::detail::CriticalSection::leave()
{
    impl_->leave();
}

#ifdef WIN32
    namespace
    {
        struct UserData
        {
            void (*func)(void* userData);
            void* param;
        };

        DWORD WINAPI WinThreadFunction(LPVOID lpParam)
        {
            UserData* userData = static_cast<UserData*>(lpParam);

            userData->func(userData->param);

            return 0;
        }
    }

    class cv::gpu::detail::Thread::Impl
    {
    public:
        Impl(void (*func)(void* userData), void* userData)
        {
            userData_.func = func;
            userData_.param = userData;

            thread_ = CreateThread(
                NULL,                   // default security attributes
                0,                      // use default stack size
                WinThreadFunction,      // thread function name
                &userData_,             // argument to thread function
                0,                      // use default creation flags
                &threadId_);            // returns the thread identifier
        }

        ~Impl()
        {
            CloseHandle(thread_);
        }

        void wait()
        {
            WaitForSingleObject(thread_, INFINITE);
        }

    private:
        UserData userData_;
        HANDLE thread_;
        DWORD threadId_;
    };
#else
    namespace
    {
        struct UserData
        {
            void (*func)(void* userData);
            void* param;
        };

        void* PThreadFunction(void* lpParam)
        {
            UserData* userData = static_cast<UserData*>(lpParam);

            userData->func(userData->param);

            return 0;
        }
    }

    class cv::gpu::detail::Thread::Impl
    {
    public:
        Impl(void (*func)(void* userData), void* userData)
        {
            userData_.func = func;
            userData_.param = userData;

            pthread_create(&thread_, NULL, PThreadFunction, &userData_);
        }

        ~Impl()
        {
            pthread_detach(thread_);
        }

        void wait()
        {
            pthread_join(thread_, NULL);
        }

    private:
        pthread_t thread_;
        UserData userData_;
    };
#endif

cv::gpu::detail::Thread::Thread(void (*func)(void* userData), void* userData) :
    impl_(new Impl(func, userData))
{
}

cv::gpu::detail::Thread::~Thread()
{
}

void cv::gpu::detail::Thread::wait()
{
    impl_->wait();
}

void cv::gpu::detail::Thread::sleep(int ms)
{
#ifdef WIN32
    ::Sleep(ms);
#else
    ::usleep(ms * 1000);
#endif
}

#endif // defined(HAVE_CUDA) && defined(HAVE_NVCUVID)
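A short sketch of how the detail wrappers above are driven; the worker function and its counter argument are hypothetical:

    static void countUp(void* userData)                    // worker run on the native thread
    {
        int* counter = static_cast<int*>(userData);
        for (int i = 0; i < 5; ++i)
        {
            ++(*counter);
            cv::gpu::detail::Thread::sleep(10);            // milliseconds; maps to Sleep()/usleep()
        }
    }

    void demo()
    {
        int counter = 0;
        cv::gpu::detail::Thread worker(countUp, &counter); // the thread starts immediately
        worker.wait();                                     // join before reading the result
        CV_Assert( counter == 5 );
    }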
@ -1,116 +0,0 @@
/*M/// OpenCV license header (same text as the header reproduced in full above) //M*/

#ifndef __VIDEO_DECODER_H__
#define __VIDEO_DECODER_H__

#include "precomp.hpp"

#if defined(HAVE_CUDA) && defined(HAVE_NVCUVID)

namespace cv { namespace gpu
{
    namespace detail
    {
        class VideoDecoder
        {
        public:
            VideoDecoder(const VideoReader_GPU::FormatInfo& videoFormat, CUvideoctxlock lock) : lock_(lock), decoder_(0)
            {
                create(videoFormat);
            }

            ~VideoDecoder()
            {
                release();
            }

            void create(const VideoReader_GPU::FormatInfo& videoFormat);
            void release();

            // Get the codec type currently in use.
            cudaVideoCodec codec() const { return createInfo_.CodecType; }
            unsigned long maxDecodeSurfaces() const { return createInfo_.ulNumDecodeSurfaces; }

            unsigned long frameWidth() const { return createInfo_.ulWidth; }
            unsigned long frameHeight() const { return createInfo_.ulHeight; }

            unsigned long targetWidth() const { return createInfo_.ulTargetWidth; }
            unsigned long targetHeight() const { return createInfo_.ulTargetHeight; }

            cudaVideoChromaFormat chromaFormat() const { return createInfo_.ChromaFormat; }

            bool decodePicture(CUVIDPICPARAMS* picParams)
            {
                return cuvidDecodePicture(decoder_, picParams) == CUDA_SUCCESS;
            }

            cv::gpu::GpuMat mapFrame(int picIdx, CUVIDPROCPARAMS& videoProcParams)
            {
                CUdeviceptr ptr;
                unsigned int pitch;

                cuSafeCall( cuvidMapVideoFrame(decoder_, picIdx, &ptr, &pitch, &videoProcParams) );

                // NV12 layout: luma plane plus interleaved chroma, hence height * 3 / 2 rows
                return GpuMat(targetHeight() * 3 / 2, targetWidth(), CV_8UC1, (void*) ptr, pitch);
            }

            void unmapFrame(cv::gpu::GpuMat& frame)
            {
                cuSafeCall( cuvidUnmapVideoFrame(decoder_, (CUdeviceptr) frame.data) );
                frame.release();
            }

        private:
            VideoDecoder(const VideoDecoder&);
            VideoDecoder& operator =(const VideoDecoder&);

            CUvideoctxlock lock_;
            CUVIDDECODECREATEINFO createInfo_;
            CUvideodecoder decoder_;
        };
    }
}}

#endif // HAVE_CUDA && HAVE_NVCUVID

#endif // __VIDEO_DECODER_H__
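A hedged sketch of the decode/map/unmap cycle the class above supports, restricted to the calls it defines; the surrounding format info, context lock, and the parser-filled parameter structs are assumed to exist in the caller:

    void decodeOne(const cv::gpu::VideoReader_GPU::FormatInfo& fmt, CUvideoctxlock lock,
                   CUVIDPICPARAMS* picParams, CUVIDPROCPARAMS& procParams)
    {
        cv::gpu::detail::VideoDecoder decoder(fmt, lock);

        if (!decoder.decodePicture(picParams))
            return;                                         // decode failed

        // map the decoded surface as an NV12 GpuMat (height * 3 / 2 rows)
        cv::gpu::GpuMat frame = decoder.mapFrame(picParams->CurrPicIdx, procParams);

        // ... color-convert or copy the frame here while it stays mapped ...

        decoder.unmapFrame(frame);
    }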
@ -1,100 +0,0 @@
/*M/// OpenCV license header (same text as the header reproduced in full above) //M*/

#ifndef __VIDEO_PARSER_H__
#define __VIDEO_PARSER_H__

#include "precomp.hpp"

#include "frame_queue.h"
#include "video_decoder.h"

#if defined(HAVE_CUDA) && defined(HAVE_NVCUVID)

namespace cv { namespace gpu
{
    namespace detail
    {
        class VideoParser
        {
        public:
            VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue);

            ~VideoParser()
            {
                cuvidDestroyVideoParser(parser_);
            }

            bool parseVideoData(const unsigned char* data, size_t size, bool endOfStream);

            bool hasError() const { return hasError_; }

        private:
            VideoParser(const VideoParser&);
            VideoParser& operator =(const VideoParser&);

            VideoDecoder* videoDecoder_;
            FrameQueue* frameQueue_;
            CUvideoparser parser_;
            int unparsedPackets_;
            volatile bool hasError_;

            // Called when the decoder encounters a video format change (or the initial sequence header).
            // This particular implementation of the callback returns 0 in case the video format changes
            // to something different than the original format. Returning 0 causes a stop of the app.
            static int CUDAAPI HandleVideoSequence(void* pUserData, CUVIDEOFORMAT* pFormat);

            // Called by the video parser to decode a single picture.
            // Since the parser will deliver data as fast as it can, we need to make sure that the picture
            // index we're attempting to use for decode is no longer used for display.
            static int CUDAAPI HandlePictureDecode(void* pUserData, CUVIDPICPARAMS* pPicParams);

            // Called by the video parser to display a video frame (in the case of field pictures, there may be
            // 2 decode calls per 1 display call, since two fields make up one frame).
            static int CUDAAPI HandlePictureDisplay(void* pUserData, CUVIDPARSERDISPINFO* pPicParams);
        };
    }
}}

#endif // HAVE_CUDA && HAVE_NVCUVID

#endif // __VIDEO_PARSER_H__
@ -46,123 +46,6 @@

using namespace cvtest;

//////////////////////////////////////////////////////////////////////////
// StereoBM

struct StereoBM : testing::TestWithParam<cv::gpu::DeviceInfo>
{
    cv::gpu::DeviceInfo devInfo;

    virtual void SetUp()
    {
        devInfo = GetParam();

        cv::gpu::setDevice(devInfo.deviceID());
    }
};

GPU_TEST_P(StereoBM, Regression)
{
    cv::Mat left_image  = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
    cv::Mat right_image = readImage("stereobm/aloe-R.png", cv::IMREAD_GRAYSCALE);
    cv::Mat disp_gold   = readImage("stereobm/aloe-disp.png", cv::IMREAD_GRAYSCALE);

    ASSERT_FALSE(left_image.empty());
    ASSERT_FALSE(right_image.empty());
    ASSERT_FALSE(disp_gold.empty());

    cv::gpu::StereoBM_GPU bm(0, 128, 19);
    cv::gpu::GpuMat disp;

    bm(loadMat(left_image), loadMat(right_image), disp);

    EXPECT_MAT_NEAR(disp_gold, disp, 0.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Calib3D, StereoBM, ALL_DEVICES);

//////////////////////////////////////////////////////////////////////////
// StereoBeliefPropagation

struct StereoBeliefPropagation : testing::TestWithParam<cv::gpu::DeviceInfo>
{
    cv::gpu::DeviceInfo devInfo;

    virtual void SetUp()
    {
        devInfo = GetParam();

        cv::gpu::setDevice(devInfo.deviceID());
    }
};

GPU_TEST_P(StereoBeliefPropagation, Regression)
{
    cv::Mat left_image  = readImage("stereobp/aloe-L.png");
    cv::Mat right_image = readImage("stereobp/aloe-R.png");
    cv::Mat disp_gold   = readImage("stereobp/aloe-disp.png", cv::IMREAD_GRAYSCALE);

    ASSERT_FALSE(left_image.empty());
    ASSERT_FALSE(right_image.empty());
    ASSERT_FALSE(disp_gold.empty());

    cv::gpu::StereoBeliefPropagation bp(64, 8, 2, 25, 0.1f, 15, 1, CV_16S);
    cv::gpu::GpuMat disp;

    bp(loadMat(left_image), loadMat(right_image), disp);

    cv::Mat h_disp(disp);
    h_disp.convertTo(h_disp, disp_gold.depth());

    EXPECT_MAT_NEAR(disp_gold, h_disp, 0.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Calib3D, StereoBeliefPropagation, ALL_DEVICES);

//////////////////////////////////////////////////////////////////////////
// StereoConstantSpaceBP

struct StereoConstantSpaceBP : testing::TestWithParam<cv::gpu::DeviceInfo>
{
    cv::gpu::DeviceInfo devInfo;

    virtual void SetUp()
    {
        devInfo = GetParam();

        cv::gpu::setDevice(devInfo.deviceID());
    }
};

GPU_TEST_P(StereoConstantSpaceBP, Regression)
{
    cv::Mat left_image  = readImage("csstereobp/aloe-L.png");
    cv::Mat right_image = readImage("csstereobp/aloe-R.png");

    cv::Mat disp_gold;

    if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
        disp_gold = readImage("csstereobp/aloe-disp.png", cv::IMREAD_GRAYSCALE);
    else
        disp_gold = readImage("csstereobp/aloe-disp_CC1X.png", cv::IMREAD_GRAYSCALE);

    ASSERT_FALSE(left_image.empty());
    ASSERT_FALSE(right_image.empty());
    ASSERT_FALSE(disp_gold.empty());

    cv::gpu::StereoConstantSpaceBP csbp(128, 16, 4, 4);
    cv::gpu::GpuMat disp;

    csbp(loadMat(left_image), loadMat(right_image), disp);

    cv::Mat h_disp(disp);
    h_disp.convertTo(h_disp, disp_gold.depth());

    EXPECT_MAT_NEAR(disp_gold, h_disp, 1.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Calib3D, StereoConstantSpaceBP, ALL_DEVICES);

///////////////////////////////////////////////////////////////////////////////////////////////////////
// transformPoints

@ -304,45 +187,4 @@ GPU_TEST_P(SolvePnPRansac, Accuracy)

INSTANTIATE_TEST_CASE_P(GPU_Calib3D, SolvePnPRansac, ALL_DEVICES);

////////////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D

PARAM_TEST_CASE(ReprojectImageTo3D, cv::gpu::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::gpu::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        cv::gpu::setDevice(devInfo.deviceID());
    }
};

GPU_TEST_P(ReprojectImageTo3D, Accuracy)
{
    cv::Mat disp = randomMat(size, depth, 5.0, 30.0);
    cv::Mat Q = randomMat(cv::Size(4, 4), CV_32FC1, 0.1, 1.0);

    cv::gpu::GpuMat dst;
    cv::gpu::reprojectImageTo3D(loadMat(disp, useRoi), dst, Q, 3);

    cv::Mat dst_gold;
    cv::reprojectImageTo3D(disp, dst_gold, Q, false);

    EXPECT_MAT_NEAR(dst_gold, dst, 1e-5);
}

INSTANTIATE_TEST_CASE_P(GPU_Calib3D, ReprojectImageTo3D, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U), MatDepth(CV_16S)),
    WHOLE_SUBMAT));

#endif // HAVE_CUDA
File diff suppressed because it is too large
modules/gpu/test/test_main.cpp (Normal file, 45 lines)
@ -0,0 +1,45 @@
/*M/// OpenCV license header (same text as the header reproduced in full above) //M*/

#include "test_precomp.hpp"

CV_GPU_TEST_MAIN("gpu")
@ -51,32 +51,15 @@

#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__

#include <cmath>
#include <ctime>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <functional>
#include <sstream>
#include <string>
#include <limits>
#include <algorithm>
#include <iterator>
#include <stdexcept>

#include "opencv2/core.hpp"
#include "opencv2/core/opengl.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_test.hpp"

#include "opencv2/gpu.hpp"

#include "interpolation.hpp"
#include "main_test_nvidia.h"

#include "opencv2/core/gpu_private.hpp"
#include "opencv2/objdetect.hpp"

#endif
modules/gpuarithm/CMakeLists.txt (Normal file, 29 lines)
@ -0,0 +1,29 @@

if(ANDROID OR IOS)
    ocv_module_disable(gpuarithm)
endif()

set(the_description "GPU-accelerated Operations on Matrices")

ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations)

ocv_add_module(gpuarithm opencv_core OPTIONAL opencv_gpulegacy)

ocv_module_include_directories()
ocv_glob_module_sources()

set(extra_libs "")

if(HAVE_CUBLAS)
    list(APPEND extra_libs ${CUDA_cublas_LIBRARY})
endif()

if(HAVE_CUFFT)
    list(APPEND extra_libs ${CUDA_cufft_LIBRARY})
endif()

ocv_create_module(${extra_libs})

ocv_add_precompiled_headers(${the_module})

ocv_add_accuracy_tests(DEPENDS_ON opencv_imgproc)
ocv_add_perf_tests(DEPENDS_ON opencv_imgproc)
modules/gpuarithm/doc/arithm.rst (Normal file, 211 lines)
@ -0,0 +1,211 @@

Arithm Operations on Matrices
=============================

.. highlight:: cpp



gpu::gemm
------------------
Performs generalized matrix multiplication.

.. ocv:function:: void gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null())

    :param src1: First multiplied input matrix that should have ``CV_32FC1`` , ``CV_64FC1`` , ``CV_32FC2`` , or ``CV_64FC2`` type.

    :param src2: Second multiplied input matrix of the same type as ``src1`` .

    :param alpha: Weight of the matrix product.

    :param src3: Third optional delta matrix added to the matrix product. It should have the same type as ``src1`` and ``src2`` .

    :param beta: Weight of ``src3`` .

    :param dst: Destination matrix. It has the proper size and the same type as input matrices.

    :param flags: Operation flags:

        * **GEMM_1_T** transpose ``src1``
        * **GEMM_2_T** transpose ``src2``
        * **GEMM_3_T** transpose ``src3``

    :param stream: Stream for the asynchronous version.

The function performs generalized matrix multiplication similar to the ``gemm`` functions in BLAS level 3. For example, ``gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`` corresponds to

.. math::

    \texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T

.. note:: Transposition operation doesn't support ``CV_64FC2`` input type.

.. seealso:: :ocv:func:`gemm`
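
A minimal usage sketch (host code; the matrix sizes are illustrative, not from the original): ::

    cv::gpu::GpuMat src1(64, 32, CV_32FC1), src2(64, 16, CV_32FC1);
    cv::gpu::GpuMat src3(32, 16, CV_32FC1), dst;

    // dst = 1.0 * src1^T * src2 + 0.5 * src3
    cv::gpu::gemm(src1, src2, 1.0, src3, 0.5, dst, cv::GEMM_1_T);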


gpu::mulSpectrums
---------------------
Performs a per-element multiplication of two Fourier spectrums.

.. ocv:function:: void gpu::mulSpectrums( const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream=Stream::Null() )

    :param a: First spectrum.

    :param b: Second spectrum with the same size and type as ``a`` .

    :param c: Destination spectrum.

    :param flags: Mock parameter used for CPU/GPU interfaces similarity.

    :param conjB: Optional flag to specify if the second spectrum needs to be conjugated before the multiplication.

Only full (not packed) ``CV_32FC2`` complex spectrums in the interleaved format are supported for now.

.. seealso:: :ocv:func:`mulSpectrums`



gpu::mulAndScaleSpectrums
-----------------------------
Performs a per-element multiplication of two Fourier spectrums and scales the result.

.. ocv:function:: void gpu::mulAndScaleSpectrums( const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream=Stream::Null() )

    :param a: First spectrum.

    :param b: Second spectrum with the same size and type as ``a`` .

    :param c: Destination spectrum.

    :param flags: Mock parameter used for CPU/GPU interfaces similarity.

    :param scale: Scale constant.

    :param conjB: Optional flag to specify if the second spectrum needs to be conjugated before the multiplication.

Only full (not packed) ``CV_32FC2`` complex spectrums in the interleaved format are supported for now.

.. seealso:: :ocv:func:`mulSpectrums`



gpu::dft
------------
Performs a forward or inverse discrete Fourier transform (1D or 2D) of the floating point matrix.

.. ocv:function:: void gpu::dft( const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream=Stream::Null() )

    :param src: Source matrix (real or complex).

    :param dst: Destination matrix (real or complex).

    :param dft_size: Size of a discrete Fourier transform.

    :param flags: Optional flags:

        * **DFT_ROWS** transforms each individual row of the source matrix.

        * **DFT_SCALE** scales the result: divide it by the number of elements in the transform (obtained from ``dft_size`` ).

        * **DFT_INVERSE** inverts DFT. Use for complex-complex cases (real-complex and complex-real cases are always forward and inverse, respectively).

        * **DFT_REAL_OUTPUT** specifies the output as real. The source matrix is the result of a real-complex transform, so the destination matrix must be real.

Use to handle real matrices ( ``CV_32FC1`` ) and complex matrices in the interleaved format ( ``CV_32FC2`` ).

The source matrix should be continuous, otherwise reallocation and data copying is performed. The function chooses an operation mode depending on the flags, size, and channel count of the source matrix:

    * If the source matrix is complex and the output is not specified as real, the destination matrix is complex and has the ``dft_size`` size and ``CV_32FC2`` type. The destination matrix contains a full result of the DFT (forward or inverse).

    * If the source matrix is complex and the output is specified as real, the function assumes that its input is the result of the forward transform (see the next item). The destination matrix has the ``dft_size`` size and ``CV_32FC1`` type. It contains the result of the inverse DFT.

    * If the source matrix is real (its type is ``CV_32FC1`` ), forward DFT is performed. The result of the DFT is packed into a complex ( ``CV_32FC2`` ) matrix. So, the width of the destination matrix is ``dft_size.width / 2 + 1`` . But if the source is a single column, the height is reduced instead of the width.

.. seealso:: :ocv:func:`dft`
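
For example, a forward transform of a real matrix followed by its inverse (a sketch; the size is illustrative): ::

    cv::gpu::GpuMat src(128, 128, CV_32FC1), spect, restored;

    // forward: the real input packs into a 128 x (128/2 + 1) CV_32FC2 spectrum
    cv::gpu::dft(src, spect, src.size());

    // inverse: request a real output and undo the scaling
    cv::gpu::dft(spect, restored, src.size(), cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);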


gpu::ConvolveBuf
----------------
.. ocv:struct:: gpu::ConvolveBuf

Class providing a memory buffer for the :ocv:func:`gpu::convolve` function and allowing some of its specific parameters to be adjusted. ::

    struct CV_EXPORTS ConvolveBuf
    {
        Size result_size;
        Size block_size;
        Size user_block_size;
        Size dft_size;
        int spect_len;

        GpuMat image_spect, templ_spect, result_spect;
        GpuMat image_block, templ_block, result_data;

        void create(Size image_size, Size templ_size);
        static Size estimateBlockSize(Size result_size, Size templ_size);
    };

You can use the field ``user_block_size`` to set a specific block size for the :ocv:func:`gpu::convolve` function. If you leave its default value ``Size(0,0)``, automatic estimation of the block size is used (which is optimized for speed). By varying ``user_block_size``, you can reduce memory requirements at the cost of speed.



gpu::ConvolveBuf::create
------------------------
.. ocv:function:: gpu::ConvolveBuf::create(Size image_size, Size templ_size)

Constructs a buffer for the :ocv:func:`gpu::convolve` function with respective arguments.



gpu::convolve
-----------------
Computes a convolution (or cross-correlation) of two images.

.. ocv:function:: void gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr=false)

.. ocv:function:: void gpu::convolve( const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream=Stream::Null() )

    :param image: Source image. Only ``CV_32FC1`` images are supported for now.

    :param templ: Template image. The size is not greater than the ``image`` size. The type is the same as ``image`` .

    :param result: Result image. If ``image`` is *W x H* and ``templ`` is *w x h*, then ``result`` must be *W-w+1 x H-h+1*.

    :param ccorr: Flag to evaluate cross-correlation instead of convolution.

    :param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::ConvolveBuf`.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`gpu::filter2D`
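
For repeated calls with the same image and template sizes, reusing a buffer avoids reallocations, and ``user_block_size`` trades memory for speed (a sketch; the sizes are illustrative): ::

    cv::gpu::GpuMat image(512, 512, CV_32FC1), templ(16, 16, CV_32FC1), result;

    cv::gpu::ConvolveBuf buf;
    buf.user_block_size = cv::Size(128, 128);  // smaller blocks, lower memory use

    cv::gpu::convolve(image, templ, result, /*ccorr*/ true, buf);  // result is 497 x 497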


gpu::integral
-----------------
Computes an integral image.

.. ocv:function:: void gpu::integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null())

    :param src: Source image. Only ``CV_8UC1`` images are supported for now.

    :param sum: Integral image containing 32-bit unsigned integer values packed into ``CV_32SC1`` .

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`integral`



gpu::sqrIntegral
--------------------
Computes a squared integral image.

.. ocv:function:: void gpu::sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null())

    :param src: Source image. Only ``CV_8UC1`` images are supported for now.

    :param sqsum: Squared integral image containing 64-bit unsigned integer values packed into ``CV_64FC1`` .

    :param stream: Stream for the asynchronous version.
modules/gpuarithm/doc/core.rst (Normal file, 128 lines)
@ -0,0 +1,128 @@

Core Operations on Matrices
===========================

.. highlight:: cpp



gpu::merge
--------------
Makes a multi-channel matrix out of several single-channel matrices.

.. ocv:function:: void gpu::merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null())

.. ocv:function:: void gpu::merge(const vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null())

    :param src: Array/vector of source matrices.

    :param n: Number of source matrices.

    :param dst: Destination matrix.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`merge`



gpu::split
--------------
Copies each plane of a multi-channel matrix into an array.

.. ocv:function:: void gpu::split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null())

.. ocv:function:: void gpu::split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = Stream::Null())

    :param src: Source matrix.

    :param dst: Destination array/vector of single-channel matrices.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`split`



gpu::copyMakeBorder
-----------------------
Forms a border around an image.

.. ocv:function:: void gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value = Scalar(), Stream& stream = Stream::Null())

    :param src: Source image. ``CV_8UC1`` , ``CV_8UC4`` , ``CV_32SC1`` , and ``CV_32FC1`` types are supported.

    :param dst: Destination image with the same type as ``src``. The size is ``Size(src.cols+left+right, src.rows+top+bottom)`` .

    :param top:

    :param bottom:

    :param left:

    :param right: Number of pixels in each direction from the source image rectangle to extrapolate. For example: ``top=1, bottom=1, left=1, right=1`` means that a 1 pixel-wide border needs to be built.

    :param borderType: Border type. See :ocv:func:`borderInterpolate` for details. ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.

    :param value: Border value.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`copyMakeBorder`
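
For example, padding an image by one pixel on each side with a constant gray value (a sketch): ::

    cv::gpu::GpuMat src(480, 640, CV_8UC1), dst;

    cv::gpu::copyMakeBorder(src, dst, 1, 1, 1, 1, cv::BORDER_CONSTANT, cv::Scalar::all(128));
    // dst is now 482 x 642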
|
||||
|
||||
|
||||


gpu::transpose
------------------
Transposes a matrix.

.. ocv:function:: void gpu::transpose( const GpuMat& src1, GpuMat& dst, Stream& stream=Stream::Null() )

    :param src1: Source matrix. 1-, 4-, 8-byte element sizes are supported for now (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc).

    :param dst: Destination matrix.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`transpose`


gpu::flip
-------------
Flips a 2D matrix around vertical, horizontal, or both axes.

.. ocv:function:: void gpu::flip( const GpuMat& a, GpuMat& b, int flipCode, Stream& stream=Stream::Null() )

    :param a: Source matrix. Supports 1-, 3- and 4-channel images with ``CV_8U``, ``CV_16U``, ``CV_32S`` or ``CV_32F`` depth.

    :param b: Destination matrix.

    :param flipCode: Flip mode for the source:

        * ``0`` Flips around x-axis.

        * ``>0`` Flips around y-axis.

        * ``<0`` Flips around both axes.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`flip`


gpu::LUT
------------
Transforms the source matrix into the destination matrix using the given look-up table: ``dst(I) = lut(src(I))``

.. ocv:function:: void gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null())

    :param src: Source matrix. ``CV_8UC1`` and ``CV_8UC3`` matrices are supported for now.

    :param lut: Look-up table of 256 elements. It is a continuous ``CV_8U`` matrix.

    :param dst: Destination matrix with the same depth as ``lut`` and the same number of channels as ``src`` .

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`LUT`
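
A minimal usage sketch (an illustrative addition; an intensity-inverting table is built on the host): ::

    #include "opencv2/core.hpp"
    #include "opencv2/gpuarithm.hpp"

    int main()
    {
        cv::Mat src(64, 64, CV_8UC1);
        cv::randu(src, cv::Scalar(0), cv::Scalar(256));

        cv::Mat lut(1, 256, CV_8UC1);            // invert intensities: lut[i] = 255 - i
        for (int i = 0; i < 256; ++i)
            lut.at<uchar>(i) = static_cast<uchar>(255 - i);

        cv::gpu::GpuMat d_src(src), d_dst;
        cv::gpu::LUT(d_src, lut, d_dst);         // dst(I) = lut(src(I))
        return 0;
    }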
@ -1,5 +1,5 @@
 Per-element Operations
-=======================
+======================
 
 .. highlight:: cpp
@ -112,6 +112,7 @@ This function, in contrast to :ocv:func:`divide`, uses a round-down rounding mod

.. seealso:: :ocv:func:`divide`



gpu::addWeighted
----------------
Computes the weighted sum of two arrays.
@ -443,3 +444,131 @@ Computes the per-element maximum of two matrices (or a matrix and a scalar).

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`max`


gpu::threshold
------------------
Applies a fixed-level threshold to each array element.

.. ocv:function:: double gpu::threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, Stream& stream = Stream::Null())

    :param src: Source array (single-channel).

    :param dst: Destination array with the same size and type as ``src`` .

    :param thresh: Threshold value.

    :param maxval: Maximum value to use with ``THRESH_BINARY`` and ``THRESH_BINARY_INV`` threshold types.

    :param type: Threshold type. For details, see :ocv:func:`threshold` . The ``THRESH_OTSU`` threshold type is not supported.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`threshold`
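
A minimal usage sketch (an illustrative addition; ``THRESH_BINARY`` comes from the imgproc headers): ::

    #include "opencv2/core.hpp"
    #include "opencv2/imgproc.hpp"
    #include "opencv2/gpuarithm.hpp"

    int main()
    {
        cv::Mat src(128, 128, CV_32FC1);
        cv::randu(src, cv::Scalar(0), cv::Scalar(1));

        cv::gpu::GpuMat d_src(src), d_dst;
        // values above 0.5 become 1.0, the rest become 0
        cv::gpu::threshold(d_src, d_dst, 0.5, 1.0, cv::THRESH_BINARY);
        return 0;
    }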


gpu::magnitude
------------------
Computes magnitudes of complex matrix elements.

.. ocv:function:: void gpu::magnitude( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )

.. ocv:function:: void gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())

    :param xy: Source complex matrix in the interleaved format ( ``CV_32FC2`` ).

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param magnitude: Destination matrix of float magnitudes ( ``CV_32FC1`` ).

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`magnitude`


gpu::magnitudeSqr
---------------------
Computes squared magnitudes of complex matrix elements.

.. ocv:function:: void gpu::magnitudeSqr( const GpuMat& xy, GpuMat& magnitude, Stream& stream=Stream::Null() )

.. ocv:function:: void gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null())

    :param xy: Source complex matrix in the interleaved format ( ``CV_32FC2`` ).

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param magnitude: Destination matrix of float magnitude squares ( ``CV_32FC1`` ).

    :param stream: Stream for the asynchronous version.


gpu::phase
--------------
Computes polar angles of complex matrix elements.

.. ocv:function:: void gpu::phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees=false, Stream& stream = Stream::Null())

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param angle: Destination matrix of angles ( ``CV_32FC1`` ).

    :param angleInDegrees: Flag that specifies whether the angles are measured in degrees.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`phase`


gpu::cartToPolar
--------------------
Converts Cartesian coordinates into polar.

.. ocv:function:: void gpu::cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees=false, Stream& stream = Stream::Null())

    :param x: Source matrix containing real components ( ``CV_32FC1`` ).

    :param y: Source matrix containing imaginary components ( ``CV_32FC1`` ).

    :param magnitude: Destination matrix of float magnitudes ( ``CV_32FC1`` ).

    :param angle: Destination matrix of angles ( ``CV_32FC1`` ).

    :param angleInDegrees: Flag that specifies whether the angles are measured in degrees.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`cartToPolar`


gpu::polarToCart
--------------------
Converts polar coordinates into Cartesian.

.. ocv:function:: void gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees=false, Stream& stream = Stream::Null())

    :param magnitude: Source matrix containing magnitudes ( ``CV_32FC1`` ).

    :param angle: Source matrix containing angles ( ``CV_32FC1`` ).

    :param x: Destination matrix of real components ( ``CV_32FC1`` ).

    :param y: Destination matrix of imaginary components ( ``CV_32FC1`` ).

    :param angleInDegrees: Flag that specifies whether the angles are measured in degrees.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`polarToCart`
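
A round-trip sketch for the two conversions above (an illustrative addition; it assumes a CUDA-capable device): ::

    #include "opencv2/core.hpp"
    #include "opencv2/gpuarithm.hpp"

    int main()
    {
        cv::Mat x(64, 64, CV_32FC1), y(64, 64, CV_32FC1);
        cv::randu(x, cv::Scalar(-1), cv::Scalar(1));
        cv::randu(y, cv::Scalar(-1), cv::Scalar(1));

        cv::gpu::GpuMat d_x(x), d_y(y), d_mag, d_angle;
        cv::gpu::cartToPolar(d_x, d_y, d_mag, d_angle, true);    // angles in degrees

        cv::gpu::GpuMat d_x2, d_y2;
        cv::gpu::polarToCart(d_mag, d_angle, d_x2, d_y2, true);  // recovers x and y
        return 0;
    }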
11  modules/gpuarithm/doc/gpuarithm.rst  Normal file
@ -0,0 +1,11 @@
*************************************************
gpuarithm. GPU-accelerated Operations on Matrices
*************************************************

.. toctree::
    :maxdepth: 1

    core
    element_operations
    reductions
    arithm
@ -5,25 +5,6 @@ Matrix Reductions



gpu::meanStdDev
-------------------
Computes a mean value and a standard deviation of matrix elements.

.. ocv:function:: void gpu::meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev)
.. ocv:function:: void gpu::meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev, GpuMat& buf)

    :param mtx: Source matrix. ``CV_8UC1`` matrices are supported for now.

    :param mean: Mean value.

    :param stddev: Standard deviation value.

    :param buf: Optional buffer to avoid extra memory allocations. It is resized automatically.

.. seealso:: :ocv:func:`meanStdDev`



gpu::norm
-------------
Returns the norm of a matrix (or difference of two matrices).
@ -205,3 +186,70 @@ Reduces a matrix to a vector.

The function ``reduce`` reduces the matrix to a vector by treating the matrix rows/columns as a set of 1D vectors and performing the specified operation on the vectors until a single row/column is obtained. For example, the function can be used to compute horizontal and vertical projections of a raster image. In case of ``CV_REDUCE_SUM`` and ``CV_REDUCE_AVG`` , the output may have a larger element bit-depth to preserve accuracy. Multi-channel arrays are also supported in these two reduction modes.

.. seealso:: :ocv:func:`reduce`


gpu::normalize
--------------
Normalizes the norm or value range of an array.

.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat())

.. ocv:function:: void gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)

    :param src: Input array.

    :param dst: Output array of the same size as ``src`` .

    :param alpha: Norm value to normalize to or the lower range boundary in case of the range normalization.

    :param beta: Upper range boundary in case of the range normalization; it is not used for the norm normalization.

    :param norm_type: Normalization type. See :ocv:func:`normalize` for details.

    :param dtype: When negative, the output array has the same type as ``src`` ; otherwise, it has the same number of channels as ``src`` and the depth ``=CV_MAT_DEPTH(dtype)`` .

    :param mask: Optional operation mask.

    :param norm_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.

    :param cvt_buf: Optional buffer to avoid extra memory allocations. It is resized automatically.

.. seealso:: :ocv:func:`normalize`
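
A minimal usage sketch (an illustrative addition; range normalization to ``[0, 255]`` is shown): ::

    #include "opencv2/core.hpp"
    #include "opencv2/gpuarithm.hpp"

    int main()
    {
        cv::Mat src(64, 64, CV_8UC1);
        cv::randu(src, cv::Scalar(50), cv::Scalar(200));

        cv::gpu::GpuMat d_src(src), d_dst;
        cv::gpu::normalize(d_src, d_dst, 0, 255, cv::NORM_MINMAX);   // stretch to [0, 255]
        return 0;
    }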


gpu::meanStdDev
-------------------
Computes a mean value and a standard deviation of matrix elements.

.. ocv:function:: void gpu::meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev)
.. ocv:function:: void gpu::meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev, GpuMat& buf)

    :param mtx: Source matrix. ``CV_8UC1`` matrices are supported for now.

    :param mean: Mean value.

    :param stddev: Standard deviation value.

    :param buf: Optional buffer to avoid extra memory allocations. It is resized automatically.

.. seealso:: :ocv:func:`meanStdDev`


gpu::rectStdDev
-------------------
Computes a standard deviation of integral images.

.. ocv:function:: void gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null())

    :param src: Source image. Only the ``CV_32SC1`` type is supported.

    :param sqr: Squared source image. Only the ``CV_32FC1`` type is supported.

    :param dst: Destination image with the same size as ``src`` . The output has the ``CV_32FC1`` type.

    :param rect: Rectangular window.

    :param stream: Stream for the asynchronous version.
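
A hedged end-to-end sketch combining the integral-image functions with ``rectStdDev`` (an illustrative addition; note the ``CV_64F`` to ``CV_32F`` conversion needed to match the expected ``sqr`` type, and the 21x21 window rectangle is an assumed example): ::

    #include "opencv2/core.hpp"
    #include "opencv2/gpuarithm.hpp"

    int main()
    {
        cv::Mat src(128, 128, CV_8UC1);
        cv::randu(src, cv::Scalar(0), cv::Scalar(256));

        cv::gpu::GpuMat d_src(src), d_sum, d_sqsum, d_sqf, d_dst;
        cv::gpu::integral(d_src, d_sum);        // CV_32SC1 integral image
        cv::gpu::sqrIntegral(d_src, d_sqsum);   // CV_64FC1 squared integral image
        d_sqsum.convertTo(d_sqf, CV_32FC1);     // rectStdDev expects CV_32FC1

        cv::gpu::rectStdDev(d_sum, d_sqf, d_dst, cv::Rect(0, 0, 21, 21));
        return 0;
    }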
346  modules/gpuarithm/include/opencv2/gpuarithm.hpp  Normal file
@ -0,0 +1,346 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_GPUARITHM_HPP__
#define __OPENCV_GPUARITHM_HPP__

#ifndef __cplusplus
#  error gpuarithm.hpp header must be compiled as C++
#endif

#include "opencv2/core/gpumat.hpp"

namespace cv { namespace gpu {

//! adds one matrix to another (c = a + b)
CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
//! adds scalar to a matrix (c = a + s)
CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());

//! subtracts one matrix from another (c = a - b)
CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
//! subtracts scalar from a matrix (c = a - s)
CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());

//! computes element-wise weighted product of the two arrays (c = scale * a * b)
CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! multiplies a matrix by a weighted scalar (c = scale * a * s)
CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());

//! computes element-wise weighted quotient of the two arrays (c = scale * a / b)
CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted quotient of matrix and scalar (c = scale * a / s)
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null());

//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
                            int dtype = -1, Stream& stream = Stream::Null());

//! adds scaled array to another one (dst = alpha*src1 + src2)
static inline void scaleAdd(const GpuMat& src1, double alpha, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null())
{
    addWeighted(src1, alpha, src2, 1.0, 0.0, dst, -1, stream);
}

//! computes element-wise absolute difference of two arrays (c = abs(a - b))
CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
//! computes element-wise absolute difference of array and scalar (c = abs(a - s))
CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null());

//! computes absolute value of each matrix element
//! supports CV_16S and CV_32F depth
CV_EXPORTS void abs(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());

//! computes square of each pixel in an image
//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
CV_EXPORTS void sqr(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());

//! computes square root of each pixel in an image
//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
CV_EXPORTS void sqrt(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());

//! computes exponent of each matrix element (b = e**a)
//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
CV_EXPORTS void exp(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());

//! computes natural logarithm of absolute value of each matrix element: b = log(abs(a))
//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
CV_EXPORTS void log(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());

//! computes power of each matrix element:
//!   (dst(i,j) = pow( src(i,j) , power), if src.type() is integer
//!   (dst(i,j) = pow(fabs(src(i,j)), power), otherwise
//! supports all types, except depth == CV_64F
CV_EXPORTS void pow(const GpuMat& src, double power, GpuMat& dst, Stream& stream = Stream::Null());
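
// Usage sketch (an illustrative editor's example, not part of the API): the Stream
// overloads let several element-wise operations be queued asynchronously, e.g.
//
//     cv::gpu::Stream stream;
//     cv::gpu::GpuMat d_a, d_b, d_sum, d_prod;  // pre-uploaded CV_32FC1 data (assumed)
//     cv::gpu::add(d_a, d_b, d_sum, cv::gpu::GpuMat(), -1, stream);
//     cv::gpu::multiply(d_a, d_b, d_prod, 1.0, -1, stream);
//     stream.waitForCompletion();               // block until both operations finish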

//! compares elements of two arrays (c = a <cmpop> b)
CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream = Stream::Null());
CV_EXPORTS void compare(const GpuMat& a, Scalar sc, GpuMat& c, int cmpop, Stream& stream = Stream::Null());

//! performs per-element bit-wise inversion
CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());

//! calculates per-element bit-wise disjunction of two arrays
CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
//! calculates per-element bit-wise disjunction of array and scalar
//! supports 1-, 3- and 4-channel images with CV_8U, CV_16U or CV_32S depth
CV_EXPORTS void bitwise_or(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());

//! calculates per-element bit-wise conjunction of two arrays
CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
//! calculates per-element bit-wise conjunction of array and scalar
//! supports 1-, 3- and 4-channel images with CV_8U, CV_16U or CV_32S depth
CV_EXPORTS void bitwise_and(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());

//! calculates per-element bit-wise "exclusive or" operation
CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
//! calculates per-element bit-wise "exclusive or" of array and scalar
//! supports 1-, 3- and 4-channel images with CV_8U, CV_16U or CV_32S depth
CV_EXPORTS void bitwise_xor(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());

//! pixel-by-pixel right shift of an image by a constant value
//! supports 1-, 3- and 4-channel images with integer elements
CV_EXPORTS void rshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream = Stream::Null());

//! pixel-by-pixel left shift of an image by a constant value
//! supports 1-, 3- and 4-channel images with CV_8U, CV_16U or CV_32S depth
CV_EXPORTS void lshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream = Stream::Null());

//! computes per-element minimum of two arrays (dst = min(src1, src2))
CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());

//! computes per-element minimum of array and scalar (dst = min(src1, src2))
CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());

//! computes per-element maximum of two arrays (dst = max(src1, src2))
CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());

//! computes per-element maximum of array and scalar (dst = max(src1, src2))
CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());

//! implements generalized matrix product algorithm GEMM from BLAS
CV_EXPORTS void gemm(const GpuMat& src1, const GpuMat& src2, double alpha,
                     const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null());

//! transposes the matrix
//! supports matrix with element size = 1, 4 and 8 bytes (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc)
CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst, Stream& stream = Stream::Null());

//! reverses the order of the rows, columns or both in a matrix
//! supports 1-, 3- and 4-channel images with CV_8U, CV_16U, CV_32S or CV_32F depth
CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode, Stream& stream = Stream::Null());

//! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i))
//! the destination array will have the same depth as lut and the same number of channels as the source
//! supports CV_8UC1, CV_8UC3 types
CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null());

//! makes multi-channel array out of several single-channel arrays
CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null());

//! makes multi-channel array out of several single-channel arrays
CV_EXPORTS void merge(const std::vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null());

//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null());

//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const GpuMat& src, std::vector<GpuMat>& dst, Stream& stream = Stream::Null());

//! computes magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());

//! computes squared magnitude of complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());

//! computes magnitude of each (x(i), y(i)) vector
//! supports only floating-point source
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null());

//! computes squared magnitude of each (x(i), y(i)) vector
//! supports only floating-point source
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null());

//! computes angle (angle(i)) of each (x(i), y(i)) vector
//! supports only floating-point source
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null());

//! converts Cartesian coordinates to polar
//! supports only floating-point source
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null());

//! converts polar coordinates to Cartesian
//! supports only floating-point source
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false, Stream& stream = Stream::Null());

//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0,
                          int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat());
CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double a, double b,
                          int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf);

//! computes norm of array
//! supports NORM_INF, NORM_L1, NORM_L2
//! supports all matrices except 64F
CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2);
CV_EXPORTS double norm(const GpuMat& src1, int normType, GpuMat& buf);
CV_EXPORTS double norm(const GpuMat& src1, int normType, const GpuMat& mask, GpuMat& buf);

//! computes norm of the difference between two arrays
//! supports NORM_INF, NORM_L1, NORM_L2
//! supports only CV_8UC1 type
CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2);

//! computes sum of array elements
//! supports only single channel images
CV_EXPORTS Scalar sum(const GpuMat& src);
CV_EXPORTS Scalar sum(const GpuMat& src, GpuMat& buf);
CV_EXPORTS Scalar sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf);

//! computes sum of array elements absolute values
//! supports only single channel images
CV_EXPORTS Scalar absSum(const GpuMat& src);
CV_EXPORTS Scalar absSum(const GpuMat& src, GpuMat& buf);
CV_EXPORTS Scalar absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf);

//! computes squared sum of array elements
//! supports only single channel images
CV_EXPORTS Scalar sqrSum(const GpuMat& src);
CV_EXPORTS Scalar sqrSum(const GpuMat& src, GpuMat& buf);
CV_EXPORTS Scalar sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf);

//! finds global minimum and maximum array elements and returns their values
CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal=0, const GpuMat& mask=GpuMat());
CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf);

//! finds global minimum and maximum array elements and returns their values with locations
CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0,
                          const GpuMat& mask=GpuMat());
CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
                          const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf);

//! counts non-zero array elements
CV_EXPORTS int countNonZero(const GpuMat& src);
CV_EXPORTS int countNonZero(const GpuMat& src, GpuMat& buf);

//! reduces a matrix to a vector
CV_EXPORTS void reduce(const GpuMat& mtx, GpuMat& vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null());

//! computes mean value and standard deviation of all or selected array elements
//! supports only CV_8UC1 type
CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev);
//! buffered version
CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev, GpuMat& buf);

//! computes the standard deviation of integral images
//! supports only CV_32SC1 source type and CV_32FC1 sqr type
//! output will have CV_32FC1 type
CV_EXPORTS void rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null());

//! copies 2D array to a larger destination array and pads borders with user-specifiable constant
CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType,
                               const Scalar& value = Scalar(), Stream& stream = Stream::Null());

//! applies fixed threshold to the image
CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, Stream& stream = Stream::Null());

//! computes the integral image
//! sum will have CV_32S type, but will contain unsigned int values
//! supports only CV_8UC1 source type
CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null());
//! buffered version
CV_EXPORTS void integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& stream = Stream::Null());

//! computes squared integral image
//! result matrix will have 64F type, but will contain 64U values
//! supports source images of 8UC1 type only
CV_EXPORTS void sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null());

//! performs per-element multiplication of two full (not packed) Fourier spectrums
//! supports 32FC2 matrices only (interleaved format)
CV_EXPORTS void mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream = Stream::Null());

//! performs per-element multiplication of two full (not packed) Fourier spectrums
//! supports 32FC2 matrices only (interleaved format)
CV_EXPORTS void mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null());
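
// Usage sketch (an illustrative editor's example, not part of the API): typical
// reduction calls, reusing a buffer across iterations to avoid reallocations, e.g.
//
//     cv::gpu::GpuMat d_img;                    // CV_8UC1, uploaded elsewhere (assumed)
//     cv::gpu::GpuMat buf;
//     cv::Scalar s = cv::gpu::sum(d_img, buf);  // buffered version
//     double minVal, maxVal;
//     cv::gpu::minMax(d_img, &minVal, &maxVal);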

//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of a floating-point matrix.
//! Param dft_size is the size of the DFT transform.
//!
//! If the source matrix is not continuous, an additional copy will be done,
//! so to avoid copying ensure the source matrix is continuous. If you want to use
//! a preallocated output, ensure it is continuous too; otherwise it will be reallocated.
//!
//! Being implemented via a CUFFT real-to-complex transform, the result contains only non-redundant values
//! in CUFFT's format. The result as a full complex matrix for such kind of transform cannot be retrieved.
//!
//! For the complex-to-real transform it is assumed that the source matrix is packed in CUFFT's format.
CV_EXPORTS void dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream = Stream::Null());
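
// Usage sketch (an illustrative editor's example, not part of the API): forward
// transform of a real CV_32FC1 image into the CUFFT-packed complex spectrum, e.g.
//
//     cv::gpu::GpuMat d_img = cv::gpu::createContinuous(256, 256, CV_32FC1);
//     cv::gpu::GpuMat d_spect;
//     cv::gpu::dft(d_img, d_spect, d_img.size());   // real-to-complex, packed result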

struct CV_EXPORTS ConvolveBuf
{
    Size result_size;
    Size block_size;
    Size user_block_size;
    Size dft_size;
    int spect_len;

    GpuMat image_spect, templ_spect, result_spect;
    GpuMat image_block, templ_block, result_data;

    void create(Size image_size, Size templ_size);
    static Size estimateBlockSize(Size result_size, Size templ_size);
};

//! computes convolution (or cross-correlation) of two images using discrete Fourier transform
//! supports source images of 32FC1 type only
//! result matrix will have 32FC1 type
CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr = false);
CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream = Stream::Null());
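
// Usage sketch (an illustrative editor's example, not part of the API): FFT-based
// cross-correlation with a reusable ConvolveBuf, e.g.
//
//     cv::gpu::GpuMat d_image, d_templ, d_result;   // CV_32FC1, uploaded elsewhere (assumed)
//     cv::gpu::ConvolveBuf buf;
//     cv::gpu::convolve(d_image, d_templ, d_result, /*ccorr=*/true, buf);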

}} // namespace cv { namespace gpu {

#endif /* __OPENCV_GPUARITHM_HPP__ */
306  modules/gpuarithm/perf/perf_arithm.cpp  Normal file
@ -0,0 +1,306 @@
/* (Standard OpenCV BSD license header, identical to the one reproduced in gpuarithm.hpp above.) */

#include "perf_precomp.hpp"

using namespace std;
using namespace testing;
using namespace perf;

//////////////////////////////////////////////////////////////////////
// GEMM

CV_FLAGS(GemmFlags, 0, cv::GEMM_1_T, cv::GEMM_2_T, cv::GEMM_3_T)
#define ALL_GEMM_FLAGS Values(GemmFlags(0), GemmFlags(cv::GEMM_1_T), GemmFlags(cv::GEMM_2_T), GemmFlags(cv::GEMM_3_T), \
                              GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T | cv::GEMM_3_T))

DEF_PARAM_TEST(Sz_Type_Flags, cv::Size, MatType, GemmFlags);

PERF_TEST_P(Sz_Type_Flags, GEMM,
            Combine(Values(cv::Size(512, 512), cv::Size(1024, 1024)),
                    Values(CV_32FC1, CV_32FC2, CV_64FC1),
                    ALL_GEMM_FLAGS))
{
    const cv::Size size = GET_PARAM(0);
    const int type = GET_PARAM(1);
    const int flags = GET_PARAM(2);

    cv::Mat src1(size, type);
    declare.in(src1, WARMUP_RNG);

    cv::Mat src2(size, type);
    declare.in(src2, WARMUP_RNG);

    cv::Mat src3(size, type);
    declare.in(src3, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        declare.time(5.0);

        const cv::gpu::GpuMat d_src1(src1);
        const cv::gpu::GpuMat d_src2(src2);
        const cv::gpu::GpuMat d_src3(src3);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, dst, flags);

        GPU_SANITY_CHECK(dst, 1e-6);
    }
    else
    {
        declare.time(50.0);

        cv::Mat dst;

        TEST_CYCLE() cv::gemm(src1, src2, 1.0, src3, 1.0, dst, flags);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// MulSpectrums

CV_FLAGS(DftFlags, 0, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)

DEF_PARAM_TEST(Sz_Flags, cv::Size, DftFlags);

PERF_TEST_P(Sz_Flags, MulSpectrums,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(0, DftFlags(cv::DFT_ROWS))))
{
    const cv::Size size = GET_PARAM(0);
    const int flag = GET_PARAM(1);

    cv::Mat a(size, CV_32FC2);
    cv::Mat b(size, CV_32FC2);
    declare.in(a, b, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_a(a);
        const cv::gpu::GpuMat d_b(b);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::mulSpectrums(d_a, d_b, dst, flag);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::mulSpectrums(a, b, dst, flag);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// MulAndScaleSpectrums

PERF_TEST_P(Sz, MulAndScaleSpectrums,
            GPU_TYPICAL_MAT_SIZES)
{
    const cv::Size size = GetParam();

    const float scale = 1.f / size.area();

    cv::Mat src1(size, CV_32FC2);
    cv::Mat src2(size, CV_32FC2);
    declare.in(src1, src2, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src1(src1);
        const cv::gpu::GpuMat d_src2(src2);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::mulAndScaleSpectrums(d_src1, d_src2, dst, cv::DFT_ROWS, scale, false);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        FAIL_NO_CPU();
    }
}

//////////////////////////////////////////////////////////////////////
// Dft

PERF_TEST_P(Sz_Flags, Dft,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(0, DftFlags(cv::DFT_ROWS), DftFlags(cv::DFT_INVERSE))))
{
    declare.time(10.0);

    const cv::Size size = GET_PARAM(0);
    const int flag = GET_PARAM(1);

    cv::Mat src(size, CV_32FC2);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::dft(d_src, dst, size, flag);

        GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::dft(src, dst, flag);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Convolve

DEF_PARAM_TEST(Sz_KernelSz_Ccorr, cv::Size, int, bool);

PERF_TEST_P(Sz_KernelSz_Ccorr, Convolve,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(17, 27, 32, 64),
                    Bool()))
{
    declare.time(10.0);

    const cv::Size size = GET_PARAM(0);
    const int templ_size = GET_PARAM(1);
    const bool ccorr = GET_PARAM(2);

    const cv::Mat image(size, CV_32FC1);
    const cv::Mat templ(templ_size, templ_size, CV_32FC1);
    declare.in(image, templ, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        cv::gpu::GpuMat d_image = cv::gpu::createContinuous(size, CV_32FC1);
        d_image.upload(image);

        cv::gpu::GpuMat d_templ = cv::gpu::createContinuous(templ_size, templ_size, CV_32FC1);
        d_templ.upload(templ);

        cv::gpu::GpuMat dst;
        cv::gpu::ConvolveBuf d_buf;

        TEST_CYCLE() cv::gpu::convolve(d_image, d_templ, dst, ccorr, d_buf);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        if (ccorr)
            FAIL_NO_CPU();

        cv::Mat dst;

        TEST_CYCLE() cv::filter2D(image, dst, image.depth(), templ);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Integral

PERF_TEST_P(Sz, Integral,
            GPU_TYPICAL_MAT_SIZES)
{
    const cv::Size size = GetParam();

    cv::Mat src(size, CV_8UC1);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;
        cv::gpu::GpuMat d_buf;

        TEST_CYCLE() cv::gpu::integralBuffered(d_src, dst, d_buf);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::integral(src, dst);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// IntegralSqr

PERF_TEST_P(Sz, IntegralSqr,
            GPU_TYPICAL_MAT_SIZES)
{
    const cv::Size size = GetParam();

    cv::Mat src(size, CV_8UC1);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::sqrIntegral(d_src, dst);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        FAIL_NO_CPU();
    }
}
317  modules/gpuarithm/perf/perf_core.cpp  Normal file
@ -0,0 +1,317 @@
/* (Standard OpenCV BSD license header, identical to the one reproduced in gpuarithm.hpp above.) */

#include "perf_precomp.hpp"

using namespace std;
using namespace testing;
using namespace perf;

#define ARITHM_MAT_DEPTH Values(CV_8U, CV_16U, CV_32F, CV_64F)

//////////////////////////////////////////////////////////////////////
// Merge

PERF_TEST_P(Sz_Depth_Cn, Merge,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH,
                    Values(2, 3, 4)))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    std::vector<cv::Mat> src(channels);
    for (int i = 0; i < channels; ++i)
    {
        src[i].create(size, depth);
        declare.in(src[i], WARMUP_RNG);
    }

    if (PERF_RUN_GPU())
    {
        std::vector<cv::gpu::GpuMat> d_src(channels);
        for (int i = 0; i < channels; ++i)
            d_src[i].upload(src[i]);

        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::merge(d_src, dst);

        GPU_SANITY_CHECK(dst, 1e-10);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::merge(src, dst);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Split

PERF_TEST_P(Sz_Depth_Cn, Split,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH,
                    Values(2, 3, 4)))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    cv::Mat src(size, CV_MAKE_TYPE(depth, channels));
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        std::vector<cv::gpu::GpuMat> dst;

        TEST_CYCLE() cv::gpu::split(d_src, dst);

        const cv::gpu::GpuMat& dst0 = dst[0];
        const cv::gpu::GpuMat& dst1 = dst[1];

        GPU_SANITY_CHECK(dst0, 1e-10);
        GPU_SANITY_CHECK(dst1, 1e-10);
    }
    else
    {
        std::vector<cv::Mat> dst;

        TEST_CYCLE() cv::split(src, dst);

        const cv::Mat& dst0 = dst[0];
        const cv::Mat& dst1 = dst[1];

        CPU_SANITY_CHECK(dst0);
        CPU_SANITY_CHECK(dst1);
    }
}

//////////////////////////////////////////////////////////////////////
// Transpose

PERF_TEST_P(Sz_Type, Transpose,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8UC1, CV_8UC4, CV_16UC2, CV_16SC2, CV_32SC1, CV_32SC2, CV_64FC1)))
{
    const cv::Size size = GET_PARAM(0);
    const int type = GET_PARAM(1);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::transpose(d_src, dst);

        GPU_SANITY_CHECK(dst, 1e-10);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::transpose(src, dst);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Flip

enum {FLIP_BOTH = 0, FLIP_X = 1, FLIP_Y = -1};
CV_ENUM(FlipCode, FLIP_BOTH, FLIP_X, FLIP_Y)

DEF_PARAM_TEST(Sz_Depth_Cn_Code, cv::Size, MatDepth, MatCn, FlipCode);

PERF_TEST_P(Sz_Depth_Cn_Code, Flip,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F),
                    GPU_CHANNELS_1_3_4,
                    FlipCode::all()))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);
    const int flipCode = GET_PARAM(3);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::flip(d_src, dst, flipCode);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::flip(src, dst, flipCode);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// LutOneChannel

PERF_TEST_P(Sz_Type, LutOneChannel,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8UC1, CV_8UC3)))
{
    const cv::Size size = GET_PARAM(0);
    const int type = GET_PARAM(1);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    cv::Mat lut(1, 256, CV_8UC1);
    declare.in(lut, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::LUT(d_src, lut, dst);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::LUT(src, lut, dst);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// LutMultiChannel

PERF_TEST_P(Sz_Type, LutMultiChannel,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values<MatType>(CV_8UC3)))
{
    const cv::Size size = GET_PARAM(0);
    const int type = GET_PARAM(1);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    cv::Mat lut(1, 256, CV_MAKE_TYPE(CV_8U, src.channels()));
    declare.in(lut, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::LUT(d_src, lut, dst);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::LUT(src, lut, dst);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// CopyMakeBorder

DEF_PARAM_TEST(Sz_Depth_Cn_Border, cv::Size, MatDepth, MatCn, BorderMode);

PERF_TEST_P(Sz_Depth_Cn_Border, CopyMakeBorder,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F),
                    GPU_CHANNELS_1_3_4,
                    ALL_BORDER_MODES))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);
    const int borderMode = GET_PARAM(3);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::copyMakeBorder(d_src, dst, 5, 5, 5, 5, borderMode);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::copyMakeBorder(src, dst, 5, 5, 5, 5, borderMode);

        CPU_SANITY_CHECK(dst);
    }
}
File diff suppressed because it is too large
47  modules/gpuarithm/perf/perf_main.cpp  Normal file
@ -0,0 +1,47 @@
/* (Standard OpenCV BSD license header, identical to the one reproduced in gpuarithm.hpp above.) */

#include "perf_precomp.hpp"

using namespace perf;

CV_PERF_TEST_MAIN(gpuarithm, printCudaInfo())
@ -40,4 +40,4 @@
 //
 //M*/
 
-#include "opencv2/core/gpumat.hpp"
+#include "perf_precomp.hpp"
65  modules/gpuarithm/perf/perf_precomp.hpp  Normal file
@ -0,0 +1,65 @@
/* (Standard OpenCV BSD license header, identical to the one reproduced in gpuarithm.hpp above.) */
#ifdef __GNUC__
|
||||
# pragma GCC diagnostic ignored "-Wmissing-declarations"
|
||||
# if defined __clang__ || defined __APPLE__
|
||||
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
|
||||
# pragma GCC diagnostic ignored "-Wextra"
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef __OPENCV_PERF_PRECOMP_HPP__
|
||||
#define __OPENCV_PERF_PRECOMP_HPP__
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
#include "opencv2/ts/gpu_perf.hpp"
|
||||
|
||||
#include "opencv2/gpuarithm.hpp"
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
|
||||
#ifdef GTEST_CREATE_SHARED_LIBRARY
|
||||
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
|
||||
#endif
|
||||
|
||||
#endif
|
466
modules/gpuarithm/perf/perf_reductions.cpp
Normal file
466
modules/gpuarithm/perf/perf_reductions.cpp
Normal file
@ -0,0 +1,466 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "perf_precomp.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace testing;
|
||||
using namespace perf;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Norm
|
||||
|
||||
DEF_PARAM_TEST(Sz_Depth_Norm, cv::Size, MatDepth, NormType);
|
||||
|
||||
PERF_TEST_P(Sz_Depth_Norm, Norm,
|
||||
Combine(GPU_TYPICAL_MAT_SIZES,
|
||||
Values(CV_8U, CV_16U, CV_32S, CV_32F),
|
||||
Values(NormType(cv::NORM_INF), NormType(cv::NORM_L1), NormType(cv::NORM_L2))))
|
||||
{
|
||||
const cv::Size size = GET_PARAM(0);
|
||||
const int depth = GET_PARAM(1);
|
||||
const int normType = GET_PARAM(2);
|
||||
|
||||
cv::Mat src(size, depth);
|
||||
if (depth == CV_8U)
|
||||
cv::randu(src, 0, 254);
|
||||
else
|
||||
declare.in(src, WARMUP_RNG);
|
||||
|
||||
if (PERF_RUN_GPU())
|
||||
{
|
||||
const cv::gpu::GpuMat d_src(src);
|
||||
cv::gpu::GpuMat d_buf;
|
||||
double gpu_dst;
|
||||
|
||||
TEST_CYCLE() gpu_dst = cv::gpu::norm(d_src, normType, d_buf);
|
||||
|
||||
SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
|
||||
}
|
||||
else
|
||||
{
|
||||
double cpu_dst;
|
||||
|
||||
TEST_CYCLE() cpu_dst = cv::norm(src, normType);
|
||||
|
||||
SANITY_CHECK(cpu_dst, 1e-6, ERROR_RELATIVE);
|
||||
}
|
||||
}
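
// For reference, a minimal standalone call mirroring what this benchmark
// measures might look like the following (a sketch, not part of the patch;
// the buffer overload used above lets repeated calls reuse one scratch
// allocation instead of allocating per call):
//
//   cv::gpu::GpuMat d_src(host_mat);   // host_mat: any supported depth
//   cv::gpu::GpuMat d_buf;             // reused across calls
//   double n = cv::gpu::norm(d_src, cv::NORM_L2, d_buf);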

//////////////////////////////////////////////////////////////////////
// NormDiff

DEF_PARAM_TEST(Sz_Norm, cv::Size, NormType);

PERF_TEST_P(Sz_Norm, NormDiff,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(NormType(cv::NORM_INF), NormType(cv::NORM_L1), NormType(cv::NORM_L2))))
{
    const cv::Size size = GET_PARAM(0);
    const int normType = GET_PARAM(1);

    cv::Mat src1(size, CV_8UC1);
    declare.in(src1, WARMUP_RNG);

    cv::Mat src2(size, CV_8UC1);
    declare.in(src2, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src1(src1);
        const cv::gpu::GpuMat d_src2(src2);
        double gpu_dst;

        TEST_CYCLE() gpu_dst = cv::gpu::norm(d_src1, d_src2, normType);

        SANITY_CHECK(gpu_dst);
    }
    else
    {
        double cpu_dst;

        TEST_CYCLE() cpu_dst = cv::norm(src1, src2, normType);

        SANITY_CHECK(cpu_dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Sum

PERF_TEST_P(Sz_Depth_Cn, Sum,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F),
                    GPU_CHANNELS_1_3_4))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_buf;
        cv::Scalar gpu_dst;

        TEST_CYCLE() gpu_dst = cv::gpu::sum(d_src, d_buf);

        SANITY_CHECK(gpu_dst, 1e-5, ERROR_RELATIVE);
    }
    else
    {
        cv::Scalar cpu_dst;

        TEST_CYCLE() cpu_dst = cv::sum(src);

        SANITY_CHECK(cpu_dst, 1e-6, ERROR_RELATIVE);
    }
}

//////////////////////////////////////////////////////////////////////
// SumAbs

PERF_TEST_P(Sz_Depth_Cn, SumAbs,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F),
                    GPU_CHANNELS_1_3_4))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_buf;
        cv::Scalar gpu_dst;

        TEST_CYCLE() gpu_dst = cv::gpu::absSum(d_src, d_buf);

        SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
    }
    else
    {
        FAIL_NO_CPU();
    }
}

//////////////////////////////////////////////////////////////////////
// SumSqr

PERF_TEST_P(Sz_Depth_Cn, SumSqr,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values<MatDepth>(CV_8U, CV_16U, CV_32F),
                    GPU_CHANNELS_1_3_4))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_buf;
        cv::Scalar gpu_dst;

        TEST_CYCLE() gpu_dst = cv::gpu::sqrSum(d_src, d_buf);

        SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
    }
    else
    {
        FAIL_NO_CPU();
    }
}

//////////////////////////////////////////////////////////////////////
// MinMax

PERF_TEST_P(Sz_Depth, MinMax,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat src(size, depth);
    if (depth == CV_8U)
        cv::randu(src, 0, 254);
    else
        declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_buf;
        double gpu_minVal, gpu_maxVal;

        TEST_CYCLE() cv::gpu::minMax(d_src, &gpu_minVal, &gpu_maxVal, cv::gpu::GpuMat(), d_buf);

        SANITY_CHECK(gpu_minVal, 1e-10);
        SANITY_CHECK(gpu_maxVal, 1e-10);
    }
    else
    {
        double cpu_minVal, cpu_maxVal;

        TEST_CYCLE() cv::minMaxLoc(src, &cpu_minVal, &cpu_maxVal);

        SANITY_CHECK(cpu_minVal);
        SANITY_CHECK(cpu_maxVal);
    }
}

//////////////////////////////////////////////////////////////////////
// MinMaxLoc

PERF_TEST_P(Sz_Depth, MinMaxLoc,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat src(size, depth);
    if (depth == CV_8U)
        cv::randu(src, 0, 254);
    else
        declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_valbuf, d_locbuf;
        double gpu_minVal, gpu_maxVal;
        cv::Point gpu_minLoc, gpu_maxLoc;

        TEST_CYCLE() cv::gpu::minMaxLoc(d_src, &gpu_minVal, &gpu_maxVal, &gpu_minLoc, &gpu_maxLoc, cv::gpu::GpuMat(), d_valbuf, d_locbuf);

        SANITY_CHECK(gpu_minVal, 1e-10);
        SANITY_CHECK(gpu_maxVal, 1e-10);
    }
    else
    {
        double cpu_minVal, cpu_maxVal;
        cv::Point cpu_minLoc, cpu_maxLoc;

        TEST_CYCLE() cv::minMaxLoc(src, &cpu_minVal, &cpu_maxVal, &cpu_minLoc, &cpu_maxLoc);

        SANITY_CHECK(cpu_minVal);
        SANITY_CHECK(cpu_maxVal);
    }
}
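
// Note: only the extremal values are sanity-checked here; the locations are
// computed but deliberately left unchecked, presumably because when several
// elements share the same extremum the CPU and GPU versions may legitimately
// report different positions.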

//////////////////////////////////////////////////////////////////////
// CountNonZero

PERF_TEST_P(Sz_Depth, CountNonZero,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat src(size, depth);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_buf;
        int gpu_dst = 0;

        TEST_CYCLE() gpu_dst = cv::gpu::countNonZero(d_src, d_buf);

        SANITY_CHECK(gpu_dst);
    }
    else
    {
        int cpu_dst = 0;

        TEST_CYCLE() cpu_dst = cv::countNonZero(src);

        SANITY_CHECK(cpu_dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Reduce

CV_ENUM(ReduceCode, REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN)

enum {Rows = 0, Cols = 1};
CV_ENUM(ReduceDim, Rows, Cols)
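
// In cv::reduce / cv::gpu::reduce, dim == 0 collapses the matrix to a single
// row and dim == 1 to a single column, hence the Rows/Cols names above.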

DEF_PARAM_TEST(Sz_Depth_Cn_Code_Dim, cv::Size, MatDepth, MatCn, ReduceCode, ReduceDim);

PERF_TEST_P(Sz_Depth_Cn_Code_Dim, Reduce,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_16S, CV_32F),
                    Values(1, 2, 3, 4),
                    ReduceCode::all(),
                    ReduceDim::all()))
{
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);
    const int reduceOp = GET_PARAM(3);
    const int dim = GET_PARAM(4);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;

        TEST_CYCLE() cv::gpu::reduce(d_src, dst, dim, reduceOp);

        GPU_SANITY_CHECK(dst);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::reduce(src, dst, dim, reduceOp);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// Normalize

DEF_PARAM_TEST(Sz_Depth_NormType, cv::Size, MatDepth, NormType);

PERF_TEST_P(Sz_Depth_NormType, Normalize,
            Combine(GPU_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F, CV_64F),
                    Values(NormType(cv::NORM_INF),
                           NormType(cv::NORM_L1),
                           NormType(cv::NORM_L2),
                           NormType(cv::NORM_MINMAX))))
{
    const cv::Size size = GET_PARAM(0);
    const int type = GET_PARAM(1);
    const int norm_type = GET_PARAM(2);

    const double alpha = 1;
    const double beta = 0;

    cv::Mat src(size, type);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat dst;
        cv::gpu::GpuMat d_norm_buf, d_cvt_buf;

        TEST_CYCLE() cv::gpu::normalize(d_src, dst, alpha, beta, norm_type, type, cv::gpu::GpuMat(), d_norm_buf, d_cvt_buf);

        GPU_SANITY_CHECK(dst, 1e-6);
    }
    else
    {
        cv::Mat dst;

        TEST_CYCLE() cv::normalize(src, dst, alpha, beta, norm_type, type);

        CPU_SANITY_CHECK(dst);
    }
}

//////////////////////////////////////////////////////////////////////
// MeanStdDev

PERF_TEST_P(Sz, MeanStdDev,
            GPU_TYPICAL_MAT_SIZES)
{
    const cv::Size size = GetParam();

    cv::Mat src(size, CV_8UC1);
    declare.in(src, WARMUP_RNG);

    if (PERF_RUN_GPU())
    {
        const cv::gpu::GpuMat d_src(src);
        cv::gpu::GpuMat d_buf;
        cv::Scalar gpu_mean;
        cv::Scalar gpu_stddev;

        TEST_CYCLE() cv::gpu::meanStdDev(d_src, gpu_mean, gpu_stddev, d_buf);

        SANITY_CHECK(gpu_mean);
        SANITY_CHECK(gpu_stddev);
    }
    else
    {
        cv::Scalar cpu_mean;
        cv::Scalar cpu_stddev;

        TEST_CYCLE() cv::meanStdDev(src, cpu_mean, cpu_stddev);

        SANITY_CHECK(cpu_mean);
        SANITY_CHECK(cpu_stddev);
    }
}
 735  modules/gpuarithm/src/arithm.cpp  Normal file
@ -0,0 +1,735 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

void cv::gpu::gemm(const GpuMat&, const GpuMat&, double, const GpuMat&, double, GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::integral(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::integralBuffered(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::sqrIntegral(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::mulSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::mulAndScaleSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, float, bool, Stream&) { throw_no_cuda(); }

void cv::gpu::dft(const GpuMat&, GpuMat&, Size, int, Stream&) { throw_no_cuda(); }

void cv::gpu::ConvolveBuf::create(Size, Size) { throw_no_cuda(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool) { throw_no_cuda(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&, Stream&) { throw_no_cuda(); }

#else /* !defined (HAVE_CUDA) */

namespace
{
    #define error_entry(entry) { entry, #entry }

    struct ErrorEntry
    {
        int code;
        const char* str;
    };

    struct ErrorEntryComparer
    {
        int code;
        ErrorEntryComparer(int code_) : code(code_) {}
        bool operator()(const ErrorEntry& e) const { return e.code == code; }
    };

    String getErrorString(int code, const ErrorEntry* errors, size_t n)
    {
        size_t idx = std::find_if(errors, errors + n, ErrorEntryComparer(code)) - errors;

        const char* msg = (idx != n) ? errors[idx].str : "Unknown error code";
        String str = cv::format("%s [Code = %d]", msg, code);

        return str;
    }
}

#ifdef HAVE_CUBLAS
    namespace
    {
        const ErrorEntry cublas_errors[] =
        {
            error_entry( CUBLAS_STATUS_SUCCESS ),
            error_entry( CUBLAS_STATUS_NOT_INITIALIZED ),
            error_entry( CUBLAS_STATUS_ALLOC_FAILED ),
            error_entry( CUBLAS_STATUS_INVALID_VALUE ),
            error_entry( CUBLAS_STATUS_ARCH_MISMATCH ),
            error_entry( CUBLAS_STATUS_MAPPING_ERROR ),
            error_entry( CUBLAS_STATUS_EXECUTION_FAILED ),
            error_entry( CUBLAS_STATUS_INTERNAL_ERROR )
        };

        const size_t cublas_error_num = sizeof(cublas_errors) / sizeof(cublas_errors[0]);

        static inline void ___cublasSafeCall(cublasStatus_t err, const char* file, const int line, const char* func)
        {
            if (CUBLAS_STATUS_SUCCESS != err)
            {
                String msg = getErrorString(err, cublas_errors, cublas_error_num);
                cv::error(cv::Error::GpuApiCallError, msg, func, file, line);
            }
        }
    }

    #if defined(__GNUC__)
        #define cublasSafeCall(expr)  ___cublasSafeCall(expr, __FILE__, __LINE__, __func__)
    #else /* defined(__CUDACC__) || defined(__MSVC__) */
        #define cublasSafeCall(expr)  ___cublasSafeCall(expr, __FILE__, __LINE__, "")
    #endif
#endif // HAVE_CUBLAS

#ifdef HAVE_CUFFT
    namespace
    {
        //////////////////////////////////////////////////////////////////////////
        // CUFFT errors

        const ErrorEntry cufft_errors[] =
        {
            error_entry( CUFFT_INVALID_PLAN ),
            error_entry( CUFFT_ALLOC_FAILED ),
            error_entry( CUFFT_INVALID_TYPE ),
            error_entry( CUFFT_INVALID_VALUE ),
            error_entry( CUFFT_INTERNAL_ERROR ),
            error_entry( CUFFT_EXEC_FAILED ),
            error_entry( CUFFT_SETUP_FAILED ),
            error_entry( CUFFT_INVALID_SIZE ),
            error_entry( CUFFT_UNALIGNED_DATA )
        };

        const int cufft_error_num = sizeof(cufft_errors) / sizeof(cufft_errors[0]);

        void ___cufftSafeCall(int err, const char* file, const int line, const char* func)
        {
            if (CUFFT_SUCCESS != err)
            {
                String msg = getErrorString(err, cufft_errors, cufft_error_num);
                cv::error(cv::Error::GpuApiCallError, msg, func, file, line);
            }
        }
    }

    #if defined(__GNUC__)
        #define cufftSafeCall(expr)  ___cufftSafeCall(expr, __FILE__, __LINE__, __func__)
    #else /* defined(__CUDACC__) || defined(__MSVC__) */
        #define cufftSafeCall(expr)  ___cufftSafeCall(expr, __FILE__, __LINE__, "")
    #endif

#endif

////////////////////////////////////////////////////////////////////////
// gemm

void cv::gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags, Stream& stream)
{
#ifndef HAVE_CUBLAS
    (void)src1;
    (void)src2;
    (void)alpha;
    (void)src3;
    (void)beta;
    (void)dst;
    (void)flags;
    (void)stream;
    CV_Error(cv::Error::StsNotImplemented, "The library was built without CUBLAS");
#else
    // CUBLAS works with column-major matrices
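    // (i.e. an OpenCV row-major matrix looks transposed to CUBLAS. The calls
    // below exploit the identity (A*B)^T = B^T * A^T: the operands are passed
    // in swapped order (src2 first) and the transpose flags are swapped with
    // them, so the column-major result CUBLAS writes is exactly the row-major
    // dst we want.)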

    CV_Assert(src1.type() == CV_32FC1 || src1.type() == CV_32FC2 || src1.type() == CV_64FC1 || src1.type() == CV_64FC2);
    CV_Assert(src2.type() == src1.type() && (src3.empty() || src3.type() == src1.type()));

    if (src1.depth() == CV_64F)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
    }

    bool tr1 = (flags & GEMM_1_T) != 0;
    bool tr2 = (flags & GEMM_2_T) != 0;
    bool tr3 = (flags & GEMM_3_T) != 0;

    if (src1.type() == CV_64FC2)
    {
        if (tr1 || tr2 || tr3)
            CV_Error(cv::Error::StsNotImplemented, "the transpose operation is not implemented for the CV_64FC2 type");
    }

    Size src1Size = tr1 ? Size(src1.rows, src1.cols) : src1.size();
    Size src2Size = tr2 ? Size(src2.rows, src2.cols) : src2.size();
    Size src3Size = tr3 ? Size(src3.rows, src3.cols) : src3.size();
    Size dstSize(src2Size.width, src1Size.height);

    CV_Assert(src1Size.width == src2Size.height);
    CV_Assert(src3.empty() || src3Size == dstSize);

    dst.create(dstSize, src1.type());

    if (beta != 0)
    {
        if (src3.empty())
        {
            if (stream)
                stream.enqueueMemSet(dst, Scalar::all(0));
            else
                dst.setTo(Scalar::all(0));
        }
        else
        {
            if (tr3)
            {
                gpu::transpose(src3, dst, stream);
            }
            else
            {
                if (stream)
                    stream.enqueueCopy(src3, dst);
                else
                    src3.copyTo(dst);
            }
        }
    }

    cublasHandle_t handle;
    cublasSafeCall( cublasCreate_v2(&handle) );

    cublasSafeCall( cublasSetStream_v2(handle, StreamAccessor::getStream(stream)) );

    cublasSafeCall( cublasSetPointerMode_v2(handle, CUBLAS_POINTER_MODE_HOST) );

    const float alphaf = static_cast<float>(alpha);
    const float betaf = static_cast<float>(beta);

    const cuComplex alphacf = make_cuComplex(alphaf, 0);
    const cuComplex betacf = make_cuComplex(betaf, 0);

    const cuDoubleComplex alphac = make_cuDoubleComplex(alpha, 0);
    const cuDoubleComplex betac = make_cuDoubleComplex(beta, 0);

    cublasOperation_t transa = tr2 ? CUBLAS_OP_T : CUBLAS_OP_N;
    cublasOperation_t transb = tr1 ? CUBLAS_OP_T : CUBLAS_OP_N;

    switch (src1.type())
    {
    case CV_32FC1:
        cublasSafeCall( cublasSgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alphaf,
            src2.ptr<float>(), static_cast<int>(src2.step / sizeof(float)),
            src1.ptr<float>(), static_cast<int>(src1.step / sizeof(float)),
            &betaf,
            dst.ptr<float>(), static_cast<int>(dst.step / sizeof(float))) );
        break;

    case CV_64FC1:
        cublasSafeCall( cublasDgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alpha,
            src2.ptr<double>(), static_cast<int>(src2.step / sizeof(double)),
            src1.ptr<double>(), static_cast<int>(src1.step / sizeof(double)),
            &beta,
            dst.ptr<double>(), static_cast<int>(dst.step / sizeof(double))) );
        break;

    case CV_32FC2:
        cublasSafeCall( cublasCgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alphacf,
            src2.ptr<cuComplex>(), static_cast<int>(src2.step / sizeof(cuComplex)),
            src1.ptr<cuComplex>(), static_cast<int>(src1.step / sizeof(cuComplex)),
            &betacf,
            dst.ptr<cuComplex>(), static_cast<int>(dst.step / sizeof(cuComplex))) );
        break;

    case CV_64FC2:
        cublasSafeCall( cublasZgemm_v2(handle, transa, transb, tr2 ? src2.rows : src2.cols, tr1 ? src1.cols : src1.rows, tr2 ? src2.cols : src2.rows,
            &alphac,
            src2.ptr<cuDoubleComplex>(), static_cast<int>(src2.step / sizeof(cuDoubleComplex)),
            src1.ptr<cuDoubleComplex>(), static_cast<int>(src1.step / sizeof(cuDoubleComplex)),
            &betac,
            dst.ptr<cuDoubleComplex>(), static_cast<int>(dst.step / sizeof(cuDoubleComplex))) );
        break;
    }

    cublasSafeCall( cublasDestroy_v2(handle) );
#endif
}
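
// A hypothetical host-side usage sketch for the routine above (assuming a
// CUBLAS-enabled build; the matrix names are illustrative only):
//
//   cv::gpu::GpuMat A(64, 32, CV_32FC1), B(32, 48, CV_32FC1), C;
//   cv::gpu::gemm(A, B, 1.0, cv::gpu::GpuMat(), 0.0, C);            // C = A * B
//   cv::gpu::gemm(A, B, 1.0, cv::gpu::GpuMat(), 0.0, C, GEMM_2_T);  // C = A * B^T (sizes permitting)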

////////////////////////////////////////////////////////////////////////
// integral

void cv::gpu::integral(const GpuMat& src, GpuMat& sum, Stream& s)
{
    GpuMat buffer;
    gpu::integralBuffered(src, sum, buffer, s);
}

namespace cv { namespace gpu { namespace cudev
{
    namespace imgproc
    {
        void shfl_integral_gpu(const PtrStepSzb& img, PtrStepSz<unsigned int> integral, cudaStream_t stream);
    }
}}}

void cv::gpu::integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& s)
{
    CV_Assert(src.type() == CV_8UC1);

    cudaStream_t stream = StreamAccessor::getStream(s);

    cv::Size whole;
    cv::Point offset;

    src.locateROI(whole, offset);
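
    // Two code paths follow. The fast one uses a warp-shuffle based scan
    // kernel and, judging by the condition below, requires shuffle-capable
    // hardware, a width of at most 2048 columns, a 16-byte-aligned ROI offset
    // and enough row pitch for the 64-column padded blocks; anything else
    // falls back to the NPP-based implementation from the gpulegacy module.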
    if (deviceSupports(WARP_SHUFFLE_FUNCTIONS) && src.cols <= 2048
        && offset.x % 16 == 0 && ((src.cols + 63) / 64) * 64 <= (static_cast<int>(src.step) - offset.x))
    {
        ensureSizeIsEnough(((src.rows + 7) / 8) * 8, ((src.cols + 63) / 64) * 64, CV_32SC1, buffer);

        cv::gpu::cudev::imgproc::shfl_integral_gpu(src, buffer, stream);

        sum.create(src.rows + 1, src.cols + 1, CV_32SC1);
        if (s)
            s.enqueueMemSet(sum, Scalar::all(0));
        else
            sum.setTo(Scalar::all(0));

        GpuMat inner = sum(Rect(1, 1, src.cols, src.rows));
        GpuMat res = buffer(Rect(0, 0, src.cols, src.rows));

        if (s)
            s.enqueueCopy(res, inner);
        else
            res.copyTo(inner);
    }
    else
    {
#ifndef HAVE_OPENCV_GPULEGACY
        throw_no_cuda();
#else
        sum.create(src.rows + 1, src.cols + 1, CV_32SC1);

        NcvSize32u roiSize;
        roiSize.width = src.cols;
        roiSize.height = src.rows;

        cudaDeviceProp prop;
        cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

        Ncv32u bufSize;
        ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
        ensureSizeIsEnough(1, bufSize, CV_8UC1, buffer);

        NppStStreamHandler h(stream);

        ncvSafeCall( nppiStIntegral_8u32u_C1R(const_cast<Ncv8u*>(src.ptr<Ncv8u>()), static_cast<int>(src.step),
            sum.ptr<Ncv32u>(), static_cast<int>(sum.step), roiSize, buffer.ptr<Ncv8u>(), bufSize, prop) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
#endif
    }
}

//////////////////////////////////////////////////////////////////////////////
// sqrIntegral

void cv::gpu::sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& s)
{
#ifndef HAVE_OPENCV_GPULEGACY
    (void) src;
    (void) sqsum;
    (void) s;
    throw_no_cuda();
#else
    CV_Assert(src.type() == CV_8U);

    NcvSize32u roiSize;
    roiSize.width = src.cols;
    roiSize.height = src.rows;

    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

    Ncv32u bufSize;
    ncvSafeCall(nppiStSqrIntegralGetSize_8u64u(roiSize, &bufSize, prop));
    GpuMat buf(1, bufSize, CV_8U);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStStreamHandler h(stream);

    sqsum.create(src.rows + 1, src.cols + 1, CV_64F);
    ncvSafeCall(nppiStSqrIntegral_8u64u_C1R(const_cast<Ncv8u*>(src.ptr<Ncv8u>(0)), static_cast<int>(src.step),
        sqsum.ptr<Ncv64u>(0), static_cast<int>(sqsum.step), roiSize, buf.ptr<Ncv8u>(0), bufSize, prop));

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
#endif
}

//////////////////////////////////////////////////////////////////////////////
// mulSpectrums

#ifdef HAVE_CUFFT

namespace cv { namespace gpu { namespace cudev
{
    void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream);

    void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream);
}}}

#endif

void cv::gpu::mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB, Stream& stream)
{
#ifndef HAVE_CUFFT
    (void) a;
    (void) b;
    (void) c;
    (void) flags;
    (void) conjB;
    (void) stream;
    throw_no_cuda();
#else
    (void) flags;

    typedef void (*Caller)(const PtrStep<cufftComplex>, const PtrStep<cufftComplex>, PtrStepSz<cufftComplex>, cudaStream_t stream);

    static Caller callers[] = { cudev::mulSpectrums, cudev::mulSpectrums_CONJ };

    CV_Assert(a.type() == b.type() && a.type() == CV_32FC2);
    CV_Assert(a.size() == b.size());

    c.create(a.size(), CV_32FC2);

    Caller caller = callers[(int)conjB];
    caller(a, b, c, StreamAccessor::getStream(stream));
#endif
}

//////////////////////////////////////////////////////////////////////////////
// mulAndScaleSpectrums

#ifdef HAVE_CUFFT

namespace cv { namespace gpu { namespace cudev
{
    void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream);

    void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream);
}}}

#endif

void cv::gpu::mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB, Stream& stream)
{
#ifndef HAVE_CUFFT
    (void) a;
    (void) b;
    (void) c;
    (void) flags;
    (void) scale;
    (void) conjB;
    (void) stream;
    throw_no_cuda();
#else
    (void)flags;

    typedef void (*Caller)(const PtrStep<cufftComplex>, const PtrStep<cufftComplex>, float scale, PtrStepSz<cufftComplex>, cudaStream_t stream);
    static Caller callers[] = { cudev::mulAndScaleSpectrums, cudev::mulAndScaleSpectrums_CONJ };

    CV_Assert(a.type() == b.type() && a.type() == CV_32FC2);
    CV_Assert(a.size() == b.size());

    c.create(a.size(), CV_32FC2);

    Caller caller = callers[(int)conjB];
    caller(a, b, scale, c, StreamAccessor::getStream(stream));
#endif
}

//////////////////////////////////////////////////////////////////////////////
// dft

void cv::gpu::dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags, Stream& stream)
{
#ifndef HAVE_CUFFT
    (void) src;
    (void) dst;
    (void) dft_size;
    (void) flags;
    (void) stream;
    throw_no_cuda();
#else

    CV_Assert(src.type() == CV_32F || src.type() == CV_32FC2);

    // We don't support unpacked output (in the case of real input)
    CV_Assert(!(flags & DFT_COMPLEX_OUTPUT));

    bool is_1d_input = (dft_size.height == 1) || (dft_size.width == 1);
    int is_row_dft = flags & DFT_ROWS;
    int is_scaled_dft = flags & DFT_SCALE;
    int is_inverse = flags & DFT_INVERSE;
    bool is_complex_input = src.channels() == 2;
    bool is_complex_output = !(flags & DFT_REAL_OUTPUT);

    // We don't support real-to-real transform
    CV_Assert(is_complex_input || is_complex_output);

    GpuMat src_data;

    // Make sure here we work with the continuous input,
    // as CUFFT can't handle gaps
    src_data = src;
    createContinuous(src.rows, src.cols, src.type(), src_data);
    if (src_data.data != src.data)
        src.copyTo(src_data);

    Size dft_size_opt = dft_size;
    if (is_1d_input && !is_row_dft)
    {
        // If the source matrix is a single column, handle it as a single row
        dft_size_opt.width = std::max(dft_size.width, dft_size.height);
        dft_size_opt.height = std::min(dft_size.width, dft_size.height);
    }

    cufftType dft_type = CUFFT_R2C;
    if (is_complex_input)
        dft_type = is_complex_output ? CUFFT_C2C : CUFFT_C2R;

    CV_Assert(dft_size_opt.width > 1);

    cufftHandle plan;
    if (is_1d_input || is_row_dft)
        cufftPlan1d(&plan, dft_size_opt.width, dft_type, dft_size_opt.height);
    else
        cufftPlan2d(&plan, dft_size_opt.height, dft_size_opt.width, dft_type);

    cufftSafeCall( cufftSetStream(plan, StreamAccessor::getStream(stream)) );

    if (is_complex_input)
    {
        if (is_complex_output)
        {
            createContinuous(dft_size, CV_32FC2, dst);
            cufftSafeCall(cufftExecC2C(
                plan, src_data.ptr<cufftComplex>(), dst.ptr<cufftComplex>(),
                is_inverse ? CUFFT_INVERSE : CUFFT_FORWARD));
        }
        else
        {
            createContinuous(dft_size, CV_32F, dst);
            cufftSafeCall(cufftExecC2R(
                plan, src_data.ptr<cufftComplex>(), dst.ptr<cufftReal>()));
        }
    }
    else
    {
        // We could swap dft_size for efficiency. Here we must reflect it
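        // (CUFFT's R2C transform stores only the non-redundant half of the
        // Hermitian-symmetric spectrum, which is why the complex output below
        // has width / 2 + 1 columns, or height / 2 + 1 rows when a 1-D input
        // was handled in transposed form.)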
        if (dft_size == dft_size_opt)
            createContinuous(Size(dft_size.width / 2 + 1, dft_size.height), CV_32FC2, dst);
        else
            createContinuous(Size(dft_size.width, dft_size.height / 2 + 1), CV_32FC2, dst);

        cufftSafeCall(cufftExecR2C(
            plan, src_data.ptr<cufftReal>(), dst.ptr<cufftComplex>()));
    }

    cufftSafeCall(cufftDestroy(plan));

    if (is_scaled_dft)
        multiply(dst, Scalar::all(1. / dft_size.area()), dst, 1, -1, stream);

#endif
}

//////////////////////////////////////////////////////////////////////////////
// convolve

void cv::gpu::ConvolveBuf::create(Size image_size, Size templ_size)
{
    result_size = Size(image_size.width - templ_size.width + 1,
                       image_size.height - templ_size.height + 1);

    block_size = user_block_size;
    if (user_block_size.width == 0 || user_block_size.height == 0)
        block_size = estimateBlockSize(result_size, templ_size);

    dft_size.width = 1 << int(ceil(std::log(block_size.width + templ_size.width - 1.) / std::log(2.)));
    dft_size.height = 1 << int(ceil(std::log(block_size.height + templ_size.height - 1.) / std::log(2.)));

    // CUFFT has hard-coded kernels for power-of-2 sizes (up to 8192),
    // see CUDA Toolkit 4.1 CUFFT Library Programming Guide
    if (dft_size.width > 8192)
        dft_size.width = getOptimalDFTSize(block_size.width + templ_size.width - 1);
    if (dft_size.height > 8192)
        dft_size.height = getOptimalDFTSize(block_size.height + templ_size.height - 1);

    // To avoid wasting time doing small DFTs
    dft_size.width = std::max(dft_size.width, 512);
    dft_size.height = std::max(dft_size.height, 512);

    createContinuous(dft_size, CV_32F, image_block);
    createContinuous(dft_size, CV_32F, templ_block);
    createContinuous(dft_size, CV_32F, result_data);

    spect_len = dft_size.height * (dft_size.width / 2 + 1);
    createContinuous(1, spect_len, CV_32FC2, image_spect);
    createContinuous(1, spect_len, CV_32FC2, templ_spect);
    createContinuous(1, spect_len, CV_32FC2, result_spect);

    // Use maximum result matrix block size for the estimated DFT block size
    block_size.width = std::min(dft_size.width - templ_size.width + 1, result_size.width);
    block_size.height = std::min(dft_size.height - templ_size.height + 1, result_size.height);
}


Size cv::gpu::ConvolveBuf::estimateBlockSize(Size result_size, Size /*templ_size*/)
{
    int width = (result_size.width + 2) / 3;
    int height = (result_size.height + 2) / 3;
    width = std::min(width, result_size.width);
    height = std::min(height, result_size.height);
    return Size(width, height);
}
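
// The heuristic above simply targets roughly a 3x3 grid of blocks over the
// result matrix (each dimension is about a third of the result size); the
// template size argument is currently unused.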


void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr)
{
    ConvolveBuf buf;
    gpu::convolve(image, templ, result, ccorr, buf);
}

void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream)
{
#ifndef HAVE_CUFFT
    (void) image;
    (void) templ;
    (void) result;
    (void) ccorr;
    (void) buf;
    (void) stream;
    throw_no_cuda();
#else
    using namespace cv::gpu::cudev::imgproc;

    CV_Assert(image.type() == CV_32F);
    CV_Assert(templ.type() == CV_32F);

    buf.create(image.size(), templ.size());
    result.create(buf.result_size, CV_32F);

    Size& block_size = buf.block_size;
    Size& dft_size = buf.dft_size;

    GpuMat& image_block = buf.image_block;
    GpuMat& templ_block = buf.templ_block;
    GpuMat& result_data = buf.result_data;

    GpuMat& image_spect = buf.image_spect;
    GpuMat& templ_spect = buf.templ_spect;
    GpuMat& result_spect = buf.result_spect;

    cufftHandle planR2C, planC2R;
    cufftSafeCall(cufftPlan2d(&planC2R, dft_size.height, dft_size.width, CUFFT_C2R));
    cufftSafeCall(cufftPlan2d(&planR2C, dft_size.height, dft_size.width, CUFFT_R2C));

    cufftSafeCall( cufftSetStream(planR2C, StreamAccessor::getStream(stream)) );
    cufftSafeCall( cufftSetStream(planC2R, StreamAccessor::getStream(stream)) );

    GpuMat templ_roi(templ.size(), CV_32F, templ.data, templ.step);
    gpu::copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
                        templ_block.cols - templ_roi.cols, 0, Scalar(), stream);

    cufftSafeCall(cufftExecR2C(planR2C, templ_block.ptr<cufftReal>(),
                               templ_spect.ptr<cufftComplex>()));

    // Process all blocks of the result matrix
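    // This is a block-wise frequency-domain convolution: for every
    // block_size tile of the result, a dft_size window of the image is
    // zero-padded, transformed, multiplied with the precomputed template
    // spectrum (with 1/dft_size.area() folded in as the inverse-FFT scale),
    // transformed back, and only the valid block_size portion is copied out.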
    for (int y = 0; y < result.rows; y += block_size.height)
    {
        for (int x = 0; x < result.cols; x += block_size.width)
        {
            Size image_roi_size(std::min(x + dft_size.width, image.cols) - x,
                                std::min(y + dft_size.height, image.rows) - y);
            GpuMat image_roi(image_roi_size, CV_32F, (void*)(image.ptr<float>(y) + x),
                             image.step);
            gpu::copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows,
                                0, image_block.cols - image_roi.cols, 0, Scalar(), stream);

            cufftSafeCall(cufftExecR2C(planR2C, image_block.ptr<cufftReal>(),
                                       image_spect.ptr<cufftComplex>()));
            gpu::mulAndScaleSpectrums(image_spect, templ_spect, result_spect, 0,
                                      1.f / dft_size.area(), ccorr, stream);
            cufftSafeCall(cufftExecC2R(planC2R, result_spect.ptr<cufftComplex>(),
                                       result_data.ptr<cufftReal>()));

            Size result_roi_size(std::min(x + block_size.width, result.cols) - x,
                                 std::min(y + block_size.height, result.rows) - y);
            GpuMat result_roi(result_roi_size, result.type(),
                              (void*)(result.ptr<float>(y) + x), result.step);
            GpuMat result_block(result_roi_size, result_data.type(),
                                result_data.ptr(), result_data.step);

            if (stream)
                stream.enqueueCopy(result_block, result_roi);
            else
                result_block.copyTo(result_roi);
        }
    }

    cufftSafeCall(cufftDestroy(planR2C));
    cufftSafeCall(cufftDestroy(planC2R));
#endif
}

#endif /* !defined (HAVE_CUDA) */
 488  modules/gpuarithm/src/core.cpp  Normal file
@ -0,0 +1,488 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

void cv::gpu::merge(const GpuMat* /*src*/, size_t /*count*/, GpuMat& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
void cv::gpu::merge(const std::vector<GpuMat>& /*src*/, GpuMat& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }

void cv::gpu::split(const GpuMat& /*src*/, GpuMat* /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
void cv::gpu::split(const GpuMat& /*src*/, std::vector<GpuMat>& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }

void cv::gpu::transpose(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::flip(const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::LUT(const GpuMat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, int, const Scalar&, Stream&) { throw_no_cuda(); }

#else /* !defined (HAVE_CUDA) */

////////////////////////////////////////////////////////////////////////
// merge/split

namespace cv { namespace gpu { namespace cudev
{
    namespace split_merge
    {
        void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst, int total_channels, size_t elem_size, const cudaStream_t& stream);
        void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream);
    }
}}}

namespace
{
    void merge(const GpuMat* src, size_t n, GpuMat& dst, const cudaStream_t& stream)
    {
        using namespace ::cv::gpu::cudev::split_merge;

        CV_Assert(src);
        CV_Assert(n > 0);

        int depth = src[0].depth();
        Size size = src[0].size();

        if (depth == CV_64F)
        {
            if (!deviceSupports(NATIVE_DOUBLE))
                CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
        }

        bool single_channel_only = true;
        int total_channels = 0;

        for (size_t i = 0; i < n; ++i)
        {
            CV_Assert(src[i].size() == size);
            CV_Assert(src[i].depth() == depth);
            single_channel_only = single_channel_only && src[i].channels() == 1;
            total_channels += src[i].channels();
        }

        CV_Assert(single_channel_only);
        CV_Assert(total_channels <= 4);

        if (total_channels == 1)
            src[0].copyTo(dst);
        else
        {
            dst.create(size, CV_MAKETYPE(depth, total_channels));

            PtrStepSzb src_as_devmem[4];
            for(size_t i = 0; i < n; ++i)
                src_as_devmem[i] = src[i];

            PtrStepSzb dst_as_devmem(dst);
            merge_caller(src_as_devmem, dst_as_devmem, total_channels, CV_ELEM_SIZE(depth), stream);
        }
    }

    void split(const GpuMat& src, GpuMat* dst, const cudaStream_t& stream)
    {
        using namespace ::cv::gpu::cudev::split_merge;

        CV_Assert(dst);

        int depth = src.depth();
        int num_channels = src.channels();

        if (depth == CV_64F)
        {
            if (!deviceSupports(NATIVE_DOUBLE))
                CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
        }

        if (num_channels == 1)
        {
            src.copyTo(dst[0]);
            return;
        }

        for (int i = 0; i < num_channels; ++i)
            dst[i].create(src.size(), depth);

        CV_Assert(num_channels <= 4);

        PtrStepSzb dst_as_devmem[4];
        for (int i = 0; i < num_channels; ++i)
            dst_as_devmem[i] = dst[i];

        PtrStepSzb src_as_devmem(src);
        split_caller(src_as_devmem, dst_as_devmem, num_channels, src.elemSize1(), stream);
    }
}

void cv::gpu::merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream)
{
    ::merge(src, n, dst, StreamAccessor::getStream(stream));
}


void cv::gpu::merge(const std::vector<GpuMat>& src, GpuMat& dst, Stream& stream)
{
    ::merge(&src[0], src.size(), dst, StreamAccessor::getStream(stream));
}

void cv::gpu::split(const GpuMat& src, GpuMat* dst, Stream& stream)
{
    ::split(src, dst, StreamAccessor::getStream(stream));
}

void cv::gpu::split(const GpuMat& src, std::vector<GpuMat>& dst, Stream& stream)
{
    dst.resize(src.channels());
    if(src.channels() > 0)
        ::split(src, &dst[0], StreamAccessor::getStream(stream));
}
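
// A hypothetical usage sketch for the pair above (illustrative names only):
//
//   std::vector<cv::gpu::GpuMat> planes;
//   cv::gpu::split(d_bgr, planes);      // d_bgr: e.g. CV_8UC3 -> 3 planes
//   cv::gpu::merge(planes, d_merged);   // d_merged has d_bgr's layout again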

////////////////////////////////////////////////////////////////////////
// transpose

namespace arithm
{
    template <typename T> void transpose(PtrStepSz<T> src, PtrStepSz<T> dst, cudaStream_t stream);
}

void cv::gpu::transpose(const GpuMat& src, GpuMat& dst, Stream& s)
{
    CV_Assert( src.elemSize() == 1 || src.elemSize() == 4 || src.elemSize() == 8 );

    dst.create( src.cols, src.rows, src.type() );

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (src.elemSize() == 1)
    {
        NppStreamHandler h(stream);

        NppiSize sz;
        sz.width = src.cols;
        sz.height = src.rows;

        nppSafeCall( nppiTranspose_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else if (src.elemSize() == 4)
    {
        arithm::transpose<int>(src, dst, stream);
    }
    else // if (src.elemSize() == 8)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");

        arithm::transpose<double>(src, dst, stream);
    }
}

////////////////////////////////////////////////////////////////////////
// flip

namespace
{
    template<int DEPTH> struct NppTypeTraits;
    template<> struct NppTypeTraits<CV_8U>  { typedef Npp8u npp_t; };
    template<> struct NppTypeTraits<CV_8S>  { typedef Npp8s npp_t; };
    template<> struct NppTypeTraits<CV_16U> { typedef Npp16u npp_t; };
    template<> struct NppTypeTraits<CV_16S> { typedef Npp16s npp_t; };
    template<> struct NppTypeTraits<CV_32S> { typedef Npp32s npp_t; };
    template<> struct NppTypeTraits<CV_32F> { typedef Npp32f npp_t; };
    template<> struct NppTypeTraits<CV_64F> { typedef Npp64f npp_t; };

    template <int DEPTH> struct NppMirrorFunc
    {
        typedef typename NppTypeTraits<DEPTH>::npp_t npp_t;

        typedef NppStatus (*func_t)(const npp_t* pSrc, int nSrcStep, npp_t* pDst, int nDstStep, NppiSize oROI, NppiAxis flip);
    };

    template <int DEPTH, typename NppMirrorFunc<DEPTH>::func_t func> struct NppMirror
    {
        typedef typename NppMirrorFunc<DEPTH>::npp_t npp_t;

        static void call(const GpuMat& src, GpuMat& dst, int flipCode, cudaStream_t stream)
        {
            NppStreamHandler h(stream);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            nppSafeCall( func(src.ptr<npp_t>(), static_cast<int>(src.step),
                dst.ptr<npp_t>(), static_cast<int>(dst.step), sz,
                (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS))) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
}

void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, GpuMat& dst, int flipCode, cudaStream_t stream);
    static const func_t funcs[6][4] =
    {
        {NppMirror<CV_8U, nppiMirror_8u_C1R>::call, 0, NppMirror<CV_8U, nppiMirror_8u_C3R>::call, NppMirror<CV_8U, nppiMirror_8u_C4R>::call},
        {0,0,0,0},
        {NppMirror<CV_16U, nppiMirror_16u_C1R>::call, 0, NppMirror<CV_16U, nppiMirror_16u_C3R>::call, NppMirror<CV_16U, nppiMirror_16u_C4R>::call},
        {0,0,0,0},
        {NppMirror<CV_32S, nppiMirror_32s_C1R>::call, 0, NppMirror<CV_32S, nppiMirror_32s_C3R>::call, NppMirror<CV_32S, nppiMirror_32s_C4R>::call},
        {NppMirror<CV_32F, nppiMirror_32f_C1R>::call, 0, NppMirror<CV_32F, nppiMirror_32f_C3R>::call, NppMirror<CV_32F, nppiMirror_32f_C4R>::call}
    };

    CV_Assert(src.depth() == CV_8U || src.depth() == CV_16U || src.depth() == CV_32S || src.depth() == CV_32F);
    CV_Assert(src.channels() == 1 || src.channels() == 3 || src.channels() == 4);

    dst.create(src.size(), src.type());

    funcs[src.depth()][src.channels() - 1](src, dst, flipCode, StreamAccessor::getStream(stream));
}
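
// The flipCode convention matches cv::flip: 0 flips around the x-axis,
// a positive value around the y-axis, and a negative value around both;
// NppMirror maps these onto NPP's NppiAxis values above.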

////////////////////////////////////////////////////////////////////////
// LUT

void cv::gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& s)
{
    const int cn = src.channels();

    CV_Assert( src.type() == CV_8UC1 || src.type() == CV_8UC3 );
    CV_Assert( lut.depth() == CV_8U );
    CV_Assert( lut.channels() == 1 || lut.channels() == cn );
    CV_Assert( lut.rows * lut.cols == 256 && lut.isContinuous() );

    dst.create(src.size(), CV_MAKE_TYPE(lut.depth(), cn));

    NppiSize sz;
    sz.height = src.rows;
    sz.width = src.cols;

    Mat nppLut;
    lut.convertTo(nppLut, CV_32S);

    int nValues3[] = {256, 256, 256};

    Npp32s pLevels[256];
    for (int i = 0; i < 256; ++i)
        pLevels[i] = i;
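
    // With these identity levels (pLevels[i] == i) the "linear" NPP LUT
    // degenerates into a plain 256-entry table lookup, which is the
    // behaviour cv::LUT promises.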
|
||||
|
||||
const Npp32s* pLevels3[3];
|
||||
|
||||
#if (CUDA_VERSION <= 4020)
|
||||
pLevels3[0] = pLevels3[1] = pLevels3[2] = pLevels;
|
||||
#else
|
||||
GpuMat d_pLevels;
|
||||
d_pLevels.upload(Mat(1, 256, CV_32S, pLevels));
|
||||
pLevels3[0] = pLevels3[1] = pLevels3[2] = d_pLevels.ptr<Npp32s>();
|
||||
#endif
|
||||
|
||||
cudaStream_t stream = StreamAccessor::getStream(s);
|
||||
NppStreamHandler h(stream);
|
||||
|
||||
if (src.type() == CV_8UC1)
|
||||
{
|
||||
#if (CUDA_VERSION <= 4020)
|
||||
nppSafeCall( nppiLUT_Linear_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
|
||||
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, nppLut.ptr<Npp32s>(), pLevels, 256) );
|
||||
#else
|
||||
GpuMat d_nppLut(Mat(1, 256, CV_32S, nppLut.data));
|
||||
nppSafeCall( nppiLUT_Linear_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
|
||||
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, d_nppLut.ptr<Npp32s>(), d_pLevels.ptr<Npp32s>(), 256) );
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
const Npp32s* pValues3[3];
|
||||
|
||||
Mat nppLut3[3];
|
||||
if (nppLut.channels() == 1)
|
||||
{
|
||||
#if (CUDA_VERSION <= 4020)
|
||||
pValues3[0] = pValues3[1] = pValues3[2] = nppLut.ptr<Npp32s>();
|
||||
#else
|
||||
GpuMat d_nppLut(Mat(1, 256, CV_32S, nppLut.data));
|
||||
pValues3[0] = pValues3[1] = pValues3[2] = d_nppLut.ptr<Npp32s>();
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::split(nppLut, nppLut3);
|
||||
|
||||
#if (CUDA_VERSION <= 4020)
|
||||
pValues3[0] = nppLut3[0].ptr<Npp32s>();
|
||||
pValues3[1] = nppLut3[1].ptr<Npp32s>();
|
||||
pValues3[2] = nppLut3[2].ptr<Npp32s>();
|
||||
#else
|
||||
GpuMat d_nppLut0(Mat(1, 256, CV_32S, nppLut3[0].data));
|
||||
GpuMat d_nppLut1(Mat(1, 256, CV_32S, nppLut3[1].data));
|
||||
GpuMat d_nppLut2(Mat(1, 256, CV_32S, nppLut3[2].data));
|
||||
|
||||
pValues3[0] = d_nppLut0.ptr<Npp32s>();
|
||||
pValues3[1] = d_nppLut1.ptr<Npp32s>();
|
||||
pValues3[2] = d_nppLut2.ptr<Npp32s>();
|
||||
#endif
|
||||
}
|
||||
|
||||
nppSafeCall( nppiLUT_Linear_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step),
|
||||
dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, pValues3, pLevels3, nValues3) );
|
||||
}
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
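
// A hedged usage sketch (illustrative only, not part of this commit): build a
// 1x256 inversion table on the host and apply it on the device. "hostImage" is
// an assumed CV_8UC1 or CV_8UC3 input.
//
//     cv::Mat lut(1, 256, CV_8UC1);
//     for (int i = 0; i < 256; ++i)
//         lut.at<uchar>(i) = static_cast<uchar>(255 - i);  // invert intensities
//
//     cv::gpu::GpuMat d_src(hostImage), d_dst;
//     cv::gpu::LUT(d_src, lut, d_dst);                     // dst(y,x) = lut(src(y,x)) per channel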

////////////////////////////////////////////////////////////////////////
// copyMakeBorder

namespace cv { namespace gpu { namespace cudev
{
    namespace imgproc
    {
        template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const T* borderValue, cudaStream_t stream);
    }
}}}

namespace
{
    template <typename T, int cn> void copyMakeBorder_caller(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream)
    {
        using namespace ::cv::gpu::cudev::imgproc;

        Scalar_<T> val(saturate_cast<T>(value[0]), saturate_cast<T>(value[1]), saturate_cast<T>(value[2]), saturate_cast<T>(value[3]));

        copyMakeBorder_gpu<T, cn>(src, dst, top, left, borderType, val.val, stream);
    }
}

#if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__  > 4
typedef Npp32s __attribute__((__may_alias__)) Npp32s_a;
#else
typedef Npp32s Npp32s_a;
#endif

void cv::gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value, Stream& s)
{
    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
    CV_Assert(borderType == BORDER_REFLECT_101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);

    dst.create(src.rows + top + bottom, src.cols + left + right, src.type());

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (borderType == BORDER_CONSTANT && (src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1 || src.type() == CV_32FC1))
    {
        NppiSize srcsz;
        srcsz.width = src.cols;
        srcsz.height = src.rows;

        NppiSize dstsz;
        dstsz.width = dst.cols;
        dstsz.height = dst.rows;

        NppStreamHandler h(stream);

        switch (src.type())
        {
        case CV_8UC1:
        {
            Npp8u nVal = saturate_cast<Npp8u>(value[0]);
            nppSafeCall( nppiCopyConstBorder_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
                dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
            break;
        }
        case CV_8UC4:
        {
            Npp8u nVal[] = {saturate_cast<Npp8u>(value[0]), saturate_cast<Npp8u>(value[1]), saturate_cast<Npp8u>(value[2]), saturate_cast<Npp8u>(value[3])};
            nppSafeCall( nppiCopyConstBorder_8u_C4R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
                dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
            break;
        }
        case CV_32SC1:
        {
            Npp32s nVal = saturate_cast<Npp32s>(value[0]);
            nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
                dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
            break;
        }
        case CV_32FC1:
        {
            Npp32f val = saturate_cast<Npp32f>(value[0]);
            Npp32s nVal = *(reinterpret_cast<Npp32s_a*>(&val));
            nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
                dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
            break;
        }
        }

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        typedef void (*caller_t)(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream);
        static const caller_t callers[6][4] =
        {
            {   copyMakeBorder_caller<uchar, 1>    ,   copyMakeBorder_caller<uchar, 2>     ,   copyMakeBorder_caller<uchar, 3>    ,   copyMakeBorder_caller<uchar, 4>},
            {0/*copyMakeBorder_caller<schar, 1>*/  , 0/*copyMakeBorder_caller<schar, 2>*/  , 0/*copyMakeBorder_caller<schar, 3>*/ , 0/*copyMakeBorder_caller<schar, 4>*/},
            {   copyMakeBorder_caller<ushort, 1>   , 0/*copyMakeBorder_caller<ushort, 2>*/ ,   copyMakeBorder_caller<ushort, 3>   ,   copyMakeBorder_caller<ushort, 4>},
            {   copyMakeBorder_caller<short, 1>    , 0/*copyMakeBorder_caller<short, 2>*/  ,   copyMakeBorder_caller<short, 3>    ,   copyMakeBorder_caller<short, 4>},
            {0/*copyMakeBorder_caller<int, 1>*/    , 0/*copyMakeBorder_caller<int, 2>*/    , 0/*copyMakeBorder_caller<int, 3>*/   , 0/*copyMakeBorder_caller<int, 4>*/},
            {   copyMakeBorder_caller<float, 1>    , 0/*copyMakeBorder_caller<float, 2>*/  ,   copyMakeBorder_caller<float, 3>    ,   copyMakeBorder_caller<float, 4>}
        };

        caller_t func = callers[src.depth()][src.channels() - 1];
        CV_Assert(func != 0);

        func(src, dst, top, left, borderType, value, stream);
    }
}

#endif /* !defined (HAVE_CUDA) */
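
// A hedged usage sketch for copyMakeBorder above (illustrative; "hostImage" is an
// assumed CV_8UC1 input): pad by 16 pixels on every side with a constant gray border.
//
//     cv::gpu::GpuMat d_src(hostImage), d_padded;
//     cv::gpu::copyMakeBorder(d_src, d_padded, 16, 16, 16, 16,
//                             cv::BORDER_CONSTANT, cv::Scalar::all(128));
//     // d_padded is (rows + 32) x (cols + 32); CV_8UC1/8UC4/32SC1/32FC1 constant
//     // borders take the dedicated NPP path, everything else the template kernels.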

147 modules/gpuarithm/src/cuda/absdiff_mat.cu (new file)
@ -0,0 +1,147 @@

/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    struct VAbsDiff4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vabsdiff4(a, b);
        }

        __device__ __forceinline__ VAbsDiff4() {}
        __device__ __forceinline__ VAbsDiff4(const VAbsDiff4& other) {}
    };

    struct VAbsDiff2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vabsdiff2(a, b);
        }

        __device__ __forceinline__ VAbsDiff2() {}
        __device__ __forceinline__ VAbsDiff2(const VAbsDiff2& other) {}
    };

    __device__ __forceinline__ int _abs(int a)
    {
        return ::abs(a);
    }
    __device__ __forceinline__ float _abs(float a)
    {
        return ::fabsf(a);
    }
    __device__ __forceinline__ double _abs(double a)
    {
        return ::fabs(a);
    }

    template <typename T> struct AbsDiffMat : binary_function<T, T, T>
    {
        __device__ __forceinline__ T operator ()(T a, T b) const
        {
            return saturate_cast<T>(_abs(a - b));
        }

        __device__ __forceinline__ AbsDiffMat() {}
        __device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
    }

    void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
    }

    template <typename T>
    void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
    }

    template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
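
// vabsdiff4/vabsdiff2 are SIMD-in-a-word operations: four bytes (or two halfwords)
// packed into one 32-bit register are handled per call. A host-side reference of
// the byte variant's semantics (an illustrative sketch, not code from this commit):
//
//     unsigned int vabsdiff4_ref(unsigned int a, unsigned int b)
//     {
//         unsigned int r = 0;
//         for (int i = 0; i < 32; i += 8)            // one 8-bit lane per iteration
//         {
//             unsigned int la = (a >> i) & 0xFFu;
//             unsigned int lb = (b >> i) & 0xFFu;
//             r |= (la > lb ? la - lb : lb - la) << i;
//         }
//         return r;
//     }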

98 modules/gpuarithm/src/cuda/absdiff_scalar.cu (new file)
@ -0,0 +1,98 @@

/* OpenCV license header: identical to the one in absdiff_mat.cu above */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
    {
        S val;

        explicit AbsDiffScalar(S val_) : val(val_) {}

        __device__ __forceinline__ T operator ()(T a) const
        {
            abs_func<S> f;
            return saturate_cast<T>(f(a - val));
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <typename T, typename S>
    void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        AbsDiffScalar<T, S> op(static_cast<S>(val));

        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
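
// Note the role of S in AbsDiffScalar<T, S>: the subtraction a - val runs in the
// wider type S (float for every integral T, double for double), and only the final
// absolute difference is saturate_cast back to T. A worked example: for T = uchar
// and S = float, pixel 10 and val 25.5 give f(10 - 25.5) = 15.5, and
// saturate_cast<uchar> rounds that to 16.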

185 modules/gpuarithm/src/cuda/add_mat.cu (new file)
@ -0,0 +1,185 @@

/* OpenCV license header: identical to the one in absdiff_mat.cu above */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    struct VAdd4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vadd4(a, b);
        }

        __device__ __forceinline__ VAdd4() {}
        __device__ __forceinline__ VAdd4(const VAdd4& other) {}
    };

    struct VAdd2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vadd2(a, b);
        }

        __device__ __forceinline__ VAdd2() {}
        __device__ __forceinline__ VAdd2(const VAdd2& other) {}
    };

    template <typename T, typename D> struct AddMat : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a + b);
        }

        __device__ __forceinline__ AddMat() {}
        __device__ __forceinline__ AddMat(const AddMat& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VAdd4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <> struct TransformFunctorTraits< arithm::VAdd2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    void addMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAdd4(), WithOutMask(), stream);
    }

    void addMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAdd2(), WithOutMask(), stream);
    }

    template <typename T, typename D>
    void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
    }

    template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}

#endif // CUDA_DISABLER
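
// The v4/v2 entry points never see a mask: they reinterpret rows of 8- or 16-bit
// pixels as 32-bit words, so a caller can use them only for mask-free, word-aligned
// data; everything else goes through the per-element addMat path. A host-side sketch
// of the per-byte behavior vadd4 has to provide for OpenCV's saturating CV_8U add
// (illustrative, assuming unsigned per-lane saturation):
//
//     unsigned int vadd4_ref(unsigned int a, unsigned int b)
//     {
//         unsigned int r = 0;
//         for (int i = 0; i < 32; i += 8)            // one 8-bit lane per iteration
//         {
//             unsigned int s = ((a >> i) & 0xFFu) + ((b >> i) & 0xFFu);
//             r |= (s > 0xFFu ? 0xFFu : s) << i;     // clamp each lane at 255
//         }
//         return r;
//     }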

148 modules/gpuarithm/src/cuda/add_scalar.cu (new file)
@ -0,0 +1,148 @@

/* OpenCV license header: identical to the one in absdiff_mat.cu above */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
    {
        S val;

        explicit AddScalar(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            return saturate_cast<D>(a + val);
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    template <typename T, typename S, typename D>
    void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        AddScalar<T, S, D> op(static_cast<S>(val));

        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}

#endif // CUDA_DISABLER

364 modules/gpuarithm/src/cuda/add_weighted.cu (new file)
@ -0,0 +1,364 @@

/* OpenCV license header: identical to the one in absdiff_mat.cu above */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T> struct UseDouble_
    {
        enum {value = 0};
    };
    template <> struct UseDouble_<double>
    {
        enum {value = 1};
    };
    template <typename T1, typename T2, typename D> struct UseDouble
    {
        enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
    };

    template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
    template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
    {
        float alpha;
        float beta;
        float gamma;

        AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}

        __device__ __forceinline__ D operator ()(T1 a, T2 b) const
        {
            return saturate_cast<D>(a * alpha + b * beta + gamma);
        }
    };
    template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
    {
        double alpha;
        double beta;
        double gamma;

        AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}

        __device__ __forceinline__ D operator ()(T1 a, T2 b) const
        {
            return saturate_cast<D>(a * alpha + b * beta + gamma);
        }
    };
    template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
    {
        AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
    {
    };
    template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
    {
    };

    template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
    {
    };
}}}
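
// The functors above compute dst = saturate_cast<D>(a * alpha + b * beta + gamma);
// UseDouble decides at compile time whether the weights are stored and applied as
// float or double: double is selected only when one of T1, T2, D is double. A hedged
// illustration (these static_asserts are mine, not part of the commit, and assume a
// C++11 host compiler):
//
//     static_assert(arithm::UseDouble<uchar, uchar, float>::value == 0,
//                   "no double operand -> float weights");
//     static_assert(arithm::UseDouble<uchar, double, uchar>::value == 1,
//                   "any double operand -> double weights");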

namespace arithm
{
    template <typename T1, typename T2, typename D>
    void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream)
    {
        AddWeighted<T1, T2, D> op(alpha, beta, gamma);

        cudev::transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);



    template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

    template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
|
||||
|
||||
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
|
||||
|
||||
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
|
||||
|
||||
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
|
||||
|
||||
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
|
||||
|
||||
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
|
||||
}
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
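For reference, every addWeighted<T1, T2, D> instantiation above performs the same per-element blend; only the source and destination types vary. The sketch below restates that operation in isolation. It is a minimal sketch assuming the kernel defined earlier in this file follows OpenCV's usual AddWeighted pattern; weighted_blend is an illustrative name, not part of the patch.

// Minimal sketch, not from the patch: the per-element operation behind each
// addWeighted<T1, T2, D> instantiation. Operands are promoted to double,
// blended, then saturated to the destination type D.
template <typename T1, typename T2, typename D>
__device__ __forceinline__ D weighted_blend(T1 a, T2 b, double alpha, double beta, double gamma)
{
    return saturate_cast<D>(a * alpha + b * beta + gamma);
}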
145 modules/gpuarithm/src/cuda/arithm_func_traits.hpp Normal file
@ -0,0 +1,145 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __ARITHM_FUNC_TRAITS_HPP__
#define __ARITHM_FUNC_TRAITS_HPP__

#include <cstddef>

namespace arithm
{
    template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 1 };
    };

    template <> struct ArithmFuncTraits<1, 1>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<1, 2>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<1, 4>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    template <> struct ArithmFuncTraits<2, 1>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<2, 2>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<2, 4>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    template <> struct ArithmFuncTraits<4, 1>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<4, 2>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<4, 4>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
}

#endif // __ARITHM_FUNC_TRAITS_HPP__
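ArithmFuncTraits carries only compile-time launch parameters. The "smart" values drive the vectorized transform path: smart_shift is the number of elements each thread processes along x, so the 1-, 2- and 4-byte specializations opt into four elements per thread, while the generic template (used for 8-byte types such as double) falls back to one. A simplified sketch of how a launch grid follows from these traits; this is illustrative only, the real computation lives inside OpenCV's transform implementation details:

// Illustrative sketch: deriving a CUDA grid from ArithmFuncTraits.
// Each thread covers smart_shift elements along x; divUp-style rounding.
template <class Traits>
dim3 makeGrid(int cols, int rows)
{
    const int x_span = Traits::smart_block_dim_x * Traits::smart_shift;
    return dim3((cols + x_span - 1) / x_span,
                (rows + Traits::smart_block_dim_y - 1) / Traits::smart_block_dim_y);
}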
126 modules/gpuarithm/src/cuda/bitwise_mat.cu Normal file
@ -0,0 +1,126 @@
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), WithOutMask(), stream);
    }

    template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), WithOutMask(), stream);
    }

    template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), WithOutMask(), stream);
    }

    template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), WithOutMask(), stream);
    }

    template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}

#endif // CUDA_DISABLER
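Each wrapper above chooses between a masked and an unmasked cudev::transform at run time, and the PtrStepSz<T> casts reinterpret the byte matrices at the caller's chosen element width. The plain loop below states the semantics that transform parallelizes; it is a reference only, with flat arrays standing in for the strided GPU matrices, and is not part of the patch:

// Reference semantics for bitMatAnd<T> (illustrative).
// WithOutMask() corresponds to the mask == NULL case here.
template <typename T>
void bitMatAndReference(const T* src1, const T* src2, T* dst,
                        const unsigned char* mask, int n)
{
    for (int i = 0; i < n; ++i)
        if (!mask || mask[i])        // write only where the mask allows
            dst[i] = src1[i] & src2[i];
}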
104 modules/gpuarithm/src/cuda/bitwise_scalar.cu Normal file
@ -0,0 +1,104 @@
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
    }

    template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
    }

    template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
    }

    template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarAnd<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarAnd<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);

    template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarOr<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarOr<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);

    template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarXor<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarXor<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
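bind2nd fixes the scalar as the functor's second operand, so transform sees an ordinary unary functor; the uint scalar is narrowed to T when the binder stores it. Stated as a plain loop, reference only, with the same caveats as the sketch above:

// Reference semantics for bitScalarAnd<T> (illustrative): every element is
// combined with one scalar, narrowed to the element type up front.
template <typename T>
void bitScalarAndReference(const T* src, unsigned int scalar, T* dst, int n)
{
    const T s = static_cast<T>(scalar);
    for (int i = 0; i < n; ++i)
        dst[i] = src[i] & s;
}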
206 modules/gpuarithm/src/cuda/cmp_mat.cu Normal file
@ -0,0 +1,206 @@
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    struct VCmpEq4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmpeq4(a, b);
        }

        __device__ __forceinline__ VCmpEq4() {}
        __device__ __forceinline__ VCmpEq4(const VCmpEq4& other) {}
    };
    struct VCmpNe4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmpne4(a, b);
        }

        __device__ __forceinline__ VCmpNe4() {}
        __device__ __forceinline__ VCmpNe4(const VCmpNe4& other) {}
    };
    struct VCmpLt4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmplt4(a, b);
        }

        __device__ __forceinline__ VCmpLt4() {}
        __device__ __forceinline__ VCmpLt4(const VCmpLt4& other) {}
    };
    struct VCmpLe4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmple4(a, b);
        }

        __device__ __forceinline__ VCmpLe4() {}
        __device__ __forceinline__ VCmpLe4(const VCmpLe4& other) {}
    };

    template <class Op, typename T>
    struct Cmp : binary_function<T, T, uchar>
    {
        __device__ __forceinline__ uchar operator()(T a, T b) const
        {
            Op op;
            return -op(a, b);
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VCmpEq4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpNe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpLt4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpLe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
    {
    };
}}}

namespace arithm
{
    void cmpMatEq_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpEq4(), WithOutMask(), stream);
    }
    void cmpMatNe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpNe4(), WithOutMask(), stream);
    }
    void cmpMatLt_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpLt4(), WithOutMask(), stream);
    }
    void cmpMatLe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpLe4(), WithOutMask(), stream);
    }

    template <template <typename> class Op, typename T>
    void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        Cmp<Op<T>, T> op;
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
    }

    template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<equal_to, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<not_equal_to, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<less, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<less_equal, T>(src1, src2, dst, stream);
    }

    template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
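Two details in this file are easy to miss. The VCmp*4 functors compare four packed bytes per 32-bit word through the SIMD-in-register intrinsics (vcmpeq4 and friends), which is why the _v4 entry points traffic in PtrStepSz<uint>. And Cmp returns -op(a, b): negating the bool yields 0xFF for true and 0x00 for false once truncated to uchar, matching OpenCV's comparison-mask convention. As a plain loop, reference only:

// Reference semantics for cmpMatLt<T> (illustrative): a full 0xFF mask byte
// where the predicate holds, 0x00 elsewhere.
template <typename T>
void cmpMatLtReference(const T* a, const T* b, unsigned char* dst, int n)
{
    for (int i = 0; i < n; ++i)
        dst[i] = (a[i] < b[i]) ? 0xFF : 0x00;
}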
284 modules/gpuarithm/src/cuda/cmp_scalar.cu Normal file
@ -0,0 +1,284 @@
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "opencv2/core/cuda/vec_math.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <class Op, typename T>
    struct Cmp : binary_function<T, T, uchar>
    {
        __device__ __forceinline__ uchar operator()(T a, T b) const
        {
            Op op;
            return -op(a, b);
        }
    };

#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type

    template <class Op, typename T, int cn> struct CmpScalar;
    template <class Op, typename T>
    struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
    {
        T val;

        __host__ explicit CmpScalar(T val_) : val(val_) {}

        __device__ __forceinline__ uchar operator()(T src) const
        {
            Cmp<Op, T> op;
            return op(src, val);
        }
    };
    template <class Op, typename T>
    struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
    {
        TYPE_VEC(T, 2) val;

        __host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}

        __device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
        {
            Cmp<Op, T> op;
            return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
        }
    };
    template <class Op, typename T>
    struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
    {
        TYPE_VEC(T, 3) val;

        __host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}

        __device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
        {
            Cmp<Op, T> op;
            return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
        }
    };
    template <class Op, typename T>
    struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
    {
        TYPE_VEC(T, 4) val;

        __host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}

        __device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
        {
            Cmp<Op, T> op;
            return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
        }
    };

#undef TYPE_VEC
}

namespace cv { namespace gpu { namespace cudev
{
    template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
    {
    };
}}}

namespace arithm
{
    template <template <typename> class Op, typename T, int cn>
    void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef typename TypeVec<T, cn>::vec_type src_t;
        typedef typename TypeVec<uchar, cn>::vec_type dst_t;

        T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
        src_t val1 = VecTraits<src_t>::make(sval);

        CmpScalar<Op<T>, T, cn> op(val1);
        cudev::transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
    }

    template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<equal_to, T, 1>,
            cmpScalar<equal_to, T, 2>,
            cmpScalar<equal_to, T, 3>,
            cmpScalar<equal_to, T, 4>
        };

        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<not_equal_to, T, 1>,
            cmpScalar<not_equal_to, T, 2>,
            cmpScalar<not_equal_to, T, 3>,
            cmpScalar<not_equal_to, T, 4>
        };

        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<less, T, 1>,
            cmpScalar<less, T, 2>,
            cmpScalar<less, T, 3>,
            cmpScalar<less, T, 4>
        };

        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<less_equal, T, 1>,
            cmpScalar<less_equal, T, 2>,
            cmpScalar<less_equal, T, 3>,
            cmpScalar<less_equal, T, 4>
        };

        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<greater, T, 1>,
            cmpScalar<greater, T, 2>,
            cmpScalar<greater, T, 3>,
            cmpScalar<greater, T, 4>
        };

        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<greater_equal, T, 1>,
            cmpScalar<greater_equal, T, 2>,
            cmpScalar<greater_equal, T, 3>,
            cmpScalar<greater_equal, T, 4>
        };

        funcs[cn](src, val, dst, stream);
    }

    template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
    template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
    template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
    template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
    template void cmpScalarEq<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
    template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
    template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
|
||||
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
|
||||
}
|
||||
|
||||
#endif // CUDA_DISABLER
|
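A minimal host-side sketch of how these exported entry points are meant to be driven (the wrapper below is an illustration only — the module's real compare() dispatcher lives on the C++ side and is not part of this file):

    // hypothetical wrapper, for illustration only
    void compareEq8u(const cv::gpu::GpuMat& src, cv::Scalar sc, cv::gpu::GpuMat& dst, cudaStream_t stream)
    {
        CV_Assert(src.depth() == CV_8U && src.channels() <= 4);

        dst.create(src.size(), CV_MAKETYPE(CV_8U, src.channels()));

        double val[4] = { sc[0], sc[1], sc[2], sc[3] };

        // cn selects the vectorized CmpScalar<Op, T, cn> specialization above
        arithm::cmpScalarEq<uchar>(src, src.channels(), val, dst, stream);
    }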
@ -86,11 +86,11 @@ namespace cv { namespace gpu { namespace cudev

         static const caller_t callers[5] =
         {
-            CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,
-            CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
             CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,
+            CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,
             CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,
-            CopyMakeBorderDispatcher<BrdWrap, vec_type>::call
+            CopyMakeBorderDispatcher<BrdWrap, vec_type>::call,
+            CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call
         };

         callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream);
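For context (not part of the diff): the reordered table lines up with the public core border flag values, so borderMode can index callers[] directly instead of going through the old gpu-specific ordering:

    // cv::BORDER_* values from opencv2/core — the index each dispatcher row answers to
    // BORDER_CONSTANT    = 0  -> BrdConstant
    // BORDER_REPLICATE   = 1  -> BrdReplicate
    // BORDER_REFLECT     = 2  -> BrdReflect
    // BORDER_WRAP        = 3  -> BrdWrap
    // BORDER_REFLECT_101 = 4  -> BrdReflect101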
175  modules/gpuarithm/src/cuda/countnonzero.cu  Normal file
@ -0,0 +1,175 @@
/* (standard OpenCV BSD license header, identical to the other new files in this patch) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/emulation.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace countNonZero
{
    __device__ unsigned int blocks_finished = 0;

    template <int BLOCK_SIZE, typename T>
    __global__ void kernel(const PtrStepSz<T> src, unsigned int* count, const int twidth, const int theight)
    {
        __shared__ unsigned int scount[BLOCK_SIZE];

        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;

        unsigned int mycount = 0;

        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const T* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                const T srcVal = ptr[x];

                mycount += (srcVal != 0);
            }
        }

        cudev::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());

    #if __CUDA_ARCH__ >= 200
        if (tid == 0)
            ::atomicAdd(count, mycount);
    #else
        __shared__ bool is_last;
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;

        if (tid == 0)
        {
            count[bid] = mycount;

            __threadfence();

            unsigned int ticket = ::atomicInc(&blocks_finished, gridDim.x * gridDim.y);
            is_last = (ticket == gridDim.x * gridDim.y - 1);
        }

        __syncthreads();

        if (is_last)
        {
            mycount = tid < gridDim.x * gridDim.y ? count[tid] : 0;

            cudev::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());

            if (tid == 0)
            {
                count[0] = mycount;

                blocks_finished = 0;
            }
        }
    #endif
    }

    const int threads_x = 32;
    const int threads_y = 8;

    void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
    {
        block = dim3(threads_x, threads_y);

        grid = dim3(divUp(cols, block.x * block.y),
                    divUp(rows, block.y * block.x));

        grid.x = ::min(grid.x, block.x);
        grid.y = ::min(grid.y, block.y);
    }
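For intuition about the launch shape (a worked example, not part of the source): the block is fixed at 32x8 = 256 threads. For a 1920x1080 image, grid = (divUp(1920, 256), divUp(1080, 256)) = (8, 5); both values are already below the clamps of 32 and 8, so 40 blocks run. run() below then gives every thread a tile of twidth = divUp(divUp(1920, 8), 32) = 8 by theight = divUp(divUp(1080, 5), 8) = 27 elements, strided by blockDim so neighbouring threads keep reading neighbouring pixels.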

    void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
    {
        dim3 block, grid;
        getLaunchCfg(cols, rows, block, grid);

        bufcols = grid.x * grid.y * sizeof(int);
        bufrows = 1;
    }

    template <typename T>
    int run(const PtrStepSzb src, PtrStep<unsigned int> buf)
    {
        dim3 block, grid;
        getLaunchCfg(src.cols, src.rows, block, grid);

        const int twidth = divUp(divUp(src.cols, grid.x), block.x);
        const int theight = divUp(divUp(src.rows, grid.y), block.y);

        unsigned int* count_buf = buf.ptr(0);

        cudaSafeCall( cudaMemset(count_buf, 0, sizeof(unsigned int)) );

        kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, count_buf, twidth, theight);
        cudaSafeCall( cudaGetLastError() );

        cudaSafeCall( cudaDeviceSynchronize() );

        unsigned int count;
        cudaSafeCall( cudaMemcpy(&count, count_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );

        return count;
    }

    template int run<uchar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
    template int run<schar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
    template int run<ushort>(const PtrStepSzb src, PtrStep<unsigned int> buf);
    template int run<short >(const PtrStepSzb src, PtrStep<unsigned int> buf);
    template int run<int   >(const PtrStepSzb src, PtrStep<unsigned int> buf);
    template int run<float >(const PtrStepSzb src, PtrStep<unsigned int> buf);
    template int run<double>(const PtrStepSzb src, PtrStep<unsigned int> buf);
}

#endif // CUDA_DISABLER
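A sketch of how a host wrapper would glue these entry points together (illustrative only; the real countNonZero caller in the module's .cpp side is not shown in this hunk):

    // hypothetical host-side use, for illustration only
    int countNonZero8u(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& buf)
    {
        CV_Assert(src.type() == CV_8UC1);

        int bufcols, bufrows;
        countNonZero::getBufSize(src.cols, src.rows, bufcols, bufrows);
        cv::gpu::ensureSizeIsEnough(bufrows, bufcols, CV_8U, buf);   // one int slot per block

        return countNonZero::run<uchar>(src, buf);
    }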
144  modules/gpuarithm/src/cuda/div_inv.cu  Normal file
@ -0,0 +1,144 @@
/* (standard OpenCV BSD license header, identical to the other new files in this patch) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
    {
        S val;

        explicit DivInv(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            return a != 0 ? saturate_cast<D>(val / a) : 0;
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    template <typename T, typename S, typename D>
    void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        DivInv<T, S, D> op(static_cast<S>(val));
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
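DivInv is the scalar-numerator variant of division — dst(y, x) = val / src(y, x), with zero divisors mapped to 0 — i.e. the kernel behind divide(scalar, matrix). A worked example (not from the source): divInv<uchar, float, uchar> with val = 100 sends a pixel of 3 to saturate_cast<uchar>(100.0f / 3) = 33, a pixel of 50 to 2, and a pixel of 0 to 0.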
230  modules/gpuarithm/src/cuda/div_mat.cu  Normal file
@ -0,0 +1,230 @@
/* (standard OpenCV BSD license header, identical to the other new files in this patch) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    struct Div_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;

            if (b != 0)
            {
                b = 1.0f / b;
                res |= (saturate_cast<uchar>((0xffu & (a      )) * b)      );
                res |= (saturate_cast<uchar>((0xffu & (a >>  8)) * b) <<  8);
                res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
                res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
            }

            return res;
        }
    };

    struct Div_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
                                        saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
                          : make_short4(0,0,0,0);
        }
    };

    template <typename T, typename D> struct Div : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(a / b) : 0;
        }

        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    template <typename T> struct Div<T, float> : binary_function<T, T, float>
    {
        __device__ __forceinline__ float operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<float>(a) / b : 0;
        }

        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    template <typename T> struct Div<T, double> : binary_function<T, T, double>
    {
        __device__ __forceinline__ double operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<double>(a) / b : 0;
        }

        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };

    template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
    {
        S scale;

        explicit DivScale(S scale_) : scale(scale_) {}

        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };

    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
    }

    void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
    }

    template <typename T, typename S, typename D>
    void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
    {
        if (scale == 1)
        {
            Div<T, D> op;
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
        else
        {
            DivScale<T, S, D> op(static_cast<S>(scale));
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
    }

    template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}

#endif // CUDA_DISABLER
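Div_8uc4_32f divides the four uchar channels packed into one uint by a single float, paying for one reciprocal and four multiplies. For readability, an unpacked device-side equivalent (a sketch only, not part of the source; saturate_cast is the cudev one used throughout this file):

    // unpacked equivalent of Div_8uc4_32f, for illustration only
    __device__ uchar4 div_unpacked(uchar4 a, float b)
    {
        if (b == 0)
            return make_uchar4(0, 0, 0, 0);   // matches the res = 0 path above

        const float inv = 1.0f / b;           // one reciprocal, four multiplies
        return make_uchar4(saturate_cast<uchar>(a.x * inv),
                           saturate_cast<uchar>(a.y * inv),
                           saturate_cast<uchar>(a.z * inv),
                           saturate_cast<uchar>(a.w * inv));
    }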
144  modules/gpuarithm/src/cuda/div_scalar.cu  Normal file
@ -0,0 +1,144 @@
/* (standard OpenCV BSD license header, identical to the other new files in this patch) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T, typename S, typename D> struct DivScalar : unary_function<T, D>
    {
        S val;

        explicit DivScalar(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            return saturate_cast<D>(a / val);
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    template <typename T, typename S, typename D>
    void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        DivScalar<T, S, D> op(static_cast<S>(val));
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
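Note the contrast with div_mat.cu and div_inv.cu: DivScalar performs no zero check of its own. The divisor is a host-supplied constant, so rejecting val == 0 is presumably left to the caller, and with S being float or double the quotient is computed in floating point before saturate_cast rounds it back into D — e.g. divScalar<uchar, float, uchar> with val = 3 maps a pixel of 250 to saturate_cast<uchar>(250.0f / 3) = 83 (a worked example, not from the source).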
302  modules/gpuarithm/src/cuda/math.cu  Normal file
@ -0,0 +1,302 @@
/* (standard OpenCV BSD license header, identical to the other new files in this patch) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/type_traits.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

//////////////////////////////////////////////////////////////////////////
// absMat

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <typename T>
    void absMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, abs_func<T>(), WithOutMask(), stream);
    }

    template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}

//////////////////////////////////////////////////////////////////////////
// sqrMat

namespace arithm
{
    template <typename T> struct Sqr : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return saturate_cast<T>(x * x);
        }

        __device__ __forceinline__ Sqr() {}
        __device__ __forceinline__ Sqr(const Sqr& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <typename T>
    void sqrMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Sqr<T>(), WithOutMask(), stream);
    }

    template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
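The saturate_cast is what separates Sqr from a bare x * x: for integer types the square clamps instead of wrapping — Sqr<uchar> maps 20 to saturate_cast<uchar>(400) = 255 — while for float and double the cast is an identity and the value passes through unchanged (a worked example, not from the source).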
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
// sqrtMat
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev
|
||||
{
|
||||
template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
|
||||
{
|
||||
};
|
||||
}}}
|
||||
|
||||
namespace arithm
|
||||
{
|
||||
template <typename T>
|
||||
void sqrtMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, sqrt_func<T>(), WithOutMask(), stream);
|
||||
}
|
||||
|
||||
template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
// logMat
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev
|
||||
{
|
||||
template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
|
||||
{
|
||||
};
|
||||
}}}
|
||||
|
||||
namespace arithm
|
||||
{
|
||||
template <typename T>
|
||||
void logMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, log_func<T>(), WithOutMask(), stream);
|
||||
}
|
||||
|
||||
template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
|
||||
}

//////////////////////////////////////////////////////////////////////////
// expMat

namespace arithm
{
    template <typename T> struct Exp : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            exp_func<T> f;
            return saturate_cast<T>(f(x));
        }

        __device__ __forceinline__ Exp() {}
        __device__ __forceinline__ Exp(const Exp& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <typename T>
    void expMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Exp<T>(), WithOutMask(), stream);
    }

    template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}

//////////////////////////////////////////////////////////////////////////
// pow

namespace arithm
{
    template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
    {
        float power;

        PowOp(double power_) : power(static_cast<float>(power_)) {}

        __device__ __forceinline__ T operator()(T e) const
        {
            return saturate_cast<T>(__powf((float)e, power));
        }
    };
    template<typename T> struct PowOp<T, true> : unary_function<T, T>
    {
        float power;

        PowOp(double power_) : power(static_cast<float>(power_)) {}

        __device__ __forceinline__ T operator()(T e) const
        {
            T res = saturate_cast<T>(__powf((float)e, power));

            if ((e < 0) && (1 & static_cast<int>(power)))
                res *= -1;

            return res;
        }
    };
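
    // Note on the signed specialization above: the result is first computed through
    // __powf in float and then negated when the base is negative and the integral
    // power is odd, since a true signed power must stay negative in that case;
    // (1 & static_cast<int>(power)) is the parity test. The float and double
    // specializations below instead feed ::fabs(e) to the pow intrinsic up front.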
    template<> struct PowOp<float> : unary_function<float, float>
    {
        float power;

        PowOp(double power_) : power(static_cast<float>(power_)) {}

        __device__ __forceinline__ float operator()(float e) const
        {
            return __powf(::fabs(e), power);
        }
    };
    template<> struct PowOp<double> : unary_function<double, double>
    {
        double power;

        PowOp(double power_) : power(power_) {}

        __device__ __forceinline__ double operator()(double e) const
        {
            return ::pow(::fabs(e), power);
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template<typename T>
    void pow(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, PowOp<T>(power), WithOutMask(), stream);
    }

    template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER

modules/gpuarithm/src/cuda/minmax.cu (new file, 246 lines)
@ -0,0 +1,246 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/utility.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace minMax
{
    __device__ unsigned int blocks_finished = 0;

    // To avoid shared memory bank conflicts we convert each value to a type of
    // appropriate size (32 bits minimum)
    template <typename T> struct MinMaxTypeTraits;
    template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<schar> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
    template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
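
    // (Shared memory is organized in 32-bit banks, so widening uchar/schar/ushort/
    // short to int keeps each thread's slot in its own bank during the reductions
    // below; float and double already meet the 32-bit minimum.)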

    template <int BLOCK_SIZE, typename R>
    struct GlobalReduce
    {
        static __device__ void run(R& mymin, R& mymax, R* minval, R* maxval, int tid, int bid, R* sminval, R* smaxval)
        {
        #if __CUDA_ARCH__ >= 200
            if (tid == 0)
            {
                Emulation::glob::atomicMin(minval, mymin);
                Emulation::glob::atomicMax(maxval, mymax);
            }
        #else
            __shared__ bool is_last;

            if (tid == 0)
            {
                minval[bid] = mymin;
                maxval[bid] = mymax;

                __threadfence();

                unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
                is_last = (ticket == gridDim.x * gridDim.y - 1);
            }

            __syncthreads();

            if (is_last)
            {
                int idx = ::min(tid, gridDim.x * gridDim.y - 1);

                mymin = minval[idx];
                mymax = maxval[idx];

                const minimum<R> minOp;
                const maximum<R> maxOp;
                cudev::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));

                if (tid == 0)
                {
                    minval[0] = mymin;
                    maxval[0] = mymax;

                    blocks_finished = 0;
                }
            }
        #endif
        }
    };
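
    // GlobalReduce::run has two strategies. On sm_20 and newer, each block's thread 0
    // folds its partial pair straight into global memory with atomicMin/atomicMax.
    // On older architectures every block instead writes its partials to
    // minval[bid]/maxval[bid] and takes a ticket from blocks_finished; the last block
    // to finish re-reduces all the partials into element 0 and resets the counter.
    // The __threadfence() guarantees the partials are globally visible before the
    // ticket is taken.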

    template <int BLOCK_SIZE, typename T, typename R, class Mask>
    __global__ void kernel(const PtrStepSz<T> src, const Mask mask, R* minval, R* maxval, const int twidth, const int theight)
    {
        __shared__ R sminval[BLOCK_SIZE];
        __shared__ R smaxval[BLOCK_SIZE];

        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;

        R mymin = numeric_limits<R>::max();
        R mymax = -numeric_limits<R>::max();

        const minimum<R> minOp;
        const maximum<R> maxOp;

        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const T* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    const R srcVal = ptr[x];

                    mymin = minOp(mymin, srcVal);
                    mymax = maxOp(mymax, srcVal);
                }
            }
        }

        cudev::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));

        GlobalReduce<BLOCK_SIZE, R>::run(mymin, mymax, minval, maxval, tid, bid, sminval, smaxval);
    }

    const int threads_x = 32;
    const int threads_y = 8;

    void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
    {
        block = dim3(threads_x, threads_y);

        grid = dim3(divUp(cols, block.x * block.y),
                    divUp(rows, block.y * block.x));

        grid.x = ::min(grid.x, block.x);
        grid.y = ::min(grid.y, block.y);
    }
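
    // Worked example of the tiling (plain arithmetic, not from the patch): for a
    // 1024x768 input, block = 32x8, so grid = (divUp(1024, 256), divUp(768, 256))
    // = (4, 3), well under the (32, 8) clamp. run() below then derives
    // twidth = divUp(divUp(1024, 4), 32) = 8 and theight = divUp(divUp(768, 3), 8) = 32,
    // i.e. every thread strides over an 8x32 patch before the block-wide reduction.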

    void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
    {
        dim3 block, grid;
        getLaunchCfg(cols, rows, block, grid);

        bufcols = grid.x * grid.y * sizeof(double);
        bufrows = 2;
    }

    __global__ void setDefaultKernel(int* minval_buf, int* maxval_buf)
    {
        *minval_buf = numeric_limits<int>::max();
        *maxval_buf = numeric_limits<int>::min();
    }
    __global__ void setDefaultKernel(float* minval_buf, float* maxval_buf)
    {
        *minval_buf = numeric_limits<float>::max();
        *maxval_buf = -numeric_limits<float>::max();
    }
    __global__ void setDefaultKernel(double* minval_buf, double* maxval_buf)
    {
        *minval_buf = numeric_limits<double>::max();
        *maxval_buf = -numeric_limits<double>::max();
    }

    template <typename R>
    void setDefault(R* minval_buf, R* maxval_buf)
    {
        setDefaultKernel<<<1, 1>>>(minval_buf, maxval_buf);
    }
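
    // setDefault primes the two reduction slots with identity values (+max for the
    // running minimum; -max, or INT_MIN in the int overload, for the running maximum)
    // from a one-thread kernel, so the buffers are initialized on the device without
    // a typed host-side write.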

    template <typename T>
    void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
    {
        typedef typename MinMaxTypeTraits<T>::best_type R;

        dim3 block, grid;
        getLaunchCfg(src.cols, src.rows, block, grid);

        const int twidth = divUp(divUp(src.cols, grid.x), block.x);
        const int theight = divUp(divUp(src.rows, grid.y), block.y);

        R* minval_buf = (R*) buf.ptr(0);
        R* maxval_buf = (R*) buf.ptr(1);

        setDefault(minval_buf, maxval_buf);

        if (mask.data)
            kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, twidth, theight);
        else
            kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, twidth, theight);

        cudaSafeCall( cudaGetLastError() );

        cudaSafeCall( cudaDeviceSynchronize() );

        R minval_, maxval_;
        cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(R), cudaMemcpyDeviceToHost) );
        cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(R), cudaMemcpyDeviceToHost) );
        *minval = minval_;
        *maxval = maxval_;
    }

    template void run<uchar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<schar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<ushort>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<int   >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
    template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
}

#endif // CUDA_DISABLER

modules/gpuarithm/src/cuda/minmax_mat.cu (new file, 228 lines)
@ -0,0 +1,228 @@
/*M (license header identical to minmax.cu above) M*/

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

//////////////////////////////////////////////////////////////////////////
// min

namespace arithm
{
    struct VMin4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmin4(a, b);
        }

        __device__ __forceinline__ VMin4() {}
        __device__ __forceinline__ VMin4(const VMin4& other) {}
    };
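
    // VMin4 (and VMin2 below) use the vmin4/vmin2 SIMD-in-a-word helpers: one uint is
    // treated as four packed uchars (or two packed ushorts) and the minimum is taken
    // per lane, so the uchar/ushort fast paths handle four (or two) pixels per word.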

    struct VMin2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmin2(a, b);
        }

        __device__ __forceinline__ VMin2() {}
        __device__ __forceinline__ VMin2(const VMin2& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
    }

    void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
    }

    template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
    }

    template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
    }

    template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}

//////////////////////////////////////////////////////////////////////////
// max

namespace arithm
{
    struct VMax4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmax4(a, b);
        }

        __device__ __forceinline__ VMax4() {}
        __device__ __forceinline__ VMax4(const VMax4& other) {}
    };

    struct VMax2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmax2(a, b);
        }

        __device__ __forceinline__ VMax2() {}
        __device__ __forceinline__ VMax2(const VMax2& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
    }

    void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
    }

    template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
    }

    template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
    }

    template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER

modules/gpuarithm/src/cuda/minmaxloc.cu (new file, 235 lines)
@ -0,0 +1,235 @@
/*M (license header identical to minmax.cu above) M*/

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/utility.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace minMaxLoc
{
    // To avoid shared memory bank conflicts we convert each value to a type of
    // appropriate size (32 bits minimum)
    template <typename T> struct MinMaxTypeTraits;
    template <> struct MinMaxTypeTraits<unsigned char> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<signed char> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<unsigned short> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
    template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
    template <> struct MinMaxTypeTraits<double> { typedef double best_type; };

    template <int BLOCK_SIZE, typename T, class Mask>
    __global__ void kernel_pass_1(const PtrStepSz<T> src, const Mask mask, T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, const int twidth, const int theight)
    {
        typedef typename MinMaxTypeTraits<T>::best_type work_type;

        __shared__ work_type sminval[BLOCK_SIZE];
        __shared__ work_type smaxval[BLOCK_SIZE];
        __shared__ unsigned int sminloc[BLOCK_SIZE];
        __shared__ unsigned int smaxloc[BLOCK_SIZE];

        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;

        work_type mymin = numeric_limits<work_type>::max();
        work_type mymax = -numeric_limits<work_type>::max();
        unsigned int myminloc = 0;
        unsigned int mymaxloc = 0;

        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const T* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    const work_type srcVal = ptr[x];

                    if (srcVal < mymin)
                    {
                        mymin = srcVal;
                        myminloc = y * src.cols + x;
                    }

                    if (srcVal > mymax)
                    {
                        mymax = srcVal;
                        mymaxloc = y * src.cols + x;
                    }
                }
            }
        }

        reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
                                 smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
                                 tid,
                                 thrust::make_tuple(less<work_type>(), greater<work_type>()));

        if (tid == 0)
        {
            minval[bid] = (T) mymin;
            maxval[bid] = (T) mymax;
            minloc[bid] = myminloc;
            maxloc[bid] = mymaxloc;
        }
    }
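
    // kernel_pass_1 leaves one (min, max, minloc, maxloc) candidate per block in the
    // buffers; kernel_pass_2 below runs as a single block and reduces those
    // grid.x * grid.y partials down to element 0 of each buffer. Locations travel
    // through both passes as flattened offsets (y * src.cols + x) and are only split
    // back into (x, y) on the host at the end of run().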
    template <int BLOCK_SIZE, typename T>
    __global__ void kernel_pass_2(T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, int count)
    {
        typedef typename MinMaxTypeTraits<T>::best_type work_type;

        __shared__ work_type sminval[BLOCK_SIZE];
        __shared__ work_type smaxval[BLOCK_SIZE];
        __shared__ unsigned int sminloc[BLOCK_SIZE];
        __shared__ unsigned int smaxloc[BLOCK_SIZE];

        unsigned int idx = ::min(threadIdx.x, count - 1);

        work_type mymin = minval[idx];
        work_type mymax = maxval[idx];
        unsigned int myminloc = minloc[idx];
        unsigned int mymaxloc = maxloc[idx];

        reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
                                 smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
                                 threadIdx.x,
                                 thrust::make_tuple(less<work_type>(), greater<work_type>()));

        if (threadIdx.x == 0)
        {
            minval[0] = (T) mymin;
            maxval[0] = (T) mymax;
            minloc[0] = myminloc;
            maxloc[0] = mymaxloc;
        }
    }

    const int threads_x = 32;
    const int threads_y = 8;

    void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
    {
        block = dim3(threads_x, threads_y);

        grid = dim3(divUp(cols, block.x * block.y),
                    divUp(rows, block.y * block.x));

        grid.x = ::min(grid.x, block.x);
        grid.y = ::min(grid.y, block.y);
    }

    void getBufSize(int cols, int rows, size_t elem_size, int& b1cols, int& b1rows, int& b2cols, int& b2rows)
    {
        dim3 block, grid;
        getLaunchCfg(cols, rows, block, grid);

        // For values
        b1cols = (int)(grid.x * grid.y * elem_size);
        b1rows = 2;

        // For locations
        b2cols = grid.x * grid.y * sizeof(int);
        b2rows = 2;
    }

    template <typename T>
    void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf)
    {
        dim3 block, grid;
        getLaunchCfg(src.cols, src.rows, block, grid);

        const int twidth = divUp(divUp(src.cols, grid.x), block.x);
        const int theight = divUp(divUp(src.rows, grid.y), block.y);

        T* minval_buf = (T*) valbuf.ptr(0);
        T* maxval_buf = (T*) valbuf.ptr(1);
        unsigned int* minloc_buf = locbuf.ptr(0);
        unsigned int* maxloc_buf = locbuf.ptr(1);

        if (mask.data)
            kernel_pass_1<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
        else
            kernel_pass_1<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);

        cudaSafeCall( cudaGetLastError() );

        kernel_pass_2<threads_x * threads_y><<<1, threads_x * threads_y>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
        cudaSafeCall( cudaGetLastError() );

        cudaSafeCall( cudaDeviceSynchronize() );

        T minval_, maxval_;
        cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
        cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
        *minval = minval_;
        *maxval = maxval_;

        unsigned int minloc_, maxloc_;
        cudaSafeCall( cudaMemcpy(&minloc_, minloc_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
        cudaSafeCall( cudaMemcpy(&maxloc_, maxloc_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
        minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
        maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
    }

    template void run<unsigned char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<signed char   >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<unsigned short>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<short         >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<int           >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<float         >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    template void run<double        >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
}

#endif // CUDA_DISABLER

modules/gpuarithm/src/cuda/mul_mat.cu (new file, 211 lines)
@ -0,0 +1,211 @@
/*M (license header identical to minmax.cu above) M*/
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    struct Mul_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;

            res |= (saturate_cast<uchar>((0xffu & (a      )) * b)      );
            res |= (saturate_cast<uchar>((0xffu & (a >>  8)) * b) <<  8);
            res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
            res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);

            return res;
        }

        __device__ __forceinline__ Mul_8uc4_32f() {}
        __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f& other) {}
    };
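
    // Mul_8uc4_32f scales an 8uC4 pixel packed in one uint: each byte lane is masked
    // out with 0xff, multiplied by the float b, saturated back to uchar and shifted
    // into place. For example (plain arithmetic): a = 0x10203040, b = 2.0f yields
    // 0x20406080, and any lane that would exceed 255 is clamped by saturate_cast.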

    struct Mul_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
                               saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
        }

        __device__ __forceinline__ Mul_16sc4_32f() {}
        __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f& other) {}
    };

    template <typename T, typename D> struct Mul : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a * b);
        }

        __device__ __forceinline__ Mul() {}
        __device__ __forceinline__ Mul(const Mul& other) {}
    };

    template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
    {
        S scale;

        explicit MulScale(S scale_) : scale(scale_) {}

        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(scale * a * b);
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };

    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
    }

    void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
    }

    template <typename T, typename S, typename D>
    void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
    {
        if (scale == 1)
        {
            Mul<T, D> op;
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
        else
        {
            MulScale<T, S, D> op(static_cast<S>(scale));
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
    }

    template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}

#endif // CUDA_DISABLER

modules/gpuarithm/src/cuda/mul_scalar.cu (new file, 144 lines)
@ -0,0 +1,144 @@
/*M (license header identical to minmax.cu above) M*/
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
    {
        S val;

        explicit MulScalar(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            return saturate_cast<D>(a * val);
        }
    };
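
    // The scalar type S is a template parameter so precision can follow the data:
    // the instantiations below use S = float for the narrow types but keep
    // S = double whenever the destination is double, avoiding a lossy
    // double -> float round-trip of val before the per-element multiply.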
|
||||
}
|
||||
|
||||
namespace cv { namespace gpu { namespace cudev
|
||||
{
|
||||
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
|
||||
{
|
||||
};
|
||||
}}}
|
||||
|
||||
namespace arithm
|
||||
{
|
||||
template <typename T, typename S, typename D>
|
||||
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
|
||||
{
|
||||
MulScalar<T, S, D> op(static_cast<S>(val));
|
||||
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
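// Editor's note: an illustrative sketch, not part of the patch. Each
// instantiation above pins a (source, work, destination) triple <T, S, D>:
// the functor handed to cudev::transform widens to S, multiplies, and
// saturates into D, roughly equivalent to this hypothetical helper:
//
//     template <typename T, typename S, typename D>
//     __device__ D mulScalarOne(T a, S val)
//     {
//         return saturate_cast<D>(a * val);   // widen, multiply, clamp to D's range
//     }
//
// The commented-out lines (e.g. <ushort, float, uchar>) look like narrowing
// combinations the host wrappers never dispatch to, presumably disabled to
// cut compile time and binary size.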
modules/gpuarithm/src/cuda/mul_spectrums.cu (new file, 171 lines)
@ -0,0 +1,171 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "cvconfig.h"

#ifdef HAVE_CUFFT

#include <cufft.h>

#include "opencv2/core/cuda/common.hpp"

namespace cv { namespace gpu { namespace cudev
{
    //////////////////////////////////////////////////////////////////////////
    // mulSpectrums

    __global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        const int y = blockIdx.y * blockDim.y + threadIdx.y;

        if (x < c.cols && y < c.rows)
        {
            c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]);
        }
    }

    void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream)
    {
        dim3 threads(256);
        dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

        mulSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, c);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    //////////////////////////////////////////////////////////////////////////
    // mulSpectrums_CONJ

    __global__ void mulSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        const int y = blockIdx.y * blockDim.y + threadIdx.y;

        if (x < c.cols && y < c.rows)
        {
            c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x]));
        }
    }

    void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream)
    {
        dim3 threads(256);
        dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

        mulSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, c);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    //////////////////////////////////////////////////////////////////////////
    // mulAndScaleSpectrums

    __global__ void mulAndScaleSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        const int y = blockIdx.y * blockDim.y + threadIdx.y;

        if (x < c.cols && y < c.rows)
        {
            cufftComplex v = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]);
            c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale);
        }
    }

    void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream)
    {
        dim3 threads(256);
        dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

        mulAndScaleSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, scale, c);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    //////////////////////////////////////////////////////////////////////////
    // mulAndScaleSpectrums_CONJ

    __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        const int y = blockIdx.y * blockDim.y + threadIdx.y;

        if (x < c.cols && y < c.rows)
        {
            cufftComplex v = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x]));
            c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale);
        }
    }

    void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream)
    {
        dim3 threads(256);
        dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

        mulAndScaleSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, scale, c);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
}}} // namespace cv { namespace gpu { namespace cudev

#endif // HAVE_CUFFT

#endif /* CUDA_DISABLER */
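// Editor's note: an illustrative sketch, not part of the patch. cuCmulf is
// CUDA's complex multiply; the _CONJ variants compute a * conj(b), the
// correlation (rather than convolution) form. Per element, in scalar terms:
//
//     // (ax + i*ay) * (bx - i*by) == (ax*bx + ay*by) + i*(ay*bx - ax*by)
//     __device__ cufftComplex mulConj(cufftComplex a, cufftComplex b)
//     {
//         return make_cuFloatComplex(a.x * b.x + a.y * b.y,
//                                    a.y * b.x - a.x * b.y);
//     }
//
// The scale parameter of mulAndScaleSpectrums is typically the 1/(rows*cols)
// factor that a forward-plus-inverse cuFFT round trip would otherwise leave in.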
modules/gpuarithm/src/cuda/reduce.cu (new file, 330 lines)
@ -0,0 +1,330 @@
/* OpenCV license header (identical to the one in mul_spectrums.cu above) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"

#include "unroll_detail.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace reduce
{
    struct Sum
    {
        template <typename T>
        __device__ __forceinline__ T startValue() const
        {
            return VecTraits<T>::all(0);
        }

        template <typename T>
        __device__ __forceinline__ T operator ()(T a, T b) const
        {
            return a + b;
        }

        template <typename T>
        __device__ __forceinline__ T result(T r, double) const
        {
            return r;
        }

        __device__ __forceinline__ Sum() {}
        __device__ __forceinline__ Sum(const Sum&) {}
    };

    struct Avg
    {
        template <typename T>
        __device__ __forceinline__ T startValue() const
        {
            return VecTraits<T>::all(0);
        }

        template <typename T>
        __device__ __forceinline__ T operator ()(T a, T b) const
        {
            return a + b;
        }

        template <typename T>
        __device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
        {
            return r / sz;
        }

        __device__ __forceinline__ Avg() {}
        __device__ __forceinline__ Avg(const Avg&) {}
    };

    struct Min
    {
        template <typename T>
        __device__ __forceinline__ T startValue() const
        {
            return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
        }

        template <typename T>
        __device__ __forceinline__ T operator ()(T a, T b) const
        {
            minimum<T> minOp;
            return minOp(a, b);
        }

        template <typename T>
        __device__ __forceinline__ T result(T r, double) const
        {
            return r;
        }

        __device__ __forceinline__ Min() {}
        __device__ __forceinline__ Min(const Min&) {}
    };

    struct Max
    {
        template <typename T>
        __device__ __forceinline__ T startValue() const
        {
            return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
        }

        template <typename T>
        __device__ __forceinline__ T operator ()(T a, T b) const
        {
            maximum<T> maxOp;
            return maxOp(a, b);
        }

        template <typename T>
        __device__ __forceinline__ T result(T r, double) const
        {
            return r;
        }

        __device__ __forceinline__ Max() {}
        __device__ __forceinline__ Max(const Max&) {}
    };

    ///////////////////////////////////////////////////////////

    template <typename T, typename S, typename D, class Op>
    __global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
    {
        __shared__ S smem[16 * 16];

        const int x = blockIdx.x * 16 + threadIdx.x;

        S myVal = op.template startValue<S>();

        if (x < src.cols)
        {
            for (int y = threadIdx.y; y < src.rows; y += 16)
            {
                S srcVal = src(y, x);
                myVal = op(myVal, srcVal);
            }
        }

        smem[threadIdx.x * 16 + threadIdx.y] = myVal;

        __syncthreads();

        volatile S* srow = smem + threadIdx.y * 16;

        myVal = srow[threadIdx.x];
        cudev::reduce<16>(srow, myVal, threadIdx.x, op);

        if (threadIdx.x == 0)
            srow[0] = myVal;

        __syncthreads();

        if (threadIdx.y == 0 && x < src.cols)
            dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
    }
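
    // Editor's note: a sketch of the shared-memory dance above, not part of
    // the patch. Thread (tx, ty) serially reduces rows ty, ty+16, ... of
    // column x = blockIdx.x*16 + tx, then stores its partial transposed:
    //
    //     smem[tx * 16 + ty] = partial;   // row tx of smem = the 16 partials of column x
    //
    // After the barrier, the 16 threads sharing a ty value fold smem row ty
    // with the 16-wide cudev::reduce, leaving each column's total in
    // smem[row * 16 + 0]; thread (tx, 0) then writes dst[x] from smem[tx * 16].
    // The transpose-store is what turns the per-thread partials into
    // contiguous, bank-friendly rows for the warp-level fold.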

    template <typename T, typename S, typename D, class Op>
    void rowsCaller(PtrStepSz<T> src, D* dst, cudaStream_t stream)
    {
        const dim3 block(16, 16);
        const dim3 grid(divUp(src.cols, block.x));

        Op op;
        rowsKernel<T, S, D, Op><<<grid, block, 0, stream>>>(src, dst, op);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template <typename T, typename S, typename D>
    void rows(PtrStepSzb src, void* dst, int op, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSz<T> src, D* dst, cudaStream_t stream);
        static const func_t funcs[] =
        {
            rowsCaller<T, S, D, Sum>,
            rowsCaller<T, S, D, Avg>,
            rowsCaller<T, S, D, Max>,
            rowsCaller<T, S, D, Min>
        };

        funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
    }

    template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);

    template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);

    template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);

    template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);

    template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
    template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);

    template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);

    ///////////////////////////////////////////////////////////

    template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
    __global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
    {
        typedef typename TypeVec<T, cn>::vec_type src_type;
        typedef typename TypeVec<S, cn>::vec_type work_type;
        typedef typename TypeVec<D, cn>::vec_type dst_type;

        __shared__ S smem[BLOCK_SIZE * cn];

        const int y = blockIdx.x;

        const src_type* srcRow = src.ptr(y);

        work_type myVal = op.template startValue<work_type>();

        for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
            myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));

        cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));

        if (threadIdx.x == 0)
            dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
    }

    template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, cudaStream_t stream)
    {
        const int BLOCK_SIZE = 256;

        const dim3 block(BLOCK_SIZE);
        const dim3 grid(src.rows);

        Op op;
        colsKernel<BLOCK_SIZE, T, S, D, cn, Op><<<grid, block, 0, stream>>>((PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, void* dst, cudaStream_t stream);
        static const func_t funcs[5][4] =
        {
            {0,0,0,0},
            {colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
            {colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
            {colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
            {colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
        };

        funcs[cn][op](src, dst, stream);
    }

    template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);

    template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);

    template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);

    template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);

    template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
    template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);

    template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
}

#endif /* CUDA_DISABLER */
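// Editor's note: illustrative only, not part of the patch. Both dispatch
// tables are indexed by the reduction op code in the order the arrays are
// written: 0 = Sum, 1 = Avg, 2 = Max, 3 = Min (matching the CV_REDUCE_*
// constants); cols() additionally indexes by channel count, with row 0 left
// null so cn = 1..4 map directly. A hypothetical host-side call:
//
//     // column-wise average of a 3-channel float image
//     // cols<float, float, float>(src, dstPtr, /*cn=*/3, /*op=*/1 /*Avg*/, stream);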
modules/gpuarithm/src/cuda/sub_mat.cu (new file, 185 lines)
@ -0,0 +1,185 @@
/* OpenCV license header (identical to the one in mul_spectrums.cu above) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    struct VSub4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vsub4(a, b);
        }

        __device__ __forceinline__ VSub4() {}
        __device__ __forceinline__ VSub4(const VSub4& other) {}
    };

    struct VSub2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vsub2(a, b);
        }

        __device__ __forceinline__ VSub2() {}
        __device__ __forceinline__ VSub2(const VSub2& other) {}
    };

    template <typename T, typename D> struct SubMat : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a - b);
        }

        __device__ __forceinline__ SubMat() {}
        __device__ __forceinline__ SubMat(const SubMat& other) {}
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VSub4(), WithOutMask(), stream);
    }

    void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VSub2(), WithOutMask(), stream);
    }

    template <typename T, typename D>
    void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream);
    }

    template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}

#endif // CUDA_DISABLER
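// Editor's note: an illustrative model, not part of the patch. VSub4/VSub2
// wrap the SIMD-in-register helpers of simd_functions.hpp: one 32-bit word is
// treated as four packed bytes (vsub4) or two packed 16-bit halves (vsub2)
// and subtracted lane-wise in a single operation. A scalar model of the
// 4-lane case, assuming wrap-around (non-saturating) semantics:
//
//     unsigned vsub4_model(unsigned a, unsigned b)
//     {
//         unsigned r = 0;
//         for (int i = 0; i < 4; ++i)
//             r |= (((a >> (8 * i)) - (b >> (8 * i))) & 0xFFu) << (8 * i);  // per-byte wrap
//         return r;
//     }
//
// The generic SubMat<T, D> functor with saturate_cast covers every other
// case; the packed paths presumably apply only when the host wrapper sees
// suitably aligned same-type rows.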
modules/gpuarithm/src/cuda/sub_scalar.cu (new file, 148 lines)
@ -0,0 +1,148 @@
/* OpenCV license header (identical to the one in mul_spectrums.cu above) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    template <typename T, typename S, typename D> struct SubScalar : unary_function<T, D>
    {
        S val;

        explicit SubScalar(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            return saturate_cast<D>(a - val);
        }
    };
}

namespace cv { namespace gpu { namespace cudev
{
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::SubScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}

namespace arithm
{
    template <typename T, typename S, typename D>
    void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        SubScalar<T, S, D> op(static_cast<S>(val));

        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);

    //template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}

#endif // CUDA_DISABLER
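// Editor's note: an illustrative check, not part of the patch. SubScalar
// stores val in the work type S, so the subtraction runs at full precision
// and only the final saturate_cast clamps into D. With <uchar, float, uchar>:
//
//     SubScalar<uchar, float, uchar> op(300.0f);
//     // op(10)  == saturate_cast<uchar>(10.0f  - 300.0f) == 0    (clamped low)
//     // op(255) == saturate_cast<uchar>(255.0f - 300.0f) == 0
//     SubScalar<uchar, float, uchar> neg(-50.0f);
//     // neg(250) == saturate_cast<uchar>(250.0f + 50.0f) == 255  (clamped high)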
modules/gpuarithm/src/cuda/sum.cu (new file, 380 lines)
@ -0,0 +1,380 @@
/* OpenCV license header (identical to the one in mul_spectrums.cu above) */

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/utility.hpp"

#include "unroll_detail.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace sum
{
    __device__ unsigned int blocks_finished = 0;

    template <typename R, int cn> struct AtomicAdd;
    template <typename R> struct AtomicAdd<R, 1>
    {
        static __device__ void run(R* ptr, R val)
        {
            Emulation::glob::atomicAdd(ptr, val);
        }
    };
    template <typename R> struct AtomicAdd<R, 2>
    {
        typedef typename TypeVec<R, 2>::vec_type val_type;

        static __device__ void run(R* ptr, val_type val)
        {
            Emulation::glob::atomicAdd(ptr, val.x);
            Emulation::glob::atomicAdd(ptr + 1, val.y);
        }
    };
    template <typename R> struct AtomicAdd<R, 3>
    {
        typedef typename TypeVec<R, 3>::vec_type val_type;

        static __device__ void run(R* ptr, val_type val)
        {
            Emulation::glob::atomicAdd(ptr, val.x);
            Emulation::glob::atomicAdd(ptr + 1, val.y);
            Emulation::glob::atomicAdd(ptr + 2, val.z);
        }
    };
    template <typename R> struct AtomicAdd<R, 4>
    {
        typedef typename TypeVec<R, 4>::vec_type val_type;

        static __device__ void run(R* ptr, val_type val)
        {
            Emulation::glob::atomicAdd(ptr, val.x);
            Emulation::glob::atomicAdd(ptr + 1, val.y);
            Emulation::glob::atomicAdd(ptr + 2, val.z);
            Emulation::glob::atomicAdd(ptr + 3, val.w);
        }
    };

    template <int BLOCK_SIZE, typename R, int cn>
    struct GlobalReduce
    {
        typedef typename TypeVec<R, cn>::vec_type result_type;

        static __device__ void run(result_type& sum, result_type* result, int tid, int bid, R* smem)
        {
        #if __CUDA_ARCH__ >= 200
            if (tid == 0)
                AtomicAdd<R, cn>::run((R*) result, sum);
        #else
            __shared__ bool is_last;

            if (tid == 0)
            {
                result[bid] = sum;

                __threadfence();

                unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
                is_last = (ticket == gridDim.x * gridDim.y - 1);
            }

            __syncthreads();

            if (is_last)
            {
                sum = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<result_type>::all(0);

                cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));

                if (tid == 0)
                {
                    result[0] = sum;
                    blocks_finished = 0;
                }
            }
        #endif
        }
    };
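
    // Editor's note: a sketch of the two paths above, not part of the patch.
    // On sm_20+ every block atomically folds its partial straight into the
    // output slot. The pre-Fermi fallback is the "last block standing"
    // pattern:
    //
    //     // 1. result[bid] = block partial;      __threadfence() publishes it
    //     // 2. ticket = atomicAdd(&blocks_finished, 1)
    //     // 3. the block drawing the last ticket re-reduces result[0..numBlocks)
    //     //    into result[0] and resets blocks_finished for the next launch
    //
    // The final pass loads result[tid] with one thread per partial, so it
    // relies on the grid never exceeding BLOCK_SIZE blocks; getLaunchCfg
    // below caps the grid at 32 x 8 = 256 = threads_x * threads_y, which
    // keeps that invariant.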

    template <int BLOCK_SIZE, typename src_type, typename result_type, class Mask, class Op>
    __global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Mask mask, const Op op, const int twidth, const int theight)
    {
        typedef typename VecTraits<src_type>::elem_type T;
        typedef typename VecTraits<result_type>::elem_type R;
        const int cn = VecTraits<src_type>::cn;

        __shared__ R smem[BLOCK_SIZE * cn];

        const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
        const int bid = blockIdx.y * gridDim.x + blockIdx.x;

        result_type sum = VecTraits<result_type>::all(0);

        for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
        {
            const src_type* ptr = src.ptr(y);

            for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    const src_type srcVal = ptr[x];
                    sum = sum + op(saturate_cast<result_type>(srcVal));
                }
            }
        }

        cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));

        GlobalReduce<BLOCK_SIZE, R, cn>::run(sum, result, tid, bid, smem);
    }

    const int threads_x = 32;
    const int threads_y = 8;

    void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
    {
        block = dim3(threads_x, threads_y);

        grid = dim3(divUp(cols, block.x * block.y),
                    divUp(rows, block.y * block.x));

        grid.x = ::min(grid.x, block.x);
        grid.y = ::min(grid.y, block.y);
    }

    void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows)
    {
        dim3 block, grid;
        getLaunchCfg(cols, rows, block, grid);

        bufcols = grid.x * grid.y * sizeof(double) * cn;
        bufrows = 1;
    }
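
    // Editor's note: a worked example, not part of the patch. For a
    // 4096 x 4096 single-channel image: block = 32 x 8 = 256 threads,
    // grid.x = min(divUp(4096, 256), 32) = 16, grid.y = min(divUp(4096, 256), 8) = 8,
    // so 128 blocks run and each thread strides over twidth = 8 columns and
    // theight = 64 rows (128 * 256 * 8 * 64 = 4096 * 4096 elements exactly).
    // getBufSize then reserves one row of 128 * sizeof(double) * cn = 1024
    // bytes of partial-sum storage; it sizes for double, the widest
    // accumulator, whatever the actual work type R turns out to be.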
|
||||
|
||||
template <typename T, typename R, int cn, template <typename> class Op>
|
||||
void caller(PtrStepSzb src_, void* buf_, double* out, PtrStepSzb mask)
|
||||
{
|
||||
typedef typename TypeVec<T, cn>::vec_type src_type;
|
||||
typedef typename TypeVec<R, cn>::vec_type result_type;
|
||||
|
||||
PtrStepSz<src_type> src(src_);
|
||||
result_type* buf = (result_type*) buf_;
|
||||
|
||||
dim3 block, grid;
|
||||
getLaunchCfg(src.cols, src.rows, block, grid);
|
||||
|
||||
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
|
||||
const int theight = divUp(divUp(src.rows, grid.y), block.y);
|
||||
|
||||
Op<result_type> op;
|
||||
|
||||
if (mask.data)
|
||||
kernel<threads_x * threads_y><<<grid, block>>>(src, buf, SingleMask(mask), op, twidth, theight);
|
||||
else
|
||||
kernel<threads_x * threads_y><<<grid, block>>>(src, buf, WithOutMask(), op, twidth, theight);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
|
||||
R result[4] = {0, 0, 0, 0};
|
||||
cudaSafeCall( cudaMemcpy(&result, buf, sizeof(result_type), cudaMemcpyDeviceToHost) );
|
||||
|
||||
out[0] = result[0];
|
||||
out[1] = result[1];
|
||||
out[2] = result[2];
|
||||
out[3] = result[3];
|
||||
}
|
||||
|
||||
template <typename T> struct SumType;
|
||||
template <> struct SumType<uchar> { typedef unsigned int R; };
|
||||
template <> struct SumType<schar> { typedef int R; };
|
||||
template <> struct SumType<ushort> { typedef unsigned int R; };
|
||||
template <> struct SumType<short> { typedef int R; };
|
||||
template <> struct SumType<int> { typedef int R; };
|
||||
template <> struct SumType<float> { typedef float R; };
|
||||
template <> struct SumType<double> { typedef double R; };
|
||||
|
||||
template <typename T, int cn>
|
||||
void run(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
|
||||
{
|
||||
typedef typename SumType<T>::R R;
|
||||
caller<T, R, cn, identity>(src, buf, out, mask);
|
||||
}

    template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void run<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void run<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void run<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void run<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void run<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void run<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template <typename T, int cn>
    void runAbs(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
    {
        typedef typename SumType<T>::R R;
        caller<T, R, cn, abs_func>(src, buf, out, mask);
    }

    template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template <typename T> struct Sqr : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return x * x;
        }
    };

    template <typename T, int cn>
    void runSqr(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
    {
        caller<T, double, cn, Sqr>(src, buf, out, mask);
    }
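
Note that runSqr fixes the accumulator type to double for every input depth, while run and runAbs pick one per type via SumType. That is not an accident: a squared 8-bit image overflows a 32-bit accumulator after only a few million pixels. A standalone sketch of the failure mode (the image size here is illustrative):

// Hedged sketch: why the squared-sum path accumulates in double.
// 4096x4096 pixels of value 255, squared, sum to ~1.09e12, which is
// far past the ~4.29e9 ceiling of a 32-bit accumulator.
#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint64_t pixels = 4096ull * 4096ull;
    const std::uint64_t sumSqr = pixels * 255ull * 255ull;

    std::printf("true sum of squares: %llu\n",
                (unsigned long long) sumSqr);             // ~1.09e12
    std::printf("fits in uint32? %s\n",
                sumSqr <= 0xFFFFFFFFull ? "yes" : "no");  // no
    return 0;
}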

    template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

    template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
    template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
}

#endif // CUDA_DISABLER

114  modules/gpuarithm/src/cuda/threshold.cu  Normal file
@ -0,0 +1,114 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"

#include "arithm_func_traits.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace cv { namespace gpu { namespace cudev
{
    template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };

    template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}

namespace arithm
{
    template <template <typename> class Op, typename T>
    void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream)
    {
        Op<T> op(thresh, maxVal);
        cudev::transform(src, dst, op, WithOutMask(), stream);
    }

    template <typename T>
    void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream)
    {
        typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream);

        static const caller_t callers[] =
        {
            threshold_caller<thresh_binary_func, T>,
            threshold_caller<thresh_binary_inv_func, T>,
            threshold_caller<thresh_trunc_func, T>,
            threshold_caller<thresh_to_zero_func, T>,
            threshold_caller<thresh_to_zero_inv_func, T>
        };

        callers[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
    }

    template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
}

#endif // CUDA_DISABLER
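
The caller table above is indexed directly by the threshold type constant (0 = THRESH_BINARY through 4 = THRESH_TOZERO_INV), so dispatch is a single array lookup. A self-contained sketch of the same function-pointer-table idiom, with the GPU machinery replaced by scalar toy functions (all names here are illustrative):

// Hedged sketch of the dispatch-by-table idiom used above.
#include <algorithm>
#include <cstdio>

static float binaryOp(float v, float t, float m) { return v > t ? m : 0.f; }
static float truncOp (float v, float t, float)   { return std::min(v, t); }

int main()
{
    typedef float (*caller_t)(float, float, float);
    static const caller_t callers[] = { binaryOp, truncOp };

    // index 0 ~ THRESH_BINARY, index 1 ~ THRESH_TRUNC in this toy table
    std::printf("%.1f\n", callers[0](7.f, 5.f, 255.f)); // 255.0
    std::printf("%.1f\n", callers[1](7.f, 5.f, 255.f)); // 5.0
    return 0;
}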

122  modules/gpuarithm/src/cuda/transpose.cu  Normal file
@ -0,0 +1,122 @@
/*M/// (standard OpenCV license header, identical to the one above) //M*/

#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"

using namespace cv::gpu;
using namespace cv::gpu::cudev;

namespace arithm
{
    const int TRANSPOSE_TILE_DIM = 16;
    const int TRANSPOSE_BLOCK_ROWS = 16;

    template <typename T>
    __global__ void transposeKernel(const PtrStepSz<T> src, PtrStep<T> dst)
    {
        __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM + 1];

        int blockIdx_x, blockIdx_y;

        // do diagonal reordering
        if (gridDim.x == gridDim.y)
        {
            blockIdx_y = blockIdx.x;
            blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
        }
        else
        {
            int bid = blockIdx.x + gridDim.x * blockIdx.y;
            blockIdx_y = bid % gridDim.y;
            blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
        }

        int xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
        int yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;

        if (xIndex < src.cols)
        {
            for (int i = 0; i < TRANSPOSE_TILE_DIM; i += TRANSPOSE_BLOCK_ROWS)
            {
                if (yIndex + i < src.rows)
                {
                    tile[threadIdx.y + i][threadIdx.x] = src(yIndex + i, xIndex);
                }
            }
        }

        __syncthreads();

        xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
        yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;

        if (xIndex < src.rows)
        {
            for (int i = 0; i < TRANSPOSE_TILE_DIM; i += TRANSPOSE_BLOCK_ROWS)
            {
                if (yIndex + i < src.cols)
                {
                    dst(yIndex + i, xIndex) = tile[threadIdx.x][threadIdx.y + i];
                }
            }
        }
    }

    template <typename T> void transpose(PtrStepSz<T> src, PtrStepSz<T> dst, cudaStream_t stream)
    {
        const dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);
        const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));

        transposeKernel<<<grid, block, 0, stream>>>(src, dst);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template void transpose<int>(PtrStepSz<int> src, PtrStepSz<int> dst, cudaStream_t stream);
    template void transpose<double>(PtrStepSz<double> src, PtrStepSz<double> dst, cudaStream_t stream);
}

#endif // CUDA_DISABLER
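
Two details of the kernel are worth calling out: the shared-memory tile is padded to TRANSPOSE_TILE_DIM + 1 columns so that the column-wise reads of the second phase hit distinct banks, and blocks are renumbered diagonally to spread global-memory traffic across partitions. A stripped-down device-side sketch of just the padded-tile idiom, without the diagonal reordering and assuming a square n x n float matrix (a host driver is omitted):

// Hedged device-side sketch of the padded shared-memory tile: the
// "+ 1" column keeps the threads of a warp in different banks when
// they read tile[threadIdx.x][...] during write-out.
#include <cuda_runtime.h>

#define TILE 16

__global__ void transposeTileSketch(const float* src, float* dst, int n)
{
    __shared__ float tile[TILE][TILE + 1];   // padded column

    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < n && y < n)
        tile[threadIdx.y][threadIdx.x] = src[y * n + x];

    __syncthreads();

    x = blockIdx.y * TILE + threadIdx.x;     // swapped block coordinates
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < n && y < n)
        dst[y * n + x] = tile[threadIdx.x][threadIdx.y];
}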

135  modules/gpuarithm/src/cuda/unroll_detail.hpp  Normal file
@ -0,0 +1,135 @@
/*M/// (standard OpenCV license header, identical to the one above) //M*/

#ifndef __UNROLL_DETAIL_HPP__
#define __UNROLL_DETAIL_HPP__

#include <thrust/tuple.h>

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"

namespace detail
{
    template <int cn> struct Unroll;

    template <> struct Unroll<1>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ volatile R* smem_tuple(R* smem)
        {
            return smem;
        }

        template <typename R>
        static __device__ __forceinline__ R& tie(R& val)
        {
            return val;
        }

        template <class Op>
        static __device__ __forceinline__ const Op& op(const Op& op)
        {
            return op;
        }
    };

    template <> struct Unroll<2>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*> smem_tuple(R* smem)
        {
            return cv::gpu::cudev::smem_tuple(smem, smem + BLOCK_SIZE);
        }

        template <typename R>
        static __device__ __forceinline__ thrust::tuple<typename cv::gpu::cudev::VecTraits<R>::elem_type&, typename cv::gpu::cudev::VecTraits<R>::elem_type&> tie(R& val)
        {
            return thrust::tie(val.x, val.y);
        }

        template <class Op>
        static __device__ __forceinline__ const thrust::tuple<Op, Op> op(const Op& op)
        {
            return thrust::make_tuple(op, op);
        }
    };

    template <> struct Unroll<3>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
        {
            return cv::gpu::cudev::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
        }

        template <typename R>
        static __device__ __forceinline__ thrust::tuple<typename cv::gpu::cudev::VecTraits<R>::elem_type&, typename cv::gpu::cudev::VecTraits<R>::elem_type&, typename cv::gpu::cudev::VecTraits<R>::elem_type&> tie(R& val)
        {
            return thrust::tie(val.x, val.y, val.z);
        }

        template <class Op>
        static __device__ __forceinline__ const thrust::tuple<Op, Op, Op> op(const Op& op)
        {
            return thrust::make_tuple(op, op, op);
        }
    };

    template <> struct Unroll<4>
    {
        template <int BLOCK_SIZE, typename R>
        static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
        {
            return cv::gpu::cudev::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
        }

        template <typename R>
        static __device__ __forceinline__ thrust::tuple<typename cv::gpu::cudev::VecTraits<R>::elem_type&, typename cv::gpu::cudev::VecTraits<R>::elem_type&, typename cv::gpu::cudev::VecTraits<R>::elem_type&, typename cv::gpu::cudev::VecTraits<R>::elem_type&> tie(R& val)
        {
            return thrust::tie(val.x, val.y, val.z, val.w);
        }

        template <class Op>
        static __device__ __forceinline__ const thrust::tuple<Op, Op, Op, Op> op(const Op& op)
        {
            return thrust::make_tuple(op, op, op, op);
        }
    };
}

#endif // __UNROLL_DETAIL_HPP__
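
Unroll<cn> lets one block-wide reduction process an n-channel accumulator as n scalar lanes: smem_tuple points each lane at its own BLOCK_SIZE slice of shared memory, tie exposes the vector's .x/.y/... members as a tuple of references, and op replicates the reduction functor per lane. A host-side sketch of the tie building block, using only Thrust (the struct here is a stand-in for a CUDA vector type):

// Hedged host-side sketch: thrust::tie turns struct members into a
// tuple of references, so tuple-wise updates write through to the struct.
#include <thrust/tuple.h>
#include <cstdio>

struct float2_like { float x, y; };

int main()
{
    float2_like acc = { 1.f, 2.f };

    // a tuple of references to acc.x and acc.y, as Unroll<2>::tie builds
    thrust::tuple<float&, float&> lanes = thrust::tie(acc.x, acc.y);

    thrust::get<0>(lanes) += 10.f;   // writes through to acc.x
    thrust::get<1>(lanes) += 20.f;   // writes through to acc.y

    std::printf("%.1f %.1f\n", acc.x, acc.y); // 11.0 22.0
    return 0;
}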

@ -49,40 +49,72 @@ using namespace cv::gpu;

void cv::gpu::add(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, int, Stream&) { throw_no_cuda(); }
void cv::gpu::add(const GpuMat&, const Scalar&, GpuMat&, const GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::subtract(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, int, Stream&) { throw_no_cuda(); }
void cv::gpu::subtract(const GpuMat&, const Scalar&, GpuMat&, const GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::multiply(const GpuMat&, const GpuMat&, GpuMat&, double, int, Stream&) { throw_no_cuda(); }
void cv::gpu::multiply(const GpuMat&, const Scalar&, GpuMat&, double, int, Stream&) { throw_no_cuda(); }

void cv::gpu::divide(const GpuMat&, const GpuMat&, GpuMat&, double, int, Stream&) { throw_no_cuda(); }
void cv::gpu::divide(const GpuMat&, const Scalar&, GpuMat&, double, int, Stream&) { throw_no_cuda(); }
void cv::gpu::divide(double, const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::absdiff(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::absdiff(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::abs(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::sqr(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::sqrt(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::exp(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::log(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::pow(const GpuMat&, double, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::compare(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
void cv::gpu::compare(const GpuMat&, Scalar, GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::bitwise_not(const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::bitwise_or(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::bitwise_and(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::bitwise_xor(const GpuMat&, const Scalar&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::rshift(const GpuMat&, Scalar_<int>, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::lshift(const GpuMat&, Scalar_<int>, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::min(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::min(const GpuMat&, double, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::max(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::max(const GpuMat&, double, GpuMat&, Stream&) { throw_no_cuda(); }
double cv::gpu::threshold(const GpuMat&, GpuMat&, double, double, int, Stream&) {throw_no_cuda(); return 0.0;}
void cv::gpu::pow(const GpuMat&, double, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::alphaComp(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }

void cv::gpu::addWeighted(const GpuMat&, double, const GpuMat&, double, double, GpuMat&, int, Stream&) { throw_no_cuda(); }

double cv::gpu::threshold(const GpuMat&, GpuMat&, double, double, int, Stream&) {throw_no_cuda(); return 0.0;}

void cv::gpu::magnitude(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::magnitude(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::magnitudeSqr(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::magnitudeSqr(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::gpu::phase(const GpuMat&, const GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }

void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }

void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }

#else

////////////////////////////////////////////////////////////////////////
@ -1793,10 +1825,10 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst, Stream& stream)

namespace arithm
{
    void cmpMatEq_v4(PtrStepSz<unsigned int> src1, PtrStepSz<unsigned int> src2, PtrStepSz<unsigned int> dst, cudaStream_t stream);
    void cmpMatNe_v4(PtrStepSz<unsigned int> src1, PtrStepSz<unsigned int> src2, PtrStepSz<unsigned int> dst, cudaStream_t stream);
    void cmpMatLt_v4(PtrStepSz<unsigned int> src1, PtrStepSz<unsigned int> src2, PtrStepSz<unsigned int> dst, cudaStream_t stream);
    void cmpMatLe_v4(PtrStepSz<unsigned int> src1, PtrStepSz<unsigned int> src2, PtrStepSz<unsigned int> dst, cudaStream_t stream);
    void cmpMatEq_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream);
    void cmpMatNe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream);
    void cmpMatLt_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream);
    void cmpMatLe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream);

    template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
@ -1820,7 +1852,7 @@ void cv::gpu::compare(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int c
        {cmpMatEq<double> , cmpMatNe<double> , cmpMatLt<double> , cmpMatLe<double> }
    };

    typedef void (*func_v4_t)(PtrStepSz<unsigned int> src1, PtrStepSz<unsigned int> src2, PtrStepSz<unsigned int> dst, cudaStream_t stream);
    typedef void (*func_v4_t)(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream);
    static const func_v4_t funcs_v4[] =
    {
        cmpMatEq_v4, cmpMatNe_v4, cmpMatLt_v4, cmpMatLe_v4
@ -2670,72 +2702,6 @@ void cv::gpu::max(const GpuMat& src, double val, GpuMat& dst, Stream& stream)
    funcs[depth](src, cast_func[depth](val), dst, StreamAccessor::getStream(stream));
}

////////////////////////////////////////////////////////////////////////
// threshold

namespace arithm
{
    template <typename T>
    void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
}

double cv::gpu::threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxVal, int type, Stream& s)
{
    const int depth = src.depth();

    CV_Assert( src.channels() == 1 && depth <= CV_64F );
    CV_Assert( type <= THRESH_TOZERO_INV );

    if (depth == CV_64F)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
    }

    dst.create(src.size(), src.type());

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (src.type() == CV_32FC1 && type == THRESH_TRUNC)
    {
        NppStreamHandler h(stream);

        NppiSize sz;
        sz.width = src.cols;
        sz.height = src.rows;

        nppSafeCall( nppiThreshold_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step),
            dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, static_cast<Npp32f>(thresh), NPP_CMP_GREATER) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
        static const func_t funcs[] =
        {
            arithm::threshold<unsigned char>,
            arithm::threshold<signed char>,
            arithm::threshold<unsigned short>,
            arithm::threshold<short>,
            arithm::threshold<int>,
            arithm::threshold<float>,
            arithm::threshold<double>
        };

        if (depth != CV_32F && depth != CV_64F)
        {
            thresh = cvFloor(thresh);
            maxVal = cvRound(maxVal);
        }

        funcs[depth](src, dst, thresh, maxVal, type, stream);
    }

    return thresh;
}

////////////////////////////////////////////////////////////////////////
// pow

@ -2777,79 +2743,6 @@ void cv::gpu::pow(const GpuMat& src, double power, GpuMat& dst, Stream& stream)
    funcs[depth](src_, power, dst_, StreamAccessor::getStream(stream));
}

////////////////////////////////////////////////////////////////////////
// alphaComp

namespace
{
    template <int DEPTH> struct NppAlphaCompFunc
    {
        typedef typename NppTypeTraits<DEPTH>::npp_t npp_t;

        typedef NppStatus (*func_t)(const npp_t* pSrc1, int nSrc1Step, const npp_t* pSrc2, int nSrc2Step, npp_t* pDst, int nDstStep, NppiSize oSizeROI, NppiAlphaOp eAlphaOp);
    };

    template <int DEPTH, typename NppAlphaCompFunc<DEPTH>::func_t func> struct NppAlphaComp
    {
        typedef typename NppTypeTraits<DEPTH>::npp_t npp_t;

        static void call(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, NppiAlphaOp eAlphaOp, cudaStream_t stream)
        {
            NppStreamHandler h(stream);

            NppiSize oSizeROI;
            oSizeROI.width = img1.cols;
            oSizeROI.height = img2.rows;

            nppSafeCall( func(img1.ptr<npp_t>(), static_cast<int>(img1.step), img2.ptr<npp_t>(), static_cast<int>(img2.step),
                dst.ptr<npp_t>(), static_cast<int>(dst.step), oSizeROI, eAlphaOp) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
}

void cv::gpu::alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream)
{
    static const NppiAlphaOp npp_alpha_ops[] = {
        NPPI_OP_ALPHA_OVER,
        NPPI_OP_ALPHA_IN,
        NPPI_OP_ALPHA_OUT,
        NPPI_OP_ALPHA_ATOP,
        NPPI_OP_ALPHA_XOR,
        NPPI_OP_ALPHA_PLUS,
        NPPI_OP_ALPHA_OVER_PREMUL,
        NPPI_OP_ALPHA_IN_PREMUL,
        NPPI_OP_ALPHA_OUT_PREMUL,
        NPPI_OP_ALPHA_ATOP_PREMUL,
        NPPI_OP_ALPHA_XOR_PREMUL,
        NPPI_OP_ALPHA_PLUS_PREMUL,
        NPPI_OP_ALPHA_PREMUL
    };

    typedef void (*func_t)(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, NppiAlphaOp eAlphaOp, cudaStream_t stream);

    static const func_t funcs[] =
    {
        NppAlphaComp<CV_8U, nppiAlphaComp_8u_AC4R>::call,
        0,
        NppAlphaComp<CV_16U, nppiAlphaComp_16u_AC4R>::call,
        0,
        NppAlphaComp<CV_32S, nppiAlphaComp_32s_AC4R>::call,
        NppAlphaComp<CV_32F, nppiAlphaComp_32f_AC4R>::call
    };

    CV_Assert( img1.type() == CV_8UC4 || img1.type() == CV_16UC4 || img1.type() == CV_32SC4 || img1.type() == CV_32FC4 );
    CV_Assert( img1.size() == img2.size() && img1.type() == img2.type() );

    dst.create(img1.size(), img1.type());

    const func_t func = funcs[img1.depth()];

    func(img1, img2, dst, npp_alpha_ops[alpha_op], StreamAccessor::getStream(stream));
}
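
alphaComp accepts only 4-channel images whose depth has an NPP AC4R variant (the zero entries in the table reject CV_8S and CV_16S). A hypothetical usage sketch, assuming the ALPHA_OVER constant exposed by the gpuarithm header maps to index 0 of the table above:

// Hedged usage sketch: Porter-Duff "fg over bg" on synthetic RGBA buffers.
#include "opencv2/gpuarithm.hpp"

void composite(const cv::gpu::GpuMat& fg, const cv::gpu::GpuMat& bg,
               cv::gpu::GpuMat& out)
{
    CV_Assert(fg.type() == CV_8UC4 && bg.type() == CV_8UC4);

    // ALPHA_OVER is assumed here to be the first alpha-op constant
    cv::gpu::alphaComp(fg, bg, out, cv::gpu::ALPHA_OVER);
}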

////////////////////////////////////////////////////////////////////////
// addWeighted

@ -3357,4 +3250,184 @@ void cv::gpu::addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2,
    func(src1_, alpha, src2_, beta, gamma, dst_, StreamAccessor::getStream(stream));
}

////////////////////////////////////////////////////////////////////////
// threshold

namespace arithm
{
    template <typename T>
    void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
}

double cv::gpu::threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxVal, int type, Stream& s)
{
    const int depth = src.depth();

    CV_Assert( src.channels() == 1 && depth <= CV_64F );
    CV_Assert( type <= 4/*THRESH_TOZERO_INV*/ );

    if (depth == CV_64F)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
    }

    dst.create(src.size(), src.type());

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (src.type() == CV_32FC1 && type == 2/*THRESH_TRUNC*/)
    {
        NppStreamHandler h(stream);

        NppiSize sz;
        sz.width = src.cols;
        sz.height = src.rows;

        nppSafeCall( nppiThreshold_32f_C1R(src.ptr<Npp32f>(), static_cast<int>(src.step),
            dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, static_cast<Npp32f>(thresh), NPP_CMP_GREATER) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
        static const func_t funcs[] =
        {
            arithm::threshold<unsigned char>,
            arithm::threshold<signed char>,
            arithm::threshold<unsigned short>,
            arithm::threshold<short>,
            arithm::threshold<int>,
            arithm::threshold<float>,
            arithm::threshold<double>
        };

        if (depth != CV_32F && depth != CV_64F)
        {
            thresh = cvFloor(thresh);
            maxVal = cvRound(maxVal);
        }

        funcs[depth](src, dst, thresh, maxVal, type, stream);
    }

    return thresh;
}
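
Host-side usage mirrors cv::threshold, with the integer type selecting the entry in the device-side caller table shown earlier. A minimal sketch on a synthetic image (the literal 0 stands in for THRESH_BINARY to keep the sketch header-light):

// Hedged usage sketch: binary-threshold an 8-bit image on the GPU.
#include "opencv2/gpuarithm.hpp"

int main()
{
    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(100)); // synthetic gray image
    host(cv::Rect(0, 0, 320, 480)).setTo(200);        // brighter left half

    cv::gpu::GpuMat src(host), dst;
    cv::gpu::threshold(src, dst, 128.0, 255.0, 0 /* THRESH_BINARY */);

    cv::Mat result(dst);                              // download for inspection
    return 0;
}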

////////////////////////////////////////////////////////////////////////
// NPP magnitude

namespace
{
    typedef NppStatus (*nppMagnitude_t)(const Npp32fc* pSrc, int nSrcStep, Npp32f* pDst, int nDstStep, NppiSize oSizeROI);

    inline void npp_magnitude(const GpuMat& src, GpuMat& dst, nppMagnitude_t func, cudaStream_t stream)
    {
        CV_Assert(src.type() == CV_32FC2);

        dst.create(src.size(), CV_32FC1);

        NppiSize sz;
        sz.width = src.cols;
        sz.height = src.rows;

        NppStreamHandler h(stream);

        nppSafeCall( func(src.ptr<Npp32fc>(), static_cast<int>(src.step), dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
}

void cv::gpu::magnitude(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    npp_magnitude(src, dst, nppiMagnitude_32fc32f_C1R, StreamAccessor::getStream(stream));
}

void cv::gpu::magnitudeSqr(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    npp_magnitude(src, dst, nppiMagnitudeSqr_32fc32f_C1R, StreamAccessor::getStream(stream));
}

////////////////////////////////////////////////////////////////////////
// Polar <-> Cart

namespace cv { namespace gpu { namespace cudev
{
    namespace mathfunc
    {
        void cartToPolar_gpu(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, bool magSqr, PtrStepSzf angle, bool angleInDegrees, cudaStream_t stream);
        void polarToCart_gpu(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, cudaStream_t stream);
    }
}}}

namespace
{
    inline void cartToPolar_caller(const GpuMat& x, const GpuMat& y, GpuMat* mag, bool magSqr, GpuMat* angle, bool angleInDegrees, cudaStream_t stream)
    {
        using namespace ::cv::gpu::cudev::mathfunc;

        CV_Assert(x.size() == y.size() && x.type() == y.type());
        CV_Assert(x.depth() == CV_32F);

        if (mag)
            mag->create(x.size(), x.type());
        if (angle)
            angle->create(x.size(), x.type());

        GpuMat x1cn = x.reshape(1);
        GpuMat y1cn = y.reshape(1);
        GpuMat mag1cn = mag ? mag->reshape(1) : GpuMat();
        GpuMat angle1cn = angle ? angle->reshape(1) : GpuMat();

        cartToPolar_gpu(x1cn, y1cn, mag1cn, magSqr, angle1cn, angleInDegrees, stream);
    }

    inline void polarToCart_caller(const GpuMat& mag, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, cudaStream_t stream)
    {
        using namespace ::cv::gpu::cudev::mathfunc;

        CV_Assert((mag.empty() || mag.size() == angle.size()) && mag.type() == angle.type());
        CV_Assert(mag.depth() == CV_32F);

        x.create(mag.size(), mag.type());
        y.create(mag.size(), mag.type());

        GpuMat mag1cn = mag.reshape(1);
        GpuMat angle1cn = angle.reshape(1);
        GpuMat x1cn = x.reshape(1);
        GpuMat y1cn = y.reshape(1);

        polarToCart_gpu(mag1cn, angle1cn, x1cn, y1cn, angleInDegrees, stream);
    }
}

void cv::gpu::magnitude(const GpuMat& x, const GpuMat& y, GpuMat& dst, Stream& stream)
{
    cartToPolar_caller(x, y, &dst, false, 0, false, StreamAccessor::getStream(stream));
}

void cv::gpu::magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& dst, Stream& stream)
{
    cartToPolar_caller(x, y, &dst, true, 0, false, StreamAccessor::getStream(stream));
}

void cv::gpu::phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees, Stream& stream)
{
    cartToPolar_caller(x, y, 0, false, &angle, angleInDegrees, StreamAccessor::getStream(stream));
}

void cv::gpu::cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& mag, GpuMat& angle, bool angleInDegrees, Stream& stream)
{
    cartToPolar_caller(x, y, &mag, false, &angle, angleInDegrees, StreamAccessor::getStream(stream));
}

void cv::gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, Stream& stream)
{
    polarToCart_caller(magnitude, angle, x, y, angleInDegrees, StreamAccessor::getStream(stream));
}

#endif
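
The dispatched kernels compute the usual polar decomposition: mag = sqrt(x*x + y*y) (or x*x + y*y when magSqr is set) and angle = atan2(y, x), optionally converted to degrees. A scalar reference sketch of that mapping:

// Hedged scalar reference for the cartToPolar mapping above.
#include <cmath>
#include <cstdio>

int main()
{
    const float x = 3.f, y = 4.f;
    const bool angleInDegrees = true;

    float mag   = std::sqrt(x * x + y * y);            // 5.0
    float angle = std::atan2(y, x);                    // radians
    if (angleInDegrees)
        angle *= 180.0f / 3.14159265358979323846f;

    std::printf("mag=%.2f angle=%.2f\n", mag, angle);  // mag=5.00 angle=53.13
    return 0;
}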

@ -40,4 +40,4 @@
//
//M*/

#include "opencv2/core/cuda_devptrs.hpp"
#include "precomp.hpp"
70  modules/gpuarithm/src/precomp.hpp  Normal file
@ -0,0 +1,70 @@
/*M/// (standard OpenCV license header, identical to the one above) //M*/

#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__

#include <limits>

#include "cvconfig.h"

#include "opencv2/gpuarithm.hpp"
#include "opencv2/core/utility.hpp"

#include "opencv2/core/gpu_private.hpp"

#include "opencv2/opencv_modules.hpp"

#ifdef HAVE_OPENCV_GPULEGACY
#  include "opencv2/gpulegacy.hpp"
#  include "opencv2/gpulegacy/private.hpp"
#endif

#ifdef HAVE_CUBLAS
#  include <cublas.h>
#endif

#ifdef HAVE_CUFFT
#  include <cufft.h>
#endif

#endif /* __OPENCV_PRECOMP_H__ */

@ -47,31 +47,43 @@ using namespace cv::gpu;

#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&) { throw_no_cuda(); }
void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&, GpuMat&) { throw_no_cuda(); }
double cv::gpu::norm(const GpuMat&, int) { throw_no_cuda(); return 0.0; }
double cv::gpu::norm(const GpuMat&, int, GpuMat&) { throw_no_cuda(); return 0.0; }
double cv::gpu::norm(const GpuMat&, int, const GpuMat&, GpuMat&) { throw_no_cuda(); return 0.0; }
double cv::gpu::norm(const GpuMat&, const GpuMat&, int) { throw_no_cuda(); return 0.0; }

Scalar cv::gpu::sum(const GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::gpu::sum(const GpuMat&, GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::gpu::sum(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); return Scalar(); }

Scalar cv::gpu::absSum(const GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::gpu::absSum(const GpuMat&, GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::gpu::absSum(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); return Scalar(); }

Scalar cv::gpu::sqrSum(const GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::gpu::sqrSum(const GpuMat&, GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::gpu::sqrSum(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); return Scalar(); }

void cv::gpu::minMax(const GpuMat&, double*, double*, const GpuMat&) { throw_no_cuda(); }
void cv::gpu::minMax(const GpuMat&, double*, double*, const GpuMat&, GpuMat&) { throw_no_cuda(); }

void cv::gpu::minMaxLoc(const GpuMat&, double*, double*, Point*, Point*, const GpuMat&) { throw_no_cuda(); }
void cv::gpu::minMaxLoc(const GpuMat&, double*, double*, Point*, Point*, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }

int cv::gpu::countNonZero(const GpuMat&) { throw_no_cuda(); return 0; }
int cv::gpu::countNonZero(const GpuMat&, GpuMat&) { throw_no_cuda(); return 0; }

void cv::gpu::reduce(const GpuMat&, GpuMat&, int, int, int, Stream&) { throw_no_cuda(); }

void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&) { throw_no_cuda(); }
void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&, GpuMat&) { throw_no_cuda(); }

void cv::gpu::rectStdDev(const GpuMat&, const GpuMat&, GpuMat&, const Rect&, Stream&) { throw_no_cuda(); }

void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&) { throw_no_cuda(); }
void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }

#else

#include "opencv2/core/utility.hpp"

namespace
{
@ -109,58 +121,18 @@ namespace
    };
}

////////////////////////////////////////////////////////////////////////
// meanStdDev

void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev)
{
    GpuMat buf;
    meanStdDev(src, mean, stddev, buf);
}

void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev, GpuMat& buf)
{
    CV_Assert(src.type() == CV_8UC1);

    if (!deviceSupports(FEATURE_SET_COMPUTE_13))
        CV_Error(cv::Error::StsNotImplemented, "Not sufficient compute capability");

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;

    DeviceBuffer dbuf(2);

    int bufSize;
#if (CUDA_VERSION <= 4020)
    nppSafeCall( nppiMeanStdDev8uC1RGetBufferHostSize(sz, &bufSize) );
#else
    nppSafeCall( nppiMeanStdDevGetBufferHostSize_8u_C1R(sz, &bufSize) );
#endif

    ensureSizeIsEnough(1, bufSize, CV_8UC1, buf);

    nppSafeCall( nppiMean_StdDev_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), sz, buf.ptr<Npp8u>(), dbuf, (double*)dbuf + 1) );

    cudaSafeCall( cudaDeviceSynchronize() );

    double* ptrs[2] = {mean.val, stddev.val};
    dbuf.download(ptrs);
}

////////////////////////////////////////////////////////////////////////
// norm

double cv::gpu::norm(const GpuMat& src, int normType)
{
    GpuMat buf;
    return norm(src, normType, GpuMat(), buf);
    return gpu::norm(src, normType, GpuMat(), buf);
}

double cv::gpu::norm(const GpuMat& src, int normType, GpuMat& buf)
{
    return norm(src, normType, GpuMat(), buf);
    return gpu::norm(src, normType, GpuMat(), buf);
}

double cv::gpu::norm(const GpuMat& src, int normType, const GpuMat& mask, GpuMat& buf)
@ -171,14 +143,14 @@ double cv::gpu::norm(const GpuMat& src, int normType, const GpuMat& mask, GpuMat
    GpuMat src_single_channel = src.reshape(1);

    if (normType == NORM_L1)
        return absSum(src_single_channel, mask, buf)[0];
        return gpu::absSum(src_single_channel, mask, buf)[0];

    if (normType == NORM_L2)
        return std::sqrt(sqrSum(src_single_channel, mask, buf)[0]);
        return std::sqrt(gpu::sqrSum(src_single_channel, mask, buf)[0]);

    // NORM_INF
    double min_val, max_val;
    minMax(src_single_channel, &min_val, &max_val, mask, buf);
    gpu::minMax(src_single_channel, &min_val, &max_val, mask, buf);
    return std::max(std::abs(min_val), std::abs(max_val));
}
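
So the three supported norms are all reductions already present in this module: NORM_L1 maps to absSum, NORM_L2 to the square root of sqrSum, and NORM_INF to the larger magnitude of the minMax pair. A scalar reference of the same three formulas:

// Hedged scalar reference for the norm dispatch above.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    const double v[4] = { -3.0, 1.0, -2.0, 4.0 };

    double l1 = 0, l2 = 0, vmin = v[0], vmax = v[0];
    for (double x : v)
    {
        l1 += std::abs(x);                  // NORM_L1  ~ absSum
        l2 += x * x;                        // NORM_L2  ~ sqrt(sqrSum)
        vmin = std::min(vmin, x);
        vmax = std::max(vmax, x);           // NORM_INF ~ minMax
    }

    std::printf("L1=%g L2=%g INF=%g\n",
                l1, std::sqrt(l2), std::max(std::abs(vmin), std::abs(vmax)));
    return 0;                               // L1=10 L2=5.477 INF=4
}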

@ -251,12 +223,12 @@ namespace sum
Scalar cv::gpu::sum(const GpuMat& src)
{
    GpuMat buf;
    return sum(src, GpuMat(), buf);
    return gpu::sum(src, GpuMat(), buf);
}

Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
{
    return sum(src, GpuMat(), buf);
    return gpu::sum(src, GpuMat(), buf);
}

Scalar cv::gpu::sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
@ -297,12 +269,12 @@ Scalar cv::gpu::sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
Scalar cv::gpu::absSum(const GpuMat& src)
{
    GpuMat buf;
    return absSum(src, GpuMat(), buf);
    return gpu::absSum(src, GpuMat(), buf);
}

Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
{
    return absSum(src, GpuMat(), buf);
    return gpu::absSum(src, GpuMat(), buf);
}

Scalar cv::gpu::absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
@ -343,12 +315,12 @@ Scalar cv::gpu::absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
Scalar cv::gpu::sqrSum(const GpuMat& src)
{
    GpuMat buf;
    return sqrSum(src, GpuMat(), buf);
    return gpu::sqrSum(src, GpuMat(), buf);
}

Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
{
    return sqrSum(src, GpuMat(), buf);
    return gpu::sqrSum(src, GpuMat(), buf);
}

Scalar cv::gpu::sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
@ -400,7 +372,7 @@ namespace minMax
void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask)
{
    GpuMat buf;
    minMax(src, minVal, maxVal, mask, buf);
    gpu::minMax(src, minVal, maxVal, mask, buf);
}

void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf)
@ -450,7 +422,7 @@ namespace minMaxLoc
void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc, const GpuMat& mask)
{
    GpuMat valBuf, locBuf;
    minMaxLoc(src, minVal, maxVal, minLoc, maxLoc, mask, valBuf, locBuf);
    gpu::minMaxLoc(src, minVal, maxVal, minLoc, maxLoc, mask, valBuf, locBuf);
}

void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
@ -716,4 +688,116 @@ void cv::gpu::reduce(const GpuMat& src, GpuMat& dst, int dim, int reduceOp, int
    }
}

////////////////////////////////////////////////////////////////////////
// meanStdDev

void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev)
{
    GpuMat buf;
    meanStdDev(src, mean, stddev, buf);
}

void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev, GpuMat& buf)
{
    CV_Assert(src.type() == CV_8UC1);

    if (!deviceSupports(FEATURE_SET_COMPUTE_13))
        CV_Error(cv::Error::StsNotImplemented, "Not sufficient compute capability");

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;

    DeviceBuffer dbuf(2);

    int bufSize;
#if (CUDA_VERSION <= 4020)
    nppSafeCall( nppiMeanStdDev8uC1RGetBufferHostSize(sz, &bufSize) );
#else
    nppSafeCall( nppiMeanStdDevGetBufferHostSize_8u_C1R(sz, &bufSize) );
#endif

    ensureSizeIsEnough(1, bufSize, CV_8UC1, buf);

    nppSafeCall( nppiMean_StdDev_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), sz, buf.ptr<Npp8u>(), dbuf, (double*)dbuf + 1) );

    cudaSafeCall( cudaDeviceSynchronize() );

    double* ptrs[2] = {mean.val, stddev.val};
    dbuf.download(ptrs);
}

//////////////////////////////////////////////////////////////////////////////
// rectStdDev

void cv::gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& s)
{
CV_Assert(src.type() == CV_32SC1 && sqr.type() == CV_64FC1);

dst.create(src.size(), CV_32FC1);

NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;

NppiRect nppRect;
nppRect.height = rect.height;
nppRect.width = rect.width;
nppRect.x = rect.x;
nppRect.y = rect.y;

cudaStream_t stream = StreamAccessor::getStream(s);

NppStreamHandler h(stream);

nppSafeCall( nppiRectStdDev_32s32f_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), sqr.ptr<Npp64f>(), static_cast<int>(sqr.step),
dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, nppRect) );

if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
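
// Editor's sketch (an assumption, not part of this commit): rectStdDev expects
// the integral image (CV_32SC1) and the squared integral image (CV_64FC1) of
// the source; gpu::integral and gpu::sqrIntegral are assumed available in this
// module to produce them.
static void exampleRectStdDev(const cv::gpu::GpuMat& d_src8u)
{
    cv::gpu::GpuMat sum, sqsum, dst;
    cv::gpu::integral(d_src8u, sum);      // CV_32SC1 integral image
    cv::gpu::sqrIntegral(d_src8u, sqsum); // CV_64FC1 squared integral image
    cv::Rect window(0, 0, 21, 21);        // local window for the deviation map
    cv::gpu::rectStdDev(sum, sqsum, dst, window, cv::gpu::Stream::Null()); // dst is CV_32FC1
}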

////////////////////////////////////////////////////////////////////////
// normalize

void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask)
{
GpuMat norm_buf;
GpuMat cvt_buf;
normalize(src, dst, a, b, norm_type, dtype, mask, norm_buf, cvt_buf);
}

void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
{
double scale = 1, shift = 0;
if (norm_type == NORM_MINMAX)
{
double smin = 0, smax = 0;
double dmin = std::min(a, b), dmax = std::max(a, b);
gpu::minMax(src, &smin, &smax, mask, norm_buf);
scale = (dmax - dmin) * (smax - smin > std::numeric_limits<double>::epsilon() ? 1.0 / (smax - smin) : 0.0);
shift = dmin - smin * scale;
}
else if (norm_type == NORM_L2 || norm_type == NORM_L1 || norm_type == NORM_INF)
{
scale = gpu::norm(src, norm_type, mask, norm_buf);
scale = scale > std::numeric_limits<double>::epsilon() ? a / scale : 0.0;
shift = 0;
}
else
{
CV_Error(cv::Error::StsBadArg, "Unknown/unsupported norm type");
}

if (mask.empty())
{
src.convertTo(dst, dtype, scale, shift);
}
else
{
src.convertTo(cvt_buf, dtype, scale, shift);
cvt_buf.copyTo(dst, mask);
}
}
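
// Editor's sketch (an assumption, not part of this commit): stretching an
// input to [0, 255] with NORM_MINMAX and converting to 8-bit in one call.
// For NORM_L1/L2/INF, 'a' is the target norm: scale = a / norm(src).
static void exampleNormalize(const cv::gpu::GpuMat& d_src)
{
    cv::gpu::GpuMat d_dst;
    cv::gpu::normalize(d_src, d_dst, 0.0, 255.0, cv::NORM_MINMAX, CV_8U, cv::gpu::GpuMat());
}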

#endif
439 modules/gpuarithm/test/test_arithm.cpp Normal file
@ -0,0 +1,439 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"

#ifdef HAVE_CUDA

using namespace cvtest;

//////////////////////////////////////////////////////////////////////////////
// GEMM

#ifdef HAVE_CUBLAS

CV_FLAGS(GemmFlags, 0, cv::GEMM_1_T, cv::GEMM_2_T, cv::GEMM_3_T);
#define ALL_GEMM_FLAGS testing::Values(GemmFlags(0), GemmFlags(cv::GEMM_1_T), GemmFlags(cv::GEMM_2_T), GemmFlags(cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T | cv::GEMM_3_T))

PARAM_TEST_CASE(GEMM, cv::gpu::DeviceInfo, cv::Size, MatType, GemmFlags, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int flags;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
flags = GET_PARAM(3);
useRoi = GET_PARAM(4);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(GEMM, Accuracy)
{
cv::Mat src1 = randomMat(size, type, -10.0, 10.0);
cv::Mat src2 = randomMat(size, type, -10.0, 10.0);
cv::Mat src3 = randomMat(size, type, -10.0, 10.0);
double alpha = randomDouble(-10.0, 10.0);
double beta = randomDouble(-10.0, 10.0);

if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::gemm(loadMat(src1), loadMat(src2), alpha, loadMat(src3), beta, dst, flags);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else if (type == CV_64FC2 && flags != 0)
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::gemm(loadMat(src1), loadMat(src2), alpha, loadMat(src3), beta, dst, flags);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
{
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::gemm(loadMat(src1, useRoi), loadMat(src2, useRoi), alpha, loadMat(src3, useRoi), beta, dst, flags);

cv::Mat dst_gold;
cv::gemm(src1, src2, alpha, src3, beta, dst_gold, flags);

EXPECT_MAT_NEAR(dst_gold, dst, CV_MAT_DEPTH(type) == CV_32F ? 1e-1 : 1e-10);
}
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, GEMM, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_32FC1), MatType(CV_32FC2), MatType(CV_64FC1), MatType(CV_64FC2)),
ALL_GEMM_FLAGS,
WHOLE_SUBMAT));

///////////////////////////////////////////////////////////////////////////////////////////////////////
// Integral

PARAM_TEST_CASE(Integral, cv::gpu::DeviceInfo, cv::Size, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
useRoi = GET_PARAM(2);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(Integral, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);

cv::gpu::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);
cv::gpu::integral(loadMat(src, useRoi), dst);

cv::Mat dst_gold;
cv::integral(src, dst_gold, CV_32S);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Integral, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////
// MulSpectrums

CV_FLAGS(DftFlags, 0, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)

PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, cv::Size, DftFlags)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int flag;

cv::Mat a, b;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
flag = GET_PARAM(2);

cv::gpu::setDevice(devInfo.deviceID());

a = randomMat(size, CV_32FC2);
b = randomMat(size, CV_32FC2);
}
};

GPU_TEST_P(MulSpectrums, Simple)
{
cv::gpu::GpuMat c;
cv::gpu::mulSpectrums(loadMat(a), loadMat(b), c, flag, false);

cv::Mat c_gold;
cv::mulSpectrums(a, b, c_gold, flag, false);

EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}

GPU_TEST_P(MulSpectrums, Scaled)
{
float scale = 1.f / size.area();

cv::gpu::GpuMat c;
cv::gpu::mulAndScaleSpectrums(loadMat(a), loadMat(b), c, flag, scale, false);

cv::Mat c_gold;
cv::mulSpectrums(a, b, c_gold, flag, false);
c_gold.convertTo(c_gold, c_gold.type(), scale);

EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, MulSpectrums, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(DftFlags(0), DftFlags(cv::DFT_ROWS))));

////////////////////////////////////////////////////////////////////////////
// Dft

struct Dft : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;

virtual void SetUp()
{
devInfo = GetParam();

cv::gpu::setDevice(devInfo.deviceID());
}
};

namespace
{
void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplace)
{
SCOPED_TRACE(hint);

cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC2, 0.0, 10.0);

cv::Mat b_gold;
cv::dft(a, b_gold, flags);

cv::gpu::GpuMat d_b;
cv::gpu::GpuMat d_b_data;
if (inplace)
{
d_b_data.create(1, a.size().area(), CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
}
cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), flags);

EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
ASSERT_EQ(CV_32F, d_b.depth());
ASSERT_EQ(2, d_b.channels());
EXPECT_MAT_NEAR(b_gold, cv::Mat(d_b), rows * cols * 1e-4);
}
}
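
// Editor's note (an observation, not part of this commit): the in-place branch
// works because a GpuMat can be constructed as a header over pre-allocated
// device memory; gpu::dft then writes into that buffer instead of allocating a
// new one, which the d_b.ptr() == d_b_data.ptr() check above verifies.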

GPU_TEST_P(Dft, C2C)
{
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);

for (int i = 0; i < 2; ++i)
{
bool inplace = i != 0;

testC2C("no flags", cols, rows, 0, inplace);
testC2C("no flags 0 1", cols, rows + 1, 0, inplace);
testC2C("no flags 1 0", cols + 1, rows, 0, inplace);
testC2C("no flags 1 1", cols + 1, rows + 1, 0, inplace);
testC2C("DFT_INVERSE", cols, rows, cv::DFT_INVERSE, inplace);
testC2C("DFT_ROWS", cols, rows, cv::DFT_ROWS, inplace);
testC2C("single col", 1, rows, 0, inplace);
testC2C("single row", cols, 1, 0, inplace);
testC2C("single col inversed", 1, rows, cv::DFT_INVERSE, inplace);
testC2C("single row inversed", cols, 1, cv::DFT_INVERSE, inplace);
testC2C("single row DFT_ROWS", cols, 1, cv::DFT_ROWS, inplace);
testC2C("size 1 2", 1, 2, 0, inplace);
testC2C("size 2 1", 2, 1, 0, inplace);
}
}

namespace
{
void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)
{
SCOPED_TRACE(hint);

cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC1, 0.0, 10.0);

cv::gpu::GpuMat d_b, d_c;
cv::gpu::GpuMat d_b_data, d_c_data;
if (inplace)
{
if (a.cols == 1)
{
d_b_data.create(1, (a.rows / 2 + 1) * a.cols, CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows / 2 + 1, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
}
else
{
d_b_data.create(1, a.rows * (a.cols / 2 + 1), CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows, a.cols / 2 + 1, CV_32FC2, d_b_data.ptr(), (a.cols / 2 + 1) * d_b_data.elemSize());
}
d_c_data.create(1, a.size().area(), CV_32F);
d_c = cv::gpu::GpuMat(a.rows, a.cols, CV_32F, d_c_data.ptr(), a.cols * d_c_data.elemSize());
}

cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), 0);
cv::gpu::dft(d_b, d_c, cv::Size(cols, rows), cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);

EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
EXPECT_TRUE(!inplace || d_c.ptr() == d_c_data.ptr());
ASSERT_EQ(CV_32F, d_c.depth());
ASSERT_EQ(1, d_c.channels());

cv::Mat c(d_c);
EXPECT_MAT_NEAR(a, c, rows * cols * 1e-5);
}
}

GPU_TEST_P(Dft, R2CThenC2R)
{
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);

testR2CThenC2R("sanity", cols, rows, false);
testR2CThenC2R("sanity 0 1", cols, rows + 1, false);
testR2CThenC2R("sanity 1 0", cols + 1, rows, false);
testR2CThenC2R("sanity 1 1", cols + 1, rows + 1, false);
testR2CThenC2R("single col", 1, rows, false);
testR2CThenC2R("single col 1", 1, rows + 1, false);
testR2CThenC2R("single row", cols, 1, false);
testR2CThenC2R("single row 1", cols + 1, 1, false);

testR2CThenC2R("sanity", cols, rows, true);
testR2CThenC2R("sanity 0 1", cols, rows + 1, true);
testR2CThenC2R("sanity 1 0", cols + 1, rows, true);
testR2CThenC2R("sanity 1 1", cols + 1, rows + 1, true);
testR2CThenC2R("single row", cols, 1, true);
testR2CThenC2R("single row 1", cols + 1, 1, true);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Dft, ALL_DEVICES);

////////////////////////////////////////////////////////
// Convolve

namespace
{
void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
{
// reallocate the output array if needed
C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());
cv::Size dftSize;

// compute the size of DFT transform
dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);

// allocate temporary buffers and initialize them with 0s
cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));

// copy A and B to the top-left corners of tempA and tempB, respectively
cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));
A.copyTo(roiA);
cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));
B.copyTo(roiB);

// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
cv::dft(tempA, tempA, 0, A.rows);
cv::dft(tempB, tempB, 0, B.rows);

// multiply the spectrums;
// the function handles packed spectrum representations well
cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);

// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);

// now copy the result back to C.
tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);
}

IMPLEMENT_PARAM_CLASS(KSize, int)
IMPLEMENT_PARAM_CLASS(Ccorr, bool)
}
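
// Editor's sketch (an assumption, not part of this commit): the GPU call the
// tests below exercise, with convolveDFT above as its CPU reference. The
// result covers the "valid" region, sized |A - B| + 1 in each dimension.
static void exampleConvolve(const cv::Mat& image32f, const cv::Mat& templ32f)
{
    cv::gpu::GpuMat d_result; // inputs must be CV_32FC1
    cv::gpu::convolve(cv::gpu::GpuMat(image32f), cv::gpu::GpuMat(templ32f),
                      d_result, false); // false = convolution, true = cross-correlation
}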

PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, cv::Size, KSize, Ccorr)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int ksize;
bool ccorr;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
ksize = GET_PARAM(2);
ccorr = GET_PARAM(3);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(Convolve, Accuracy)
{
cv::Mat src = randomMat(size, CV_32FC1, 0.0, 100.0);
cv::Mat kernel = randomMat(cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0);

cv::gpu::GpuMat dst;
cv::gpu::convolve(loadMat(src), loadMat(kernel), dst, ccorr);

cv::Mat dst_gold;
convolveDFT(src, kernel, dst_gold, ccorr);

EXPECT_MAT_NEAR(dst, dst_gold, 1e-1);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Convolve, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(KSize(3), KSize(7), KSize(11), KSize(17), KSize(19), KSize(23), KSize(45)),
testing::Values(Ccorr(false), Ccorr(true))));

#endif // HAVE_CUBLAS

#endif // HAVE_CUDA
415 modules/gpuarithm/test/test_core.cpp Normal file
@ -0,0 +1,415 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"

#ifdef HAVE_CUDA

using namespace cvtest;

////////////////////////////////////////////////////////////////////////////////
// Merge

PARAM_TEST_CASE(Merge, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(Merge, Accuracy)
{
std::vector<cv::Mat> src;
src.reserve(channels);
for (int i = 0; i < channels; ++i)
src.push_back(cv::Mat(size, depth, cv::Scalar::all(i)));

std::vector<cv::gpu::GpuMat> d_src;
for (int i = 0; i < channels; ++i)
d_src.push_back(loadMat(src[i], useRoi));

if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);

cv::Mat dst_gold;
cv::merge(src, dst_gold);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Merge, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// Split

PARAM_TEST_CASE(Split, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;

int type;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);

cv::gpu::setDevice(devInfo.deviceID());

type = CV_MAKE_TYPE(depth, channels);
}
};

GPU_TEST_P(Split, Accuracy)
{
cv::Mat src = randomMat(size, type);

if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src), dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src, useRoi), dst);

std::vector<cv::Mat> dst_gold;
cv::split(src, dst_gold);

ASSERT_EQ(dst_gold.size(), dst.size());

for (size_t i = 0; i < dst_gold.size(); ++i)
{
EXPECT_MAT_NEAR(dst_gold[i], dst[i], 0.0);
}
}
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Split, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// Transpose

PARAM_TEST_CASE(Transpose, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(Transpose, Accuracy)
{
cv::Mat src = randomMat(size, type);

if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::transpose(loadMat(src), dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat dst = createMat(cv::Size(size.height, size.width), type, useRoi);
cv::gpu::transpose(loadMat(src, useRoi), dst);

cv::Mat dst_gold;
cv::transpose(src, dst_gold);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Transpose, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1),
MatType(CV_8UC4),
MatType(CV_16UC2),
MatType(CV_16SC2),
MatType(CV_32SC1),
MatType(CV_32SC2),
MatType(CV_64FC1)),
WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// Flip

enum {FLIP_BOTH = 0, FLIP_X = 1, FLIP_Y = -1};
CV_ENUM(FlipCode, FLIP_BOTH, FLIP_X, FLIP_Y)
#define ALL_FLIP_CODES testing::Values(FlipCode(FLIP_BOTH), FlipCode(FLIP_X), FlipCode(FLIP_Y))

PARAM_TEST_CASE(Flip, cv::gpu::DeviceInfo, cv::Size, MatType, FlipCode, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int flip_code;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
flip_code = GET_PARAM(3);
useRoi = GET_PARAM(4);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(Flip, Accuracy)
{
cv::Mat src = randomMat(size, type);

cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::gpu::flip(loadMat(src, useRoi), dst, flip_code);

cv::Mat dst_gold;
cv::flip(src, dst_gold, flip_code);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, Flip, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1),
MatType(CV_8UC3),
MatType(CV_8UC4),
MatType(CV_16UC1),
MatType(CV_16UC3),
MatType(CV_16UC4),
MatType(CV_32SC1),
MatType(CV_32SC3),
MatType(CV_32SC4),
MatType(CV_32FC1),
MatType(CV_32FC3),
MatType(CV_32FC4)),
ALL_FLIP_CODES,
WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// LUT

PARAM_TEST_CASE(LUT, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(LUT, OneChannel)
{
cv::Mat src = randomMat(size, type);
cv::Mat lut = randomMat(cv::Size(256, 1), CV_8UC1);

cv::gpu::GpuMat dst = createMat(size, CV_MAKE_TYPE(lut.depth(), src.channels()));
cv::gpu::LUT(loadMat(src, useRoi), lut, dst);

cv::Mat dst_gold;
cv::LUT(src, lut, dst_gold);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

GPU_TEST_P(LUT, MultiChannel)
{
cv::Mat src = randomMat(size, type);
cv::Mat lut = randomMat(cv::Size(256, 1), CV_MAKE_TYPE(CV_8U, src.channels()));

cv::gpu::GpuMat dst = createMat(size, CV_MAKE_TYPE(lut.depth(), src.channels()), useRoi);
cv::gpu::LUT(loadMat(src, useRoi), lut, dst);

cv::Mat dst_gold;
cv::LUT(src, lut, dst_gold);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, LUT, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3)),
WHOLE_SUBMAT));

//////////////////////////////////////////////////////////////////////////////
// CopyMakeBorder

namespace
{
IMPLEMENT_PARAM_CLASS(Border, int)
}

PARAM_TEST_CASE(CopyMakeBorder, cv::gpu::DeviceInfo, cv::Size, MatType, Border, BorderType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int border;
int borderType;
bool useRoi;

virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
border = GET_PARAM(3);
borderType = GET_PARAM(4);
useRoi = GET_PARAM(5);

cv::gpu::setDevice(devInfo.deviceID());
}
};

GPU_TEST_P(CopyMakeBorder, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar val = randomScalar(0, 255);

cv::gpu::GpuMat dst = createMat(cv::Size(size.width + 2 * border, size.height + 2 * border), type, useRoi);
cv::gpu::copyMakeBorder(loadMat(src, useRoi), dst, border, border, border, border, borderType, val);

cv::Mat dst_gold;
cv::copyMakeBorder(src, dst_gold, border, border, border, border, borderType, val);

EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

INSTANTIATE_TEST_CASE_P(GPU_Arithm, CopyMakeBorder, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1),
MatType(CV_8UC3),
MatType(CV_8UC4),
MatType(CV_16UC1),
MatType(CV_16UC3),
MatType(CV_16UC4),
MatType(CV_32FC1),
MatType(CV_32FC3),
MatType(CV_32FC4)),
testing::Values(Border(1), Border(10), Border(50)),
ALL_BORDER_TYPES,
WHOLE_SUBMAT));

#endif // HAVE_CUDA
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff