From 572d2d6a84540595d53e43831e7039e7ac07491d Mon Sep 17 00:00:00 2001
From: Vladislav Vinogradov <vlad.vinogradov@itseez.com>
Date: Fri, 16 Nov 2012 12:49:43 +0400
Subject: [PATCH] warpScanInclusive

---
 .../opencv2/gpu/device/warp_shuffle.hpp       | 48 +++++++++++++++++++
 .../gpu/src/nvidia/NCVHaarObjectDetection.cu  | 19 +++++++-
 .../gpu/src/nvidia/NPP_staging/NPP_staging.cu | 34 ++++++++++++-
 3 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/modules/gpu/include/opencv2/gpu/device/warp_shuffle.hpp b/modules/gpu/include/opencv2/gpu/device/warp_shuffle.hpp
index 39b7e852ab..8b4479a79b 100644
--- a/modules/gpu/include/opencv2/gpu/device/warp_shuffle.hpp
+++ b/modules/gpu/include/opencv2/gpu/device/warp_shuffle.hpp
@@ -54,6 +54,14 @@ namespace cv { namespace gpu { namespace device
         return T();
     #endif
     }
+    __device__ __forceinline__ unsigned int shfl(unsigned int val, int srcLane, int width = warpSize)
+    {
+    #if __CUDA_ARCH__ >= 300
+        return (unsigned int) __shfl((int) val, srcLane, width);
+    #else
+        return 0;
+    #endif
+    }
     __device__ __forceinline__ double shfl(double val, int srcLane, int width = warpSize)
     {
     #if __CUDA_ARCH__ >= 300
@@ -78,6 +86,14 @@ namespace cv { namespace gpu { namespace device
         return T();
     #endif
     }
+    __device__ __forceinline__ unsigned int shfl_down(unsigned int val, unsigned int delta, int width = warpSize)
+    {
+    #if __CUDA_ARCH__ >= 300
+        return (unsigned int) __shfl_down((int) val, delta, width);
+    #else
+        return 0;
+    #endif
+    }
     __device__ __forceinline__ double shfl_down(double val, unsigned int delta, int width = warpSize)
     {
     #if __CUDA_ARCH__ >= 300
@@ -92,6 +108,38 @@ namespace cv { namespace gpu { namespace device
         return 0.0;
     #endif
     }
+
+    template <typename T>
+    __device__ __forceinline__ T shfl_up(T val, unsigned int delta, int width = warpSize)
+    {
+    #if __CUDA_ARCH__ >= 300
+        return __shfl_up(val, delta, width);
+    #else
+        return T();
+    #endif
+    }
+    __device__ __forceinline__ unsigned int shfl_up(unsigned int val, unsigned int delta, int width = warpSize)
+    {
+    #if __CUDA_ARCH__ >= 300
+        return (unsigned int) __shfl_up((int) val, delta, width);
+    #else
+        return 0;
+    #endif
+    }
+    __device__ __forceinline__ double shfl_up(double val, unsigned int delta, int width = warpSize)
+    {
+    #if __CUDA_ARCH__ >= 300
+        int lo = __double2loint(val);
+        int hi = __double2hiint(val);
+
+        lo = __shfl_up(lo, delta, width);
+        hi = __shfl_up(hi, delta, width);
+
+        return __hiloint2double(hi, lo);
+    #else
+        return 0.0;
+    #endif
+    }
 }}}
 
 #endif // __OPENCV_GPU_WARP_SHUFFLE_HPP__
diff --git a/modules/gpu/src/nvidia/NCVHaarObjectDetection.cu b/modules/gpu/src/nvidia/NCVHaarObjectDetection.cu
index 2a8f419593..fb057ae79d 100644
--- a/modules/gpu/src/nvidia/NCVHaarObjectDetection.cu
+++ b/modules/gpu/src/nvidia/NCVHaarObjectDetection.cu
@@ -65,6 +65,8 @@
 #include "NPP_staging/NPP_staging.hpp"
 #include "NCVRuntimeTemplates.hpp"
 #include "NCVHaarObjectDetection.hpp"
+#include "opencv2/gpu/device/warp.hpp"
+#include "opencv2/gpu/device/warp_shuffle.hpp"
 
 
 //==============================================================================
@@ -81,6 +83,20 @@ NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of th
 //assuming size <= WARP_SIZE and size is power of 2
 __device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data)
 {
+#if __CUDA_ARCH__ >= 300
+    const unsigned int laneId = cv::gpu::device::Warp::laneId();
+
+    // scan on shuffle functions
+    #pragma unroll
+    for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
+    {
+        const Ncv32u n = cv::gpu::device::shfl_up(idata, i);
+        if (laneId >= i)
+            idata += n;
+    }
+
+    return idata;
+#else
     Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
     s_Data[pos] = 0;
     pos += K_WARP_SIZE;
@@ -93,6 +109,7 @@ __device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data)
     s_Data[pos] += s_Data[pos - 16];
 
     return s_Data[pos];
+#endif
 }
 
 __device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data)
@@ -2317,4 +2334,4 @@ NCVStatus ncvHaarStoreNVBIN_host(const std::string &filename,
     return NCV_SUCCESS;
 }
 
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
diff --git a/modules/gpu/src/nvidia/NPP_staging/NPP_staging.cu b/modules/gpu/src/nvidia/NPP_staging/NPP_staging.cu
index a3a1075fda..f4ec9aace6 100644
--- a/modules/gpu/src/nvidia/NPP_staging/NPP_staging.cu
+++ b/modules/gpu/src/nvidia/NPP_staging/NPP_staging.cu
@@ -44,6 +44,8 @@
 #include <vector>
 #include <cuda_runtime.h>
 #include "NPP_staging.hpp"
+#include "opencv2/gpu/device/warp.hpp"
+#include "opencv2/gpu/device/warp_shuffle.hpp"
 
 
 texture<Ncv8u, 1, cudaReadModeElementType> tex8u;
@@ -90,6 +92,36 @@ NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of th
 //assuming size <= WARP_SIZE and size is power of 2
 template <class T>
 inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
+{
+#if __CUDA_ARCH__ >= 300
+    const unsigned int laneId = cv::gpu::device::Warp::laneId();
+
+    // scan on shuffle functions
+    #pragma unroll
+    for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
+    {
+        const T n = cv::gpu::device::shfl_up(idata, i);
+        if (laneId >= i)
+            idata += n;
+    }
+
+    return idata;
+#else
+    Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
+    s_Data[pos] = 0;
+    pos += K_WARP_SIZE;
+    s_Data[pos] = idata;
+
+    s_Data[pos] += s_Data[pos - 1];
+    s_Data[pos] += s_Data[pos - 2];
+    s_Data[pos] += s_Data[pos - 4];
+    s_Data[pos] += s_Data[pos - 8];
+    s_Data[pos] += s_Data[pos - 16];
+
+    return s_Data[pos];
+#endif
+}
+inline __device__ Ncv64u warpScanInclusive(Ncv64u idata, volatile Ncv64u *s_Data)
 {
     Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
     s_Data[pos] = 0;
@@ -2578,4 +2610,4 @@ NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc,
     return status;
 }
 
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */