diff --git a/modules/gpu/src/cuda/cuda_shared.hpp b/modules/gpu/src/cuda/cuda_shared.hpp
index 272e4f56cd..d0fba59641 100644
--- a/modules/gpu/src/cuda/cuda_shared.hpp
+++ b/modules/gpu/src/cuda/cuda_shared.hpp
@@ -59,7 +59,6 @@ namespace cv
         namespace impl
         {
             static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }
-
             extern "C" void set_to_without_mask (const DevMem2D& mat, const double * scalar, int depth, int channels);
             extern "C" void set_to_with_mask    (const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int depth, int channels);
diff --git a/modules/gpu/src/cuda/matrix_operations.cu b/modules/gpu/src/cuda/matrix_operations.cu
index 533c780c04..ce5c6cd2f2 100644
--- a/modules/gpu/src/cuda/matrix_operations.cu
+++ b/modules/gpu/src/cuda/matrix_operations.cu
@@ -50,97 +50,32 @@ __constant__ __align__(16) float scalar_d[4];
 
 namespace mat_operators
 {
-    template <typename T, int channels, int count = channels>
-    struct unroll
-    {
-        __device__ static void unroll_set(T * mat, size_t i)
-        {
-            mat[i] = static_cast<T>(scalar_d[channels - count]);
-            unroll<T, channels, count - 1>::unroll_set(mat, i+1);
-        }
-
-        __device__ static void unroll_set_with_mask(T * mat, unsigned char mask, size_t i)
-        {
-            if ( mask != 0 )
-                mat[i] = static_cast<T>(scalar_d[channels - count]);
-
-            unroll<T, channels, count - 1>::unroll_set_with_mask(mat, mask, i+1);
-        }
-    };
-
-    template <typename T, int channels>
-    struct unroll<T, channels, 0>
-    {
-        __device__ static void unroll_set(T * , size_t){}
-        __device__ static void unroll_set_with_mask(T * , unsigned char, size_t){}
-    };
-
-    template <typename T, int channels>
-    __device__ size_t GetIndex(size_t i, int cols, int step)
-    {
-        size_t ret = (i / static_cast<size_t>(cols)) * static_cast<size_t>(step) / static_cast<size_t>(sizeof(T)) +
-                     (i % static_cast<size_t>(cols)) * static_cast<size_t>(channels);
-        return ret;
-    }
-
     template <typename T, int channels>
     __global__ void kernel_set_to_without_mask(T * mat, int cols, int rows, int step)
     {
-        size_t i = (blockIdx.x * blockDim.x + threadIdx.x);
+        size_t x = blockIdx.x * blockDim.x + threadIdx.x;
+        size_t y = blockIdx.y * blockDim.y + threadIdx.y;
 
-        if (i < cols * rows)
+        if ((x < cols * channels ) && (y < rows))
         {
-            unroll<T, channels>::unroll_set(mat, GetIndex<T, channels>(i, cols, step));
+            size_t idx = y * (step / sizeof(T)) + x;
+            mat[idx] = scalar_d[ x % channels ];
         }
     }
 
     template <typename T, int channels>
-    __global__ void kernel_set_to_with_mask(T * mat, const unsigned char * mask, int cols, int rows, int step)
+    __global__ void kernel_set_to_with_mask(T * mat, const unsigned char * mask, int cols, int rows, int step, int step_mask)
     {
-        size_t i = (blockIdx.x * blockDim.x + threadIdx.x);
-        if (i < cols * rows)
-            unroll<T, channels>::unroll_set_with_mask(mat, mask[i], GetIndex<T, channels>(i, cols, step));
+        size_t x = blockIdx.x * blockDim.x + threadIdx.x;
+        size_t y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (mask[y * step_mask + x] != 0)
+            if ((x < cols * channels ) && (y < rows))
+            {
+                size_t idx = y * (step / sizeof(T)) + x;
+                mat[idx] = scalar_d[ x % channels ];
+            }
     }
 }
-
-extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int elemSize1, int channels)
-{
-    // download scalar to constant memory
-    float data[4];
-    data[0] = static_cast<float>(scalar[0]);
-    data[1] = static_cast<float>(scalar[1]);
-    data[2] = static_cast<float>(scalar[2]);
-    data[3] = static_cast<float>(scalar[3]);
-    cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));
-
-    dim3 threadsPerBlock(256,1,1);
-    dim3 numBlocks (mat.rows * mat.cols / threadsPerBlock.x + 1, 1, 1);
-
-    if (channels == 1)
-    {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-    }
-    if (channels == 2)
-    {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-    }
-    if (channels == 3)
-    {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-    }
-    if (channels == 4)
-    {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-    }
-    cudaSafeCall( cudaThreadSynchronize() );
-}
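
[Note on the hunk above: the template-unrolled 1D kernels are replaced by plain 2D kernels. Each thread now owns a single channel element: x runs over cols * channels, y over rows, the element offset is y * (step / sizeof(T)) + x, and scalar_d[x % channels] selects the per-channel value. Two details worth flagging: the committed masked kernel reads mask[] before its bounds check, and it indexes the mask with the per-channel x rather than the pixel column. Below is a minimal standalone sketch of the same indexing scheme with the guard hoisted and a one-byte-per-pixel mask assumed; demo_set_to, its scalar parameter, and the nullable mask are illustrative assumptions, not part of the patch.]

```cpp
#include <cuda_runtime.h>

// Hypothetical sketch, not OpenCV code: same 2D layout as the new kernels.
// mat:  row-major image, `step` bytes per row; x enumerates cols * channels.
// mask: assumed CV_8UC1, i.e. one byte per *pixel*, `step_mask` bytes per row.
template <typename T, int channels>
__global__ void demo_set_to(T * mat, const unsigned char * mask,
                            int cols, int rows, int step, int step_mask,
                            const float * scalar) // device pointer, 4 floats
{
    size_t x = blockIdx.x * blockDim.x + threadIdx.x; // column * channel index
    size_t y = blockIdx.y * blockDim.y + threadIdx.y; // row index

    // Guard before touching either buffer (the committed kernel tests the
    // mask first), and map x back to a pixel column for the mask lookup.
    if (x < size_t(cols) * channels && y < size_t(rows))
        if (!mask || mask[y * step_mask + x / channels] != 0)
            mat[y * (step / sizeof(T)) + x] = static_cast<T>(scalar[x % channels]);
}
```
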
@@ -152,33 +87,74 @@ extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const do
     data[3] = static_cast<float>(scalar[3]);
     cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));
 
-    dim3 threadsPerBlock(256, 1, 1);
-    dim3 numBlocks (mat.rows * mat.cols / threadsPerBlock.x + 1, 1, 1);
+    dim3 threadsPerBlock(16, 16, 1);
+    dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);
 
     if (channels == 1)
     {
         if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
         if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float, 1><<<numBlocks,threadsPerBlock>>>(( float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 2)
     {
         if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
         if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float , 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 3)
     {
         if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
         if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float, 3><<<numBlocks,threadsPerBlock>>>(( float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 4)
     {
         if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
         if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
-    cudaSafeCall( cudaThreadSynchronize() );
+    cudaSafeCall ( cudaThreadSynchronize() );
 }
+
+extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int elemSize1, int channels)
+{
+    float data[4];
+    data[0] = static_cast<float>(scalar[0]);
+    data[1] = static_cast<float>(scalar[1]);
+    data[2] = static_cast<float>(scalar[2]);
+    data[3] = static_cast<float>(scalar[3]);
+    cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));
+
+    dim3 threadsPerBlock(16, 16, 1);
+    dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);
+
+    if (channels == 1)
+    {
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+    }
+    if (channels == 2)
+    {
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+    }
+    if (channels == 3)
+    {
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+    }
+    if (channels == 4)
+    {
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+    }
+
+    cudaSafeCall ( cudaThreadSynchronize() );
+}
+
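
[Note: both host wrappers above size the 16x16 grid as a / 16 + 1 blocks per axis. The sketch below shows the equivalent computation written with the divUp helper declared in cuda_shared.hpp earlier in this patch; gridFor is a hypothetical name, not patch code. The a / b + 1 form launches one extra, fully bounds-checked-out block whenever b divides a exactly, which divUp avoids.]

```cpp
#include <cuda_runtime.h>

// divUp as declared in cuda_shared.hpp above.
static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }

// Hypothetical helper: grid geometry for the 2D set-to kernels.
// x covers every channel element of a row, y covers the rows.
static dim3 gridFor(int cols, int rows, int channels, const dim3& block)
{
    return dim3(divUp(cols * channels, (int)block.x),
                divUp(rows,            (int)block.y), 1);
}
```
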
diff --git a/tests/gpu/src/operator_set_to.cpp b/tests/gpu/src/operator_set_to.cpp
index f5b985d6ca..d071004a3c 100644
--- a/tests/gpu/src/operator_set_to.cpp
+++ b/tests/gpu/src/operator_set_to.cpp
@@ -6,6 +6,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <iomanip> // for cout << setw()
 
 using namespace cv;
 using namespace std;
@@ -35,6 +36,7 @@ class CV_GpuMatOpSetTo : public CvTest
         bool test_cv_32f_c3();
         bool test_cv_32f_c4();
 
+
     private:
         int rows;
         int cols;
@@ -43,8 +45,8 @@ class CV_GpuMatOpSetTo : public CvTest
 
 CV_GpuMatOpSetTo::CV_GpuMatOpSetTo(): CvTest( "GpuMatOperatorSetTo", "setTo" )
 {
-    rows = 127;
-    cols = 129;
+    rows = 129;
+    cols = 127;
 
     s.val[0] = 128.0;
     s.val[1] = 128.0;
@@ -75,8 +77,9 @@ bool CV_GpuMatOpSetTo::compare_matrix(cv::Mat & cpumat, gpu::GpuMat & gpumat)
     //int64 time1 = getTickCount();
     gpumat.setTo(s);
     //int64 time2 = getTickCount();
-    //std::cout << "\ntime cpu:" << double((time1 - time) / getTickFrequency());
-    //std::cout << "\ntime gpu:" << double((time2 - time1) / getTickFrequency());
+
+    //std::cout << "\ntime cpu: " << std::fixed << std::setprecision(12) << double((time1 - time) / (double)getTickFrequency());
+    //std::cout << "\ntime gpu: " << std::fixed << std::setprecision(12) << double((time2 - time1) / (double)getTickFrequency());
     //std::cout << "\n";
 
 #ifdef PRINT_MATRIX
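
[Note: the reformatted timing lines in compare_matrix compute elapsed seconds as a tick delta divided by getTickFrequency(). A minimal standalone sketch of that idiom follows; the main() scaffold and the header path are illustrative assumptions, not part of the test file.]

```cpp
#include <iostream>
#include <iomanip>
#include <opencv2/core/core.hpp> // cv::getTickCount, cv::getTickFrequency (path assumed)

// Hypothetical scaffold, not test code: the timing idiom used above.
int main()
{
    int64 t0 = cv::getTickCount();
    // ... operation under test, e.g. gpumat.setTo(s) ...
    int64 t1 = cv::getTickCount();

    // elapsed seconds = tick delta / ticks-per-second
    std::cout << std::fixed << std::setprecision(12)
              << (t1 - t0) / cv::getTickFrequency() << " s" << std::endl;
    return 0;
}
```
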