diff --git a/modules/gpu/CMakeLists.txt b/modules/gpu/CMakeLists.txt
index d83fdd056f..4e23b7cf1f 100644
--- a/modules/gpu/CMakeLists.txt
+++ b/modules/gpu/CMakeLists.txt
@@ -1,7 +1,6 @@
-
-set(name "gpu")
+set(name "gpu")
 set(DEPS "opencv_core")
-
+
 set(the_target "opencv_${name}")
@@ -15,20 +14,20 @@ include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include"
                     "${CMAKE_CURRENT_BINARY_DIR}")
 
 foreach(d ${DEPS})
-    if(${d} MATCHES "opencv_")
+    if(${d} MATCHES "opencv_")
         string(REPLACE "opencv_" "${CMAKE_CURRENT_SOURCE_DIR}/../" d_dir ${d})
-        include_directories("${d_dir}/include")
+        include_directories("${d_dir}/include")
     endif()
-endforeach()
+endforeach()
 
 file(GLOB lib_srcs "src/*.cpp")
 file(GLOB lib_int_hdrs "src/*.h*")
 file(GLOB lib_cuda "cuda/*.cu")
-file(GLOB lib_cuda_hdrs "cuda/*.h*")
+file(GLOB lib_cuda_hdrs "cuda/*.h*")
 
 source_group("Src" FILES ${lib_srcs} ${lib_int_hdrs})
 source_group("Cuda" FILES ${lib_cuda} ${lib_cuda_hdrs})
-file(GLOB lib_hdrs "include/opencv2/${name}/*.h*")
+file(GLOB lib_hdrs "include/opencv2/${name}/*.h*")
 source_group("Include" FILES ${lib_hdrs})
 
 if (HAVE_CUDA)
@@ -38,11 +37,11 @@ if (HAVE_CUDA)
     if (UNIX OR APPLE)
         set (CUDA_NVCC_FLAGS "-Xcompiler;-fPIC")
     endif()
-    
+
     CUDA_COMPILE(cuda_objs ${lib_cuda})
     #CUDA_BUILD_CLEAN_TARGET()
 endif()
-
+
 
 add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs} ${cuda_objs})
@@ -51,7 +50,7 @@ if(PCHSupport_FOUND)
     if(${CMAKE_GENERATOR} MATCHES "Visual*" OR ${CMAKE_GENERATOR} MATCHES "Xcode*")
         if(${CMAKE_GENERATOR} MATCHES "Visual*")
             set(${the_target}_pch "src/precomp.cpp")
-        endif()
+        endif()
         add_native_precompiled_header(${the_target} ${pch_header})
     elseif(CMAKE_COMPILER_IS_GNUCXX AND ${CMAKE_GENERATOR} MATCHES ".*Makefiles")
         add_precompiled_header(${the_target} ${pch_header})
diff --git a/modules/gpu/cuda/cuda_shared.hpp b/modules/gpu/cuda/cuda_shared.hpp
index 0f154d6dc7..d7b81c7f35 100644
--- a/modules/gpu/cuda/cuda_shared.hpp
+++ b/modules/gpu/cuda/cuda_shared.hpp
@@ -48,18 +48,21 @@
 namespace cv
 {
     namespace gpu
-    {
+    {
         typedef unsigned char uchar;
         typedef unsigned short ushort;
-        typedef unsigned int uint;
+        typedef unsigned int uint;
 
         extern "C" void error( const char *error_string, const char *file, const int line, const char *func = "");
 
         namespace impl
-        {
+        {
            static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }
 
            extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, DevMem2D& disp, int maxdisp, DevMem2D_<unsigned int>& minSSD_buf);
+
+           extern "C" void set_to_without_mask (const DevMem2D& mat, const double * scalar, int depth, int channels);
+           extern "C" void set_to_with_mask    (const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int depth, int channels);
         }
     }
 }
diff --git a/modules/gpu/cuda/mat_operators.cu b/modules/gpu/cuda/mat_operators.cu
new file mode 100644
index 0000000000..57eb9bbfa4
--- /dev/null
+++ b/modules/gpu/cuda/mat_operators.cu
@@ -0,0 +1,93 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "cuda_shared.hpp"
+#include "cuda_runtime.h"
+
+__constant__ float scalar_d[4];
+
+namespace mat_operators
+{
+    template <typename T, int channels>
+    __global__ void kernel_set_to_without_mask(T * mat)
+    {
+        // One thread per element; interleaved channels cycle through scalar_d.
+        int i = blockIdx.x * blockDim.x + threadIdx.x;
+        mat[i] = static_cast<T>(scalar_d[i % channels]);
+    }
+}
+
+
+extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int depth, int channels)
+{
+    // TODO: masked fill is not implemented yet.
+}
+
+extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int depth, int channels)
+{
+    // A __constant__ symbol cannot be assigned from host code; upload the
+    // fill value with cudaMemcpyToSymbol instead.
+    float scalar_f[4] = {(float)scalar[0], (float)scalar[1], (float)scalar[2], (float)scalar[3]};
+    cudaMemcpyToSymbol(scalar_d, scalar_f, sizeof(scalar_f));
+
+    // One thread per element: step is in bytes and depth is the element size
+    // in bytes here, so step is assumed to be a multiple of depth (any row
+    // padding is filled along with the pixels).
+    int numBlocks = mat.rows * mat.step / (depth * 256);
+
+    dim3 threadsPerBlock(256);
+
+    if (channels == 1)
+    {
+        if (depth == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char,  1><<<numBlocks, threadsPerBlock>>>(mat.ptr);
+        if (depth == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks, threadsPerBlock>>>((unsigned short *)mat.ptr);
+        if (depth == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int,   1><<<numBlocks, threadsPerBlock>>>((unsigned int *)mat.ptr);
+    }
+    if (channels == 2)
+    {
+        if (depth == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char,  2><<<numBlocks, threadsPerBlock>>>(mat.ptr);
+        if (depth == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks, threadsPerBlock>>>((unsigned short *)mat.ptr);
+        if (depth == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int,   2><<<numBlocks, threadsPerBlock>>>((unsigned int *)mat.ptr);
+    }
+    if (channels == 3)
+    {
+        if (depth == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char,  3><<<numBlocks, threadsPerBlock>>>(mat.ptr);
+        if (depth == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks, threadsPerBlock>>>((unsigned short *)mat.ptr);
+        if (depth == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int,   3><<<numBlocks, threadsPerBlock>>>((unsigned int *)mat.ptr);
+    }
+}
diff --git a/modules/gpu/src/cudastream.cpp b/modules/gpu/src/cudastream.cpp
index c03ef6a3b4..818dff06be 100644
--- a/modules/gpu/src/cudastream.cpp
+++ b/modules/gpu/src/cudastream.cpp
@@ -41,23 +41,23 @@
 //M*/
 
 #include "precomp.hpp"
-#include "opencv2/gpu/stream_access.hpp"
+//#include "opencv2/gpu/stream_access.hpp"
 
 using namespace cv;
 using namespace cv::gpu;
 
-cv::gpu::CudaStream::CudaStream() : impl( (Impl*)fastMalloc(sizeof(Impl)) )
+cv::gpu::CudaStream::CudaStream() //: impl( (Impl*)fastMalloc(sizeof(Impl)) )
 {
     //cudaSafeCall( cudaStreamCreate( &impl->stream) );
 }
 
-cv::gpu::CudaStream::~CudaStream()
-{
+cv::gpu::CudaStream::~CudaStream()
+{
     if (impl)
     {
         cudaSafeCall( cudaStreamDestroy( *(cudaStream_t*)impl ) );
         cv::fastFree( impl );
-    }
+    }
 }
@@ -70,8 +70,8 @@ bool cv::gpu::CudaStream::queryIfComplete()
     //if (err == cudaErrorNotReady)
     //    return false;
 
-    ////cudaErrorInvalidResourceHandle
-    //cudaSafeCall( err );
+    ////cudaErrorInvalidResourceHandle
+    //cudaSafeCall( err );
     return true;
 }
 void cv::gpu::CudaStream::waitForCompletion()
@@ -81,7 +81,7 @@
 
 void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, Mat& dst)
 {
-//  cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost,
+//  cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, src.cols * src.elemSize(), src.rows, cudaMemcpyDeviceToHost,
 }
 void cv::gpu::CudaStream::enqueueUpload(const Mat& src, GpuMat& dst)
 {
@@ -109,4 +109,4 @@ void cv::gpu::CudaStream::enqueueConvert(const GpuMat& src, GpuMat& dst, int typ
 
 
 //struct cudaStream_t& cv::gpu::CudaStream::getStream() { return stream; }
-
+
diff --git a/modules/gpu/src/gpumat.cpp b/modules/gpu/src/gpumat.cpp
index dbbeb690dc..0208d28f75 100644
--- a/modules/gpu/src/gpumat.cpp
+++ b/modules/gpu/src/gpumat.cpp
@@ -68,26 +68,36 @@ void GpuMat::copyTo( GpuMat& m ) const
     cudaSafeCall( cudaMemcpy2D(m.data, m.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );
     cudaSafeCall( cudaThreadSynchronize() );
 }
-
+
 void GpuMat::copyTo( GpuMat& /*m*/, const GpuMat&/* mask */) const
-{
+{
     CV_Assert(!"Not implemented");
 }
-
+
 void GpuMat::convertTo( GpuMat& /*m*/, int /*rtype*/, double /*alpha*/, double /*beta*/ ) const
 {
     CV_Assert(!"Not implemented");
 }
 
-GpuMat& GpuMat::operator = (const Scalar& /*s*/)
+GpuMat& GpuMat::operator = (const Scalar& s)
 {
-    CV_Assert(!"Not implemented");
+    cv::gpu::impl::set_to_without_mask(*this, s.val, this->depth(), this->channels());
     return *this;
 }
 
-GpuMat& GpuMat::setTo(const Scalar& /*s*/, const GpuMat& /*mask*/)
+GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
 {
-    CV_Assert(!"Not implemented");
+    CV_DbgAssert(!this->empty());
+
+    if (mask.empty())
+    {
+        cv::gpu::impl::set_to_without_mask(*this, s.val, this->depth(), this->channels());
+    }
+    else
+    {
+        cv::gpu::impl::set_to_with_mask(*this, s.val, mask, this->depth(), this->channels());
+    }
+
     return *this;
 }
 
@@ -147,8 +157,8 @@ void GpuMat::create(int _rows, int _cols, int _type)
         rows = _rows;
         cols = _cols;
 
-        size_t esz = elemSize();
-
+        size_t esz = elemSize();
+
         void *dev_ptr;
         cudaSafeCall( cudaMallocPitch(&dev_ptr, &step, esz * cols, rows) );
 
@@ -157,10 +167,10 @@
 
         int64 _nettosize = (int64)step*rows;
         size_t nettosize = (size_t)_nettosize;
-
+
         datastart = data = (uchar*)dev_ptr;
-        dataend = data + nettosize;
-
+        dataend = data + nettosize;
+
         refcount = (int*)fastMalloc(sizeof(*refcount));
         *refcount = 1;
     }
@@ -171,7 +181,7 @@ void GpuMat::release()
     if( refcount && CV_XADD(refcount, -1) == 1 )
     {
         fastFree(refcount);
-        cudaSafeCall( cudaFree(datastart) );
+        cudaSafeCall( cudaFree(datastart) );
     }
     data = datastart = dataend = 0;
     step = rows = cols = 0;
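
---

Note: a minimal sketch of how the new fill path could be exercised once the
module builds. This is not part of the patch; the header path and the exact
public includes are assumptions.

    // Hypothetical smoke test for the new GpuMat::setTo() path.
    // Assumed header location; adjust to wherever GpuMat is declared.
    #include "opencv2/gpu/gpumat.hpp"
    #include "opencv2/core/core.hpp"

    int main()
    {
        // A 480x640 single-channel 16-bit matrix on the device. This maps to
        // the (depth == 2, channels == 1) branch of set_to_without_mask.
        cv::gpu::GpuMat d_mat(480, 640, CV_16UC1);

        // No mask: the Scalar is uploaded into the scalar_d constant buffer
        // and kernel_set_to_without_mask<unsigned short, 1> fills the matrix.
        d_mat.setTo(cv::Scalar(42));

        // The masked overload currently dispatches to the empty
        // set_to_with_mask stub, so only the unmasked path can be tested.
        return 0;
    }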