diff --git a/modules/gpu/include/opencv2/gpu/gpu.hpp b/modules/gpu/include/opencv2/gpu/gpu.hpp
index 2b045ecf9c..39aa9ad5e0 100644
--- a/modules/gpu/include/opencv2/gpu/gpu.hpp
+++ b/modules/gpu/include/opencv2/gpu/gpu.hpp
@@ -43,6 +43,7 @@
 #ifndef __OPENCV_GPU_HPP__
 #define __OPENCV_GPU_HPP__
 
+#include <vector>
 #include "opencv2/core/core.hpp"
 #include "opencv2/gpu/devmem2d.hpp"
 
@@ -368,6 +369,42 @@ namespace cv
     private:
         GpuMat minSSD, leBuf, riBuf;
     };
+
+    //////////////////////// StereoBeliefPropagation_GPU /////////////////////////
+
+    class CV_EXPORTS StereoBeliefPropagation_GPU
+    {
+    public:
+        enum { DEFAULT_NDISP  = 64 };
+        enum { DEFAULT_ITERS  = 5  };
+        enum { DEFAULT_LEVELS = 5  };
+
+        static const float DEFAULT_DISC_COST;
+        static const float DEFAULT_DATA_COST;
+        static const float DEFAULT_LAMBDA_COST;
+
+        explicit StereoBeliefPropagation_GPU(int ndisp = DEFAULT_NDISP,
+                                             int iters = DEFAULT_ITERS,
+                                             int levels = DEFAULT_LEVELS,
+                                             float disc_cost = DEFAULT_DISC_COST,
+                                             float data_cost = DEFAULT_DATA_COST,
+                                             float lambda = DEFAULT_LAMBDA_COST);
+
+        void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);
+
+        int ndisp;
+
+        int iters;
+        int levels;
+
+        float disc_cost;
+        float data_cost;
+        float lambda;
+    private:
+        GpuMat u, d, l, r, u2, d2, l2, r2;
+
+        std::vector<GpuMat> datas;
+    };
 }
 }
 #include "opencv2/gpu/matrix_operations.hpp"
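For reviewers who want to try the new class, here is a minimal usage sketch (not part of the patch; the file names and highgui calls are illustrative). It assumes a rectified 8-bit grayscale stereo pair, matching the asserts in `operator()` below:

```cpp
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    // Hypothetical inputs: a rectified 8-bit grayscale pair (paths are placeholders).
    cv::Mat left  = cv::imread("left.png", 0);
    cv::Mat right = cv::imread("right.png", 0);

    cv::gpu::GpuMat d_left(left), d_right(right), d_disp; // constructor uploads to the device

    // Defaults: ndisp = 64, 5 iterations, 5 pyramid levels.
    cv::gpu::StereoBeliefPropagation_GPU bp;
    bp(d_left, d_right, d_disp);

    cv::Mat disp;
    d_disp.download(disp);              // CV_8U disparity map
    cv::imwrite("disparity.png", disp);
    return 0;
}
```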
diff --git a/modules/gpu/src/beliefpropagation_gpu.cpp b/modules/gpu/src/beliefpropagation_gpu.cpp
new file mode 100644
index 0000000000..1cc8fae75a
--- /dev/null
+++ b/modules/gpu/src/beliefpropagation_gpu.cpp
@@ -0,0 +1,179 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+using namespace cv;
+using namespace cv::gpu;
+using namespace std;
+
+const float cv::gpu::StereoBeliefPropagation_GPU::DEFAULT_DISC_COST   = 1.7f;
+const float cv::gpu::StereoBeliefPropagation_GPU::DEFAULT_DATA_COST   = 10.0f;
+const float cv::gpu::StereoBeliefPropagation_GPU::DEFAULT_LAMBDA_COST = 0.07f;
+
+#if !defined (HAVE_CUDA)
+
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int, int, int, float, float, float) { throw_nogpu(); }
+
+void cv::gpu::StereoBeliefPropagation_GPU::operator() (const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }
+
+#else /* !defined (HAVE_CUDA) */
+
+typedef DevMem2D_<float> DevMem2Df;
+
+namespace cv { namespace gpu { namespace impl {
+    extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda);
+    extern "C" void comp_data_caller(const DevMem2D& l, const DevMem2D& r, DevMem2Df mdata);
+    extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2Df& src, DevMem2Df dst);
+    extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Df* mu, DevMem2Df* md, DevMem2Df* ml, DevMem2Df* mr);
+    extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2Df& u, DevMem2Df& d, DevMem2Df& l, DevMem2Df& r, const DevMem2Df& data);
+    extern "C" void output_caller(const DevMem2Df& u, const DevMem2Df& d, const DevMem2Df& l, const DevMem2Df& r, const DevMem2Df& data, DevMem2D disp);
+}}}
+
+cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, float disc_cost_, float data_cost_, float lambda_)
+    : ndisp(ndisp_), iters(iters_), levels(levels_), disc_cost(disc_cost_), data_cost(data_cost_), lambda(lambda_), datas(levels_)
+{
+    const int max_supported_ndisp = 1 << (sizeof(unsigned char) * 8);
+
+    CV_Assert(0 < ndisp && ndisp <= max_supported_ndisp);
+    CV_Assert(ndisp % 8 == 0);
+}
+
+void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp)
+{
+    CV_DbgAssert(left.cols == right.cols && left.rows == right.rows && left.type() == right.type() && left.type() == CV_8U);
+
+    const Scalar zero = Scalar::all(0);
+
+    int rows = left.rows;
+    int cols = left.cols;
+
+    int divisor = (int)pow(2.f, levels - 1.0f);
+    int lowest_cols = cols / divisor;
+    int lowest_rows = rows / divisor;
+    const int min_image_dim_size = 20;
+    CV_Assert(min(lowest_cols, lowest_rows) > min_image_dim_size);
+
+    disp.create(rows, cols, CV_8U);
+
+    u.create(rows * ndisp, cols, CV_32F);
+    d.create(rows * ndisp, cols, CV_32F);
+    l.create(rows * ndisp, cols, CV_32F);
+    r.create(rows * ndisp, cols, CV_32F);
+
+    if (levels & 1)
+    {
+        u = zero; //can clear less area
+        d = zero;
+        l = zero;
+        r = zero;
+    }
+
+    if (levels > 1)
+    {
+        int less_rows = (rows + 1) / 2;
+        int less_cols = (cols + 1) / 2;
+
+        u2.create(less_rows * ndisp, less_cols, CV_32F);
+        d2.create(less_rows * ndisp, less_cols, CV_32F);
+        l2.create(less_rows * ndisp, less_cols, CV_32F);
+        r2.create(less_rows * ndisp, less_cols, CV_32F);
+
+        if ((levels & 1) == 0)
+        {
+            u2 = zero;
+            d2 = zero;
+            l2 = zero;
+            r2 = zero;
+        }
+    }
+
+    impl::load_constants(ndisp, disc_cost, data_cost, lambda);
+
+    vector<int> cols_all(levels);
+    vector<int> rows_all(levels);
+    vector<int> iters_all(levels);
+
+    cols_all[0] = cols;
+    rows_all[0] = rows;
+    iters_all[0] = iters;
+
+    datas[0].create(rows * ndisp, cols, CV_32F);
+    //datas[0] = Scalar(data_cost); //TODO: done in the kernel, but not sure if correct
+
+    impl::comp_data_caller(left, right, datas.front());
+
+    for (int i = 1; i < levels; i++)
+    {
+        cols_all[i] = (cols_all[i-1] + 1) / 2;
+        rows_all[i] = (rows_all[i-1] + 1) / 2;
+
+        // this is a difference from the Felzenszwalb algorithm:
+        // we reduce the number of iterations for each coarser level
+        iters_all[i] = max(2 * iters_all[i-1] / 3, 1);
+
+        datas[i].create(rows_all[i] * ndisp, cols_all[i], CV_32F);
+
+        impl::data_down_kernel_caller(cols_all[i], rows_all[i], rows_all[i-1], datas[i-1], datas[i]);
+    }
+
+    DevMem2D_<float> mus[] = {u, u2};
+    DevMem2D_<float> mds[] = {d, d2};
+    DevMem2D_<float> mrs[] = {r, r2};
+    DevMem2D_<float> mls[] = {l, l2};
+
+    int mem_idx = (levels & 1) ? 0 : 1;
+
+    for (int i = levels - 1; i >= 0; i--) // for the coarsest level the messages are already computed (set to zero)
+    {
+        if (i != levels - 1)
+            impl::level_up(mem_idx, cols_all[i], rows_all[i], rows_all[i+1], mus, mds, mls, mrs);
+
+        impl::call_all_iterations(cols_all[i], rows_all[i], iters_all[i], mus[mem_idx], mds[mem_idx], mls[mem_idx], mrs[mem_idx], datas[i]);
+
+        mem_idx = (mem_idx + 1) & 1;
+    }
+
+    impl::output_caller(u, d, l, r, datas.front(), disp);
+}
+
+#endif /* !defined (HAVE_CUDA) */
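The host code above builds a coarse-to-fine pyramid and, unlike the Felzenszwalb-Huttenlocher reference implementation, shrinks the iteration count by a third at each coarser level. A throwaway sketch (not part of the patch) that reproduces the schedule computed in `operator()`, assuming the default parameters and a 640x480 input:

```cpp
#include <algorithm>
#include <cstdio>

int main()
{
    // Mirrors the per-level size/iteration schedule from operator() above.
    int cols = 640, rows = 480, iters = 5, levels = 5;
    for (int i = 0; i < levels; ++i)
    {
        printf("level %d: %dx%d, %d iterations\n", i, cols, rows, iters);
        cols  = (cols + 1) / 2;
        rows  = (rows + 1) / 2;
        iters = std::max(2 * iters / 3, 1);  // same reduction rule as iters_all[i]
    }
    return 0;
}
```

For these defaults the coarsest level is 40x30 with a single iteration, which just clears the `min_image_dim_size` check.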
diff --git a/modules/gpu/src/cuda/beliefpropagation.cu b/modules/gpu/src/cuda/beliefpropagation.cu
new file mode 100644
index 0000000000..0191c5c7c6
--- /dev/null
+++ b/modules/gpu/src/cuda/beliefpropagation.cu
@@ -0,0 +1,372 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/gpu/devmem2d.hpp"
+#include "safe_call.hpp"
+
+using namespace cv::gpu;
+
+static inline int divUp(int a, int b) { return (a % b == 0) ? a/b : a/b + 1; }
+
+#ifndef FLT_MAX
+#define FLT_MAX 3.402823466e+38F
+#endif
+
+typedef unsigned char uchar;
+
+namespace beliefpropagation_gpu
+{
+    __constant__ int cndisp;
+    __constant__ float cdisc_cost;
+    __constant__ float cdata_cost;
+    __constant__ float clambda;
+};
+
+///////////////////////////////////////////////////////////////
+//////////////////////  comp data  ////////////////////////////
+///////////////////////////////////////////////////////////////
+
+namespace beliefpropagation_gpu
+{
+    __global__ void comp_data_kernel(uchar* l, uchar* r, size_t step, float* data, size_t data_step, int cols, int rows)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)
+        {
+            uchar *ls = l + y * step + x;
+            uchar *rs = r + y * step + x;
+
+            float *ds = data + y * data_step + x;
+            size_t disp_step = data_step * rows;
+
+            for (int disp = 0; disp < cndisp; disp++)
+            {
+                if (x - disp >= 0)
+                {
+                    int le = ls[0];
+                    int re = rs[-disp];
+                    float val = abs(le - re);
+
+                    ds[disp * disp_step] = clambda * fmin(val, cdata_cost);
+                }
+                else
+                {
+                    ds[disp * disp_step] = cdata_cost;
+                }
+            }
+        }
+    }
+}
+
+namespace cv { namespace gpu { namespace impl {
+    extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda)
+    {
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cndisp,     &ndisp,     sizeof(ndisp))     );
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdisc_cost, &disc_cost, sizeof(disc_cost)) );
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::cdata_cost, &data_cost, sizeof(data_cost)) );
+        cudaSafeCall( cudaMemcpyToSymbol(beliefpropagation_gpu::clambda,    &lambda,    sizeof(lambda))    );
+    }
+
+    extern "C" void comp_data_caller(const DevMem2D& l, const DevMem2D& r, DevMem2D_<float> mdata)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(l.cols, threads.x);
+        grid.y = divUp(l.rows, threads.y);
+
+        beliefpropagation_gpu::comp_data_kernel<<<grid, threads>>>(l.ptr, r.ptr, l.step, mdata.ptr, mdata.step/sizeof(float), l.cols, l.rows);
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
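`comp_data_kernel` implements the truncated linear data term `D(x,y,d) = lambda * min(|L(x,y) - R(x-d,y)|, data_cost)`; pixels whose match would fall outside the right image just get the truncation value. A hedged CPU reference for a single sample (the helper name is mine, not in the patch):

```cpp
#include <algorithm>
#include <cmath>

// Hypothetical CPU reference for comp_data_kernel: truncated linear data cost.
float data_cost_ref(const unsigned char* L, const unsigned char* R,
                    int x, int disp, float lambda, float data_cost)
{
    if (x - disp < 0)
        return data_cost;  // matching point is outside the right image
    float val = std::fabs((float)L[x] - (float)R[x - disp]);
    return lambda * std::min(val, data_cost);
}
```

Note the truncation is applied to the absolute difference before the `lambda` scaling, and that the kernel only fills interior pixels (the one-pixel border is left untouched).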
+///////////////////////////////////////////////////////////////
+//////////////////////  data_step_down  ///////////////////////
+///////////////////////////////////////////////////////////////
+
+namespace beliefpropagation_gpu
+{
+    __global__ void data_down_kernel(int dst_cols, int dst_rows, int src_rows, float *src, size_t src_step, float *dst, size_t dst_step)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x < dst_cols && y < dst_rows)
+        {
+            const size_t dst_disp_step = dst_step * dst_rows;
+            const size_t src_disp_step = src_step * src_rows;
+
+            for (int d = 0; d < cndisp; ++d)
+            {
+                float dst_reg = src[d * src_disp_step + src_step * (2*y+0) + (2*x+0)];
+                dst_reg += src[d * src_disp_step + src_step * (2*y+1) + (2*x+0)];
+                dst_reg += src[d * src_disp_step + src_step * (2*y+0) + (2*x+1)];
+                dst_reg += src[d * src_disp_step + src_step * (2*y+1) + (2*x+1)];
+
+                dst[d * dst_disp_step + y * dst_step + x] = dst_reg;
+            }
+        }
+    }
+}
+
+namespace cv { namespace gpu { namespace impl {
+    extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2D_<float>& src, DevMem2D_<float> dst)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(dst_cols, threads.x);
+        grid.y = divUp(dst_rows, threads.y);
+
+        beliefpropagation_gpu::data_down_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, src.ptr, src.step/sizeof(float), dst.ptr, dst.step/sizeof(float));
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
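`data_down_kernel` builds the next pyramid level by summing, for each disparity, the four fine-level children of a coarse cell. A CPU sketch of the same reduction (the helper name is mine); note it clamps the `2y+1`/`2x+1` reads, which the kernel above currently does not, so odd-sized levels read one row/column past the source there:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical CPU reference for data_down_kernel: the coarse-level data cost
// at (x, y, d) is the sum of the four fine-level children at disparity d.
// Layout: one dense src_rows x src_cols plane per disparity.
void data_step_down_ref(int dst_cols, int dst_rows, int src_cols, int src_rows,
                        int ndisp, const std::vector<float>& src, std::vector<float>& dst)
{
    dst.assign((size_t)dst_cols * dst_rows * ndisp, 0.f);
    for (int d = 0; d < ndisp; ++d)
        for (int y = 0; y < dst_rows; ++y)
            for (int x = 0; x < dst_cols; ++x)
            {
                float sum = 0.f;
                for (int dy = 0; dy < 2; ++dy)
                    for (int dx = 0; dx < 2; ++dx)
                    {
                        // clamped here so odd-sized levels stay in bounds
                        int sy = std::min(2 * y + dy, src_rows - 1);
                        int sx = std::min(2 * x + dx, src_cols - 1);
                        sum += src[((size_t)d * src_rows + sy) * src_cols + sx];
                    }
                dst[((size_t)d * dst_rows + y) * dst_cols + x] = sum;
            }
}
```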
+///////////////////////////////////////////////////////////////
+//////////////////////  level up messages  ////////////////////
+///////////////////////////////////////////////////////////////
+
+namespace beliefpropagation_gpu
+{
+    __global__ void level_up_kernel(int dst_cols, int dst_rows, int src_rows, float *src, size_t src_step, float *dst, size_t dst_step)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (x < dst_cols && y < dst_rows)
+        {
+            const size_t dst_disp_step = dst_step * dst_rows;
+            const size_t src_disp_step = src_step * src_rows;
+
+            float *dstr = dst + y   * dst_step + x;
+            float *srcr = src + y/2 * src_step + x/2;
+
+            for (int d = 0; d < cndisp; ++d)
+                dstr[d * dst_disp_step] = srcr[d * src_disp_step];
+        }
+    }
+}
+
+namespace cv { namespace gpu { namespace impl {
+    extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2D_<float>* mu, DevMem2D_<float>* md, DevMem2D_<float>* ml, DevMem2D_<float>* mr)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(dst_cols, threads.x);
+        grid.y = divUp(dst_rows, threads.y);
+
+        int src_idx = (dst_idx + 1) & 1;
+
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, mu[src_idx].ptr, mu[src_idx].step/sizeof(float), mu[dst_idx].ptr, mu[dst_idx].step/sizeof(float));
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, md[src_idx].ptr, md[src_idx].step/sizeof(float), md[dst_idx].ptr, md[dst_idx].step/sizeof(float));
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, ml[src_idx].ptr, ml[src_idx].step/sizeof(float), ml[dst_idx].ptr, ml[dst_idx].step/sizeof(float));
+        beliefpropagation_gpu::level_up_kernel<<<grid, threads>>>(dst_cols, dst_rows, src_rows, mr[src_idx].ptr, mr[src_idx].step/sizeof(float), mr[dst_idx].ptr, mr[dst_idx].step/sizeof(float));
+
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
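`level_up_kernel` initializes each finer level by copying the messages of the coarse parent `(x/2, y/2)`, and the two buffer sets (`u..r` vs. `u2..r2`) ping-pong between pyramid levels. The parity-based zero-initialization back in `operator()` is what guarantees the finest level always lands in set 0, the set `output_caller` reads. A quick standalone check of that invariant (illustrative only, not part of the patch):

```cpp
#include <cstdio>

// Sketch: the parity trick in operator() means the finest level (i == 0)
// always runs on buffer set 0 (u, d, l, r), regardless of the level count.
int main()
{
    for (int levels = 1; levels <= 8; ++levels)
    {
        int mem_idx = (levels & 1) ? 0 : 1;   // same initialization as operator()
        for (int i = levels - 1; i > 0; --i)  // one flip per level above the finest
            mem_idx = (mem_idx + 1) & 1;
        printf("levels=%d -> finest level uses buffer set %d\n", levels, mem_idx);
    }
    return 0;
}
```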
+///////////////////////////////////////////////////////////////
+/////////////////  calcs all iterations  //////////////////////
+///////////////////////////////////////////////////////////////
+
+namespace beliefpropagation_gpu
+{
+    __device__ void calc_min_linear_penalty(float *dst, size_t step)
+    {
+        float prev = dst[0];
+        float cur;
+        for (int disp = 1; disp < cndisp; ++disp)
+        {
+            prev += 1.0f;
+            cur = dst[step * disp];
+            if (prev < cur)
+                cur = prev;
+            dst[step * disp] = prev = cur;
+        }
+
+        prev = dst[(cndisp - 1) * step];
+        for (int disp = cndisp - 2; disp >= 0; disp--)
+        {
+            prev += 1.0f;
+            cur = dst[step * disp];
+            if (prev < cur)
+                cur = prev;
+            dst[step * disp] = prev = cur;
+        }
+    }
+
+    __device__ void message(float *msg1, float *msg2, float *msg3, float *data, float *dst, size_t msg_disp_step, size_t data_disp_step)
+    {
+        float minimum = FLT_MAX;
+
+        for (int i = 0; i < cndisp; ++i)
+        {
+            float dst_reg = msg1[msg_disp_step * i] + msg2[msg_disp_step * i] + msg3[msg_disp_step * i] + data[data_disp_step * i];
+
+            if (dst_reg < minimum)
+                minimum = dst_reg;
+
+            dst[msg_disp_step * i] = dst_reg;
+        }
+
+        calc_min_linear_penalty(dst, msg_disp_step);
+
+        minimum += cdisc_cost;
+
+        float sum = 0;
+        for (int i = 0; i < cndisp; ++i)
+        {
+            float dst_reg = dst[msg_disp_step * i];
+            if (dst_reg > minimum)
+                dst[msg_disp_step * i] = dst_reg = minimum;
+            sum += dst_reg;
+        }
+        sum /= cndisp;
+
+        for (int i = 0; i < cndisp; ++i)
+            dst[msg_disp_step * i] -= sum;
+    }
+
+    __global__ void one_iteration(int t, float* u, float *d, float *l, float *r, size_t msg_step, float *data, size_t data_step, int cols, int rows)
+    {
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+        int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
+
+        if ((y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1))
+        {
+            float *us = u + y * msg_step + x;
+            float *ds = d + y * msg_step + x;
+            float *ls = l + y * msg_step + x;
+            float *rs = r + y * msg_step + x;
+            float *dt = data + y * data_step + x;
+            size_t msg_disp_step = msg_step * rows;
+            size_t data_disp_step = data_step * rows;
+
+            message(us + msg_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step);
+            message(ds - msg_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step);
+            message(us + msg_step, ds - msg_step, rs - 1, dt, rs, msg_disp_step, data_disp_step);
+            message(us + msg_step, ds - msg_step, ls + 1, dt, ls, msg_disp_step, data_disp_step);
+        }
+    }
+}
+
+namespace cv { namespace gpu { namespace impl {
+    extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2D_<float>& u, DevMem2D_<float>& d, DevMem2D_<float>& l, DevMem2D_<float>& r, const DevMem2D_<float>& data)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(cols, threads.x << 1);
+        grid.y = divUp(rows, threads.y);
+
+        for (int t = 0; t < iters; ++t)
+            beliefpropagation_gpu::one_iteration<<<grid, threads>>>(t, u.ptr, d.ptr, l.ptr, r.ptr, u.step/sizeof(float), data.ptr, data.step/sizeof(float), cols, rows);
+
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
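`one_iteration` sweeps a red-black checkerboard (the `(y + t) & 1` offset), so each pass updates half the pixels and reads only neighbors written in the previous pass. `message()` is the O(ndisp) truncated linear update: a two-pass min-convolution (`calc_min_linear_penalty`) followed by truncation at `minimum + disc_cost` and zero-mean normalization. A hedged CPU reference of the same update (the helper name is mine, not in the patch):

```cpp
#include <algorithm>
#include <cfloat>

// Hypothetical CPU reference for message(): the Felzenszwalb-Huttenlocher
// truncated linear message update, O(ndisp) via a two-pass min-convolution.
void message_ref(const float* m1, const float* m2, const float* m3,
                 const float* data, float* dst, int ndisp, float disc_cost)
{
    float minimum = FLT_MAX;
    for (int i = 0; i < ndisp; ++i)
    {
        dst[i] = m1[i] + m2[i] + m3[i] + data[i];
        minimum = std::min(minimum, dst[i]);
    }

    // forward/backward passes: dst[i] = min(dst[i], dst[i -/+ 1] + 1)
    for (int i = 1; i < ndisp; ++i)
        dst[i] = std::min(dst[i], dst[i - 1] + 1.0f);
    for (int i = ndisp - 2; i >= 0; --i)
        dst[i] = std::min(dst[i], dst[i + 1] + 1.0f);

    // truncate at minimum + disc_cost, then normalize to zero mean
    float sum = 0.f;
    for (int i = 0; i < ndisp; ++i)
    {
        dst[i] = std::min(dst[i], minimum + disc_cost);
        sum += dst[i];
    }
    sum /= ndisp;
    for (int i = 0; i < ndisp; ++i)
        dst[i] -= sum;
}
```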
+///////////////////////////////////////////////////////////////
+//////////////////////  output caller  ////////////////////////
+///////////////////////////////////////////////////////////////
+
+namespace beliefpropagation_gpu
+{
+    __global__ void output(int cols, int rows, float *u, float *d, float *l, float *r, float* data, size_t step, unsigned char *disp, size_t res_step)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)
+        {
+            float *us = u + (y + 1) * step + x;
+            float *ds = d + (y - 1) * step + x;
+            float *ls = l + y * step + (x + 1);
+            float *rs = r + y * step + (x - 1);
+            float *dt = data + y * step + x;
+
+            size_t disp_step = rows * step;
+
+            int best = 0;
+            float best_val = FLT_MAX;
+            for (int d = 0; d < cndisp; ++d)
+            {
+                float val = us[d * disp_step] + ds[d * disp_step] + ls[d * disp_step] + rs[d * disp_step] + dt[d * disp_step];
+
+                if (val < best_val)
+                {
+                    best_val = val;
+                    best = d;
+                }
+            }
+
+            disp[res_step * y + x] = best & 0xFF;
+        }
+    }
+}
+
+namespace cv { namespace gpu { namespace impl {
+    extern "C" void output_caller(const DevMem2D_<float>& u, const DevMem2D_<float>& d, const DevMem2D_<float>& l, const DevMem2D_<float>& r, const DevMem2D_<float>& data, DevMem2D disp)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(disp.cols, threads.x);
+        grid.y = divUp(disp.rows, threads.y);
+
+        beliefpropagation_gpu::output<<<grid, threads>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step);
+        cudaSafeCall( cudaThreadSynchronize() );
+    }
+}}}
\ No newline at end of file
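The output kernel is a winner-take-all over per-pixel beliefs: the sum of the four incoming neighbor messages plus the data term, minimized over disparity. Its CPU equivalent for one pixel (the helper name is mine, not in the patch):

```cpp
#include <cfloat>

// Hypothetical CPU reference for the output kernel: pick the disparity with
// the minimum belief b(d) = u(d) + d(d) + l(d) + r(d) + data(d), where the
// messages come from the four neighbors (below, above, right, left).
int best_disparity_ref(const float* mu, const float* md, const float* ml,
                       const float* mr, const float* data, int ndisp)
{
    int best = 0;
    float best_val = FLT_MAX;
    for (int i = 0; i < ndisp; ++i)
    {
        float val = mu[i] + md[i] + ml[i] + mr[i] + data[i];
        if (val < best_val)
        {
            best_val = val;
            best = i;
        }
    }
    return best;
}
```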
diff --git a/modules/gpu/src/cudastream.cpp b/modules/gpu/src/cudastream.cpp
index 0c4d8ff0ba..5603197645 100644
--- a/modules/gpu/src/cudastream.cpp
+++ b/modules/gpu/src/cudastream.cpp
@@ -52,18 +52,18 @@ void cv::gpu::CudaStream::create() { throw_nogpu(); }
 void cv::gpu::CudaStream::release() { throw_nogpu(); }
 cv::gpu::CudaStream::CudaStream() : impl(0) { throw_nogpu(); }
 cv::gpu::CudaStream::~CudaStream() { throw_nogpu(); }
-cv::gpu::CudaStream::CudaStream(const CudaStream& stream) { throw_nogpu(); }
-CudaStream& cv::gpu::CudaStream::operator=(const CudaStream& stream) { throw_nogpu(); return *this; }
+cv::gpu::CudaStream::CudaStream(const CudaStream& /*stream*/) { throw_nogpu(); }
+CudaStream& cv::gpu::CudaStream::operator=(const CudaStream& /*stream*/) { throw_nogpu(); return *this; }
 bool cv::gpu::CudaStream::queryIfComplete() { throw_nogpu(); return true; }
 void cv::gpu::CudaStream::waitForCompletion() { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, Mat& dst) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueDownload(const GpuMat& src, MatPL& dst) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueUpload(const MatPL& src, GpuMat& dst) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueUpload(const Mat& src, GpuMat& dst) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueCopy(const GpuMat& src, GpuMat& dst) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& src, Scalar val) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& src, Scalar val, const GpuMat& mask) { throw_nogpu(); }
-void cv::gpu::CudaStream::enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a, double b) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueDownload(const GpuMat& /*src*/, Mat& /*dst*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueDownload(const GpuMat& /*src*/, MatPL& /*dst*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueUpload(const MatPL& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueUpload(const Mat& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueCopy(const GpuMat& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& /*src*/, Scalar /*val*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueMemSet(const GpuMat& /*src*/, Scalar /*val*/, const GpuMat& /*mask*/) { throw_nogpu(); }
+void cv::gpu::CudaStream::enqueueConvert(const GpuMat& /*src*/, GpuMat& /*dst*/, int /*type*/, double /*a*/, double /*b*/) { throw_nogpu(); }
 
 #else /* !defined (HAVE_CUDA) */
 
diff --git a/modules/gpu/src/precomp.hpp b/modules/gpu/src/precomp.hpp
index b19f9740d7..c30216b0b0 100644
--- a/modules/gpu/src/precomp.hpp
+++ b/modules/gpu/src/precomp.hpp
@@ -54,13 +54,12 @@
 #include
 
 #include "opencv2/gpu/gpu.hpp"
-#include "opencv2/gpu/stream_accessor.hpp"
-
 #if defined(HAVE_CUDA)
 
     #include "cuda_shared.hpp"
     #include "cuda_runtime_api.h"
+    #include "opencv2/gpu/stream_accessor.hpp"
 
 #else /* defined(HAVE_CUDA) */
 
diff --git a/modules/gpu/src/stereobm_gpu.cpp b/modules/gpu/src/stereobm_gpu.cpp
index 752e3571e1..3c43f44418 100644
--- a/modules/gpu/src/stereobm_gpu.cpp
+++ b/modules/gpu/src/stereobm_gpu.cpp
@@ -98,7 +98,7 @@ bool cv::gpu::StereoBM_GPU::checkIfGpuCallReasonable()
     return false;
 }
 
-void stereo_gpu_operator ( GpuMat& minSSD, GpuMat& leBuf, GpuMat& riBuf, int preset, int ndisp, int winSize, float avergeTexThreshold, const GpuMat& left, const GpuMat& right, GpuMat& disparity, const cudaStream_t & stream)
+static void stereo_bm_gpu_operator ( GpuMat& minSSD, GpuMat& leBuf, GpuMat& riBuf, int preset, int ndisp, int winSize, float avergeTexThreshold, const GpuMat& left, const GpuMat& right, GpuMat& disparity, const cudaStream_t & stream)
 {
     CV_DbgAssert(left.rows == right.rows && left.cols == right.cols);
     CV_DbgAssert(left.type() == CV_8UC1);
@@ -131,12 +131,12 @@ void stereo_gpu_operator ( GpuMat& minSSD, GpuMat& leBuf, GpuMat& riBuf, int
 
 void cv::gpu::StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity)
 {
-    ::stereo_gpu_operator(minSSD, leBuf, riBuf, preset, ndisp, winSize, avergeTexThreshold, left, right, disparity, 0);
+    ::stereo_bm_gpu_operator(minSSD, leBuf, riBuf, preset, ndisp, winSize, avergeTexThreshold, left, right, disparity, 0);
 }
 
 void cv::gpu::StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity, const CudaStream& stream)
 {
-    ::stereo_gpu_operator(minSSD, leBuf, riBuf, preset, ndisp, winSize, avergeTexThreshold, left, right, disparity, StreamAccessor::getStream(stream));
+    ::stereo_bm_gpu_operator(minSSD, leBuf, riBuf, preset, ndisp, winSize, avergeTexThreshold, left, right, disparity, StreamAccessor::getStream(stream));
 }
 
 #endif /* !defined (HAVE_CUDA) */