/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

#ifndef HAVE_CUDA

void cv::gpu::pyrDown(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::pyrUp(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }

void cv::gpu::ImagePyramid::build(const GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::ImagePyramid::getLayer(GpuMat&, Size, Stream&) const { throw_nogpu(); }

#else // HAVE_CUDA

//////////////////////////////////////////////////////////////////////////////
// pyrDown

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <typename T> void pyrDown_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    }
}}}

void cv::gpu::pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    using namespace cv::gpu::device::imgproc;

    typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);

    // Dispatch table indexed by [depth][channels - 1]; combinations without a
    // compiled kernel are left as null entries.
    static const func_t funcs[6][4] =
    {
        {pyrDown_gpu<uchar>      , 0 /*pyrDown_gpu<uchar2>*/ , pyrDown_gpu<uchar3>      , pyrDown_gpu<uchar4>      },
        {0 /*pyrDown_gpu<schar>*/ , 0 /*pyrDown_gpu<schar2>*/ , 0 /*pyrDown_gpu<schar3>*/ , 0 /*pyrDown_gpu<schar4>*/ },
        {pyrDown_gpu<ushort>     , 0 /*pyrDown_gpu<ushort2>*/, pyrDown_gpu<ushort3>     , pyrDown_gpu<ushort4>     },
        {pyrDown_gpu<short>      , 0 /*pyrDown_gpu<short2>*/ , pyrDown_gpu<short3>      , pyrDown_gpu<short4>      },
        {0 /*pyrDown_gpu<int>*/  , 0 /*pyrDown_gpu<int2>*/   , 0 /*pyrDown_gpu<int3>*/  , 0 /*pyrDown_gpu<int4>*/  },
        {pyrDown_gpu<float>      , 0 /*pyrDown_gpu<float2>*/ , pyrDown_gpu<float3>      , pyrDown_gpu<float4>      }
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    const func_t func = funcs[src.depth()][src.channels() - 1];
    CV_Assert(func != 0);

    // The downsampled image is half the source size, rounded up.
    dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, src.type());

    func(src, dst, StreamAccessor::getStream(stream));
}

//////////////////////////////////////////////////////////////////////////////
// pyrUp

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <typename T> void pyrUp_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    }
}}}

void cv::gpu::pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    using namespace cv::gpu::device::imgproc;

    typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {pyrUp_gpu<uchar>      , 0 /*pyrUp_gpu<uchar2>*/ , pyrUp_gpu<uchar3>      , pyrUp_gpu<uchar4>      },
        {0 /*pyrUp_gpu<schar>*/ , 0 /*pyrUp_gpu<schar2>*/ , 0 /*pyrUp_gpu<schar3>*/ , 0 /*pyrUp_gpu<schar4>*/ },
        {pyrUp_gpu<ushort>     , 0 /*pyrUp_gpu<ushort2>*/, pyrUp_gpu<ushort3>     , pyrUp_gpu<ushort4>     },
        {pyrUp_gpu<short>      , 0 /*pyrUp_gpu<short2>*/ , pyrUp_gpu<short3>      , pyrUp_gpu<short4>      },
        {0 /*pyrUp_gpu<int>*/  , 0 /*pyrUp_gpu<int2>*/   , 0 /*pyrUp_gpu<int3>*/  , 0 /*pyrUp_gpu<int4>*/  },
        {pyrUp_gpu<float>      , 0 /*pyrUp_gpu<float2>*/ , pyrUp_gpu<float3>      , pyrUp_gpu<float4>      }
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    const func_t func = funcs[src.depth()][src.channels() - 1];
    CV_Assert(func != 0);

    // The upsampled image is exactly twice the source size.
    dst.create(src.rows * 2, src.cols * 2, src.type());

    func(src, dst, StreamAccessor::getStream(stream));
}
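// Usage sketch for the two entry points above (illustrative only, not part of
// the library; assumes a CUDA-capable device and an 8-bit, 3-channel host
// image already loaded into h_img):
//
//     cv::gpu::GpuMat d_img, d_down, d_up;
//     d_img.upload(h_img);               // copy the host image to the device
//     cv::gpu::pyrDown(d_img, d_down);   // ((rows + 1) / 2) x ((cols + 1) / 2)
//     cv::gpu::pyrUp(d_down, d_up);      // 2 * d_down.rows x 2 * d_down.cols
//
// Both functions also take a cv::gpu::Stream argument (defaulted in the public
// header) for asynchronous execution on a user-supplied stream.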
//////////////////////////////////////////////////////////////////////////////
// ImagePyramid

namespace cv { namespace gpu { namespace device
{
    namespace pyramid
    {
        template <typename T> void kernelDownsampleX2_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
        template <typename T> void kernelInterpolateFrom1_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    }
}}}

void cv::gpu::ImagePyramid::build(const GpuMat& img, int numLayers, Stream& stream)
{
    using namespace cv::gpu::device::pyramid;

    typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {kernelDownsampleX2_gpu<uchar1>      , 0 /*kernelDownsampleX2_gpu<uchar2>*/ , kernelDownsampleX2_gpu<uchar3>      , kernelDownsampleX2_gpu<uchar4>      },
        {0 /*kernelDownsampleX2_gpu<char1>*/ , 0 /*kernelDownsampleX2_gpu<char2>*/  , 0 /*kernelDownsampleX2_gpu<char3>*/ , 0 /*kernelDownsampleX2_gpu<char4>*/ },
        {kernelDownsampleX2_gpu<ushort1>     , 0 /*kernelDownsampleX2_gpu<ushort2>*/, kernelDownsampleX2_gpu<ushort3>     , kernelDownsampleX2_gpu<ushort4>     },
        {0 /*kernelDownsampleX2_gpu<short1>*/, 0 /*kernelDownsampleX2_gpu<short2>*/ , 0 /*kernelDownsampleX2_gpu<short3>*/, 0 /*kernelDownsampleX2_gpu<short4>*/},
        {0 /*kernelDownsampleX2_gpu<int1>*/  , 0 /*kernelDownsampleX2_gpu<int2>*/   , 0 /*kernelDownsampleX2_gpu<int3>*/  , 0 /*kernelDownsampleX2_gpu<int4>*/  },
        {kernelDownsampleX2_gpu<float1>      , 0 /*kernelDownsampleX2_gpu<float2>*/ , kernelDownsampleX2_gpu<float3>      , kernelDownsampleX2_gpu<float4>      }
    };

    CV_Assert(img.depth() <= CV_32F && img.channels() <= 4);

    const func_t func = funcs[img.depth()][img.channels() - 1];
    CV_Assert(func != 0);

    layer0_ = img;
    Size szLastLayer = img.size();
    nLayers_ = 1;

    if (numLayers <= 0)
        numLayers = 255; // build until either dimension drops below 1

    pyramid_.resize(numLayers);

    for (int i = 0; i < numLayers - 1; ++i)
    {
        Size szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);

        if (szCurLayer.width == 0 || szCurLayer.height == 0)
            break;

        ensureSizeIsEnough(szCurLayer, img.type(), pyramid_[i]);
        nLayers_++;

        const GpuMat& prevLayer = i == 0 ? layer0_ : pyramid_[i - 1];

        func(prevLayer, pyramid_[i], StreamAccessor::getStream(stream));

        szLastLayer = szCurLayer;
    }
}

void cv::gpu::ImagePyramid::getLayer(GpuMat& outImg, Size outRoi, Stream& stream) const
{
    using namespace cv::gpu::device::pyramid;

    typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {kernelInterpolateFrom1_gpu<uchar1>      , 0 /*kernelInterpolateFrom1_gpu<uchar2>*/ , kernelInterpolateFrom1_gpu<uchar3>      , kernelInterpolateFrom1_gpu<uchar4>      },
        {0 /*kernelInterpolateFrom1_gpu<char1>*/ , 0 /*kernelInterpolateFrom1_gpu<char2>*/  , 0 /*kernelInterpolateFrom1_gpu<char3>*/ , 0 /*kernelInterpolateFrom1_gpu<char4>*/ },
        {kernelInterpolateFrom1_gpu<ushort1>     , 0 /*kernelInterpolateFrom1_gpu<ushort2>*/, kernelInterpolateFrom1_gpu<ushort3>     , kernelInterpolateFrom1_gpu<ushort4>     },
        {0 /*kernelInterpolateFrom1_gpu<short1>*/, 0 /*kernelInterpolateFrom1_gpu<short2>*/ , 0 /*kernelInterpolateFrom1_gpu<short3>*/, 0 /*kernelInterpolateFrom1_gpu<short4>*/},
        {0 /*kernelInterpolateFrom1_gpu<int1>*/  , 0 /*kernelInterpolateFrom1_gpu<int2>*/   , 0 /*kernelInterpolateFrom1_gpu<int3>*/  , 0 /*kernelInterpolateFrom1_gpu<int4>*/  },
        {kernelInterpolateFrom1_gpu<float1>      , 0 /*kernelInterpolateFrom1_gpu<float2>*/ , kernelInterpolateFrom1_gpu<float3>      , kernelInterpolateFrom1_gpu<float4>      }
    };

    CV_Assert(outRoi.width <= layer0_.cols && outRoi.height <= layer0_.rows && outRoi.width > 0 && outRoi.height > 0);

    ensureSizeIsEnough(outRoi, layer0_.type(), outImg);

    const func_t func = funcs[outImg.depth()][outImg.channels() - 1];
    CV_Assert(func != 0);

    // Exact match with the base layer: copy it out and return.
    if (outRoi.width == layer0_.cols && outRoi.height == layer0_.rows)
    {
        if (stream)
            stream.enqueueCopy(layer0_, outImg);
        else
            layer0_.copyTo(outImg);

        return;
    }

    float lastScale = 1.0f;
    float curScale;
    GpuMat lastLayer = layer0_;
    GpuMat curLayer;

    // Walk down the pyramid until a layer matches the requested size exactly
    // or becomes smaller than it in both dimensions.
    for (int i = 0; i < nLayers_ - 1; ++i)
    {
        curScale = lastScale * 0.5f;
        curLayer = pyramid_[i];

        if (outRoi.width == curLayer.cols && outRoi.height == curLayer.rows)
        {
            if (stream)
                stream.enqueueCopy(curLayer, outImg);
            else
                curLayer.copyTo(outImg);

            return;
        }

        if (outRoi.width >= curLayer.cols && outRoi.height >= curLayer.rows)
            break;

        lastScale = curScale;
        lastLayer = curLayer;
    }

    // Interpolate the requested size from the closest larger stored layer.
    func(lastLayer, outImg, StreamAccessor::getStream(stream));
}

#endif // HAVE_CUDA
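// Usage sketch for ImagePyramid (illustrative only, not part of the library;
// d_img is assumed to be a GpuMat of a supported type, e.g. CV_8UC4, already
// on the device, with the default Stream arguments from the public header):
//
//     cv::gpu::ImagePyramid pyr;
//     pyr.build(d_img, 5);                        // layer 0 plus up to 4 half-size layers
//     cv::gpu::GpuMat d_small;
//     pyr.getLayer(d_small, cv::Size(320, 240));  // produced from the nearest stored layer
//
// getLayer() copies a stored layer directly when the requested size matches
// one exactly; otherwise it interpolates from the smallest stored layer that
// is still larger than the requested ROI.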