/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudaarithm.hpp" #include "opencv2/cudev.hpp" #include "opencv2/core/private.cuda.hpp" using namespace cv; using namespace cv::cuda; using namespace cv::cudev; namespace { template struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy : DefaultTransformPolicy { enum { shift = 1 }; }; } ////////////////////////////////////////////////////////////////////////////// /// abs namespace { template void absMat(const GpuMat& src, const GpuMat& dst, Stream& stream) { gridTransformUnary_< TransformPolicy >(globPtr(src), globPtr(dst), abs_func(), stream); } } void cv::cuda::abs(InputArray _src, OutputArray _dst, Stream& stream) { typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[] = { absMat, absMat, absMat, absMat, absMat, absMat, absMat }; GpuMat src = getInputMat(_src, stream); CV_Assert( src.depth() <= CV_64F ); GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream); funcs[src.depth()](src.reshape(1), dst.reshape(1), stream); syncOutput(dst, _dst, stream); } ////////////////////////////////////////////////////////////////////////////// /// sqr namespace { template struct SqrOp : unary_function { __device__ __forceinline__ T operator ()(T x) const { return cudev::saturate_cast(x * x); } }; template void sqrMat(const GpuMat& src, const GpuMat& dst, Stream& stream) { gridTransformUnary_< TransformPolicy >(globPtr(src), globPtr(dst), SqrOp(), stream); } } void cv::cuda::sqr(InputArray _src, OutputArray _dst, Stream& stream) { typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[] = { sqrMat, sqrMat, sqrMat, sqrMat, sqrMat, sqrMat, sqrMat }; GpuMat src = getInputMat(_src, stream); CV_Assert( src.depth() <= CV_64F ); GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream); funcs[src.depth()](src.reshape(1), dst.reshape(1), stream); syncOutput(dst, _dst, stream); } ////////////////////////////////////////////////////////////////////////////// /// sqrt namespace { template void sqrtMat(const GpuMat& src, const GpuMat& dst, Stream& stream) { gridTransformUnary_< TransformPolicy >(globPtr(src), globPtr(dst), sqrt_func(), stream); } } void cv::cuda::sqrt(InputArray _src, OutputArray _dst, Stream& stream) { typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[] = { sqrtMat, sqrtMat, sqrtMat, sqrtMat, sqrtMat, sqrtMat, sqrtMat }; GpuMat src = getInputMat(_src, stream); CV_Assert( src.depth() <= CV_64F ); GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream); funcs[src.depth()](src.reshape(1), dst.reshape(1), stream); syncOutput(dst, _dst, stream); } //////////////////////////////////////////////////////////////////////// /// exp namespace { template struct ExpOp : unary_function { __device__ __forceinline__ T operator ()(T x) const { exp_func f; return cudev::saturate_cast(f(x)); } }; template void expMat(const GpuMat& src, const GpuMat& dst, Stream& stream) { gridTransformUnary_< TransformPolicy >(globPtr(src), globPtr(dst), ExpOp(), stream); } } void cv::cuda::exp(InputArray _src, OutputArray _dst, Stream& stream) { typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream); static const func_t funcs[] = { expMat, expMat, expMat, expMat, expMat, expMat, expMat }; GpuMat src = getInputMat(_src, stream); CV_Assert( src.depth() <= CV_64F ); GpuMat dst 
//////////////////////////////////////////////////////////////////////////////
/// abs

namespace
{
    template <typename T>
    void absMat(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        gridTransformUnary_< TransformPolicy<T> >(globPtr<T>(src), globPtr<T>(dst), abs_func<T>(), stream);
    }
}

void cv::cuda::abs(InputArray _src, OutputArray _dst, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        absMat<uchar>,
        absMat<schar>,
        absMat<ushort>,
        absMat<short>,
        absMat<int>,
        absMat<float>,
        absMat<double>
    };

    GpuMat src = getInputMat(_src, stream);

    CV_Assert( src.depth() <= CV_64F );

    GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);

    funcs[src.depth()](src.reshape(1), dst.reshape(1), stream);

    syncOutput(dst, _dst, stream);
}

//////////////////////////////////////////////////////////////////////////////
/// sqr

namespace
{
    template <typename T> struct SqrOp : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return cudev::saturate_cast<T>(x * x);
        }
    };

    template <typename T>
    void sqrMat(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        gridTransformUnary_< TransformPolicy<T> >(globPtr<T>(src), globPtr<T>(dst), SqrOp<T>(), stream);
    }
}

void cv::cuda::sqr(InputArray _src, OutputArray _dst, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        sqrMat<uchar>,
        sqrMat<schar>,
        sqrMat<ushort>,
        sqrMat<short>,
        sqrMat<int>,
        sqrMat<float>,
        sqrMat<double>
    };

    GpuMat src = getInputMat(_src, stream);

    CV_Assert( src.depth() <= CV_64F );

    GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);

    funcs[src.depth()](src.reshape(1), dst.reshape(1), stream);

    syncOutput(dst, _dst, stream);
}

//////////////////////////////////////////////////////////////////////////////
/// sqrt

namespace
{
    template <typename T>
    void sqrtMat(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        gridTransformUnary_< TransformPolicy<T> >(globPtr<T>(src), globPtr<T>(dst), sqrt_func<T>(), stream);
    }
}

void cv::cuda::sqrt(InputArray _src, OutputArray _dst, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        sqrtMat<uchar>,
        sqrtMat<schar>,
        sqrtMat<ushort>,
        sqrtMat<short>,
        sqrtMat<int>,
        sqrtMat<float>,
        sqrtMat<double>
    };

    GpuMat src = getInputMat(_src, stream);

    CV_Assert( src.depth() <= CV_64F );

    GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);

    funcs[src.depth()](src.reshape(1), dst.reshape(1), stream);

    syncOutput(dst, _dst, stream);
}

////////////////////////////////////////////////////////////////////////
/// exp

namespace
{
    template <typename T> struct ExpOp : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            exp_func<T> f;
            return cudev::saturate_cast<T>(f(x));
        }
    };

    template <typename T>
    void expMat(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        gridTransformUnary_< TransformPolicy<T> >(globPtr<T>(src), globPtr<T>(dst), ExpOp<T>(), stream);
    }
}

void cv::cuda::exp(InputArray _src, OutputArray _dst, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        expMat<uchar>,
        expMat<schar>,
        expMat<ushort>,
        expMat<short>,
        expMat<int>,
        expMat<float>,
        expMat<double>
    };

    GpuMat src = getInputMat(_src, stream);

    CV_Assert( src.depth() <= CV_64F );

    GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);

    funcs[src.depth()](src.reshape(1), dst.reshape(1), stream);

    syncOutput(dst, _dst, stream);
}

////////////////////////////////////////////////////////////////////////
// log

namespace
{
    template <typename T>
    void logMat(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        gridTransformUnary_< TransformPolicy<T> >(globPtr<T>(src), globPtr<T>(dst), log_func<T>(), stream);
    }
}

void cv::cuda::log(InputArray _src, OutputArray _dst, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        logMat<uchar>,
        logMat<schar>,
        logMat<ushort>,
        logMat<short>,
        logMat<int>,
        logMat<float>,
        logMat<double>
    };

    GpuMat src = getInputMat(_src, stream);

    CV_Assert( src.depth() <= CV_64F );

    GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);

    funcs[src.depth()](src.reshape(1), dst.reshape(1), stream);

    syncOutput(dst, _dst, stream);
}

////////////////////////////////////////////////////////////////////////
// pow

namespace
{
    template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
    {
        float power;

        __device__ __forceinline__ T operator()(T e) const
        {
            return cudev::saturate_cast<T>(__powf((float)e, power));
        }
    };
    template<typename T> struct PowOp<T, true> : unary_function<T, T>
    {
        float power;

        __device__ __forceinline__ T operator()(T e) const
        {
            T res = cudev::saturate_cast<T>(__powf((float)e, power));

            if ((e < 0) && (1 & static_cast<int>(power)))
                res *= -1;

            return res;
        }
    };
    template<> struct PowOp<float, true> : unary_function<float, float>
    {
        float power;

        __device__ __forceinline__ float operator()(float e) const
        {
            return __powf(::fabs(e), power);
        }
    };
    template<> struct PowOp<double, true> : unary_function<double, double>
    {
        double power;

        __device__ __forceinline__ double operator()(double e) const
        {
            return ::pow(::fabs(e), power);
        }
    };

    template<typename T>
    void powMat(const GpuMat& src, double power, const GpuMat& dst, Stream& stream)
    {
        PowOp<T> op;
        op.power = static_cast<typename LargerType<T, float>::type>(power);

        gridTransformUnary_< TransformPolicy<T> >(globPtr<T>(src), globPtr<T>(dst), op, stream);
    }
}

void cv::cuda::pow(InputArray _src, double power, OutputArray _dst, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, double power, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        powMat<uchar>,
        powMat<schar>,
        powMat<ushort>,
        powMat<short>,
        powMat<int>,
        powMat<float>,
        powMat<double>
    };

    GpuMat src = getInputMat(_src, stream);

    CV_Assert( src.depth() <= CV_64F );

    GpuMat dst = getOutputMat(_dst, src.size(), src.type(), stream);

    funcs[src.depth()](src.reshape(1), power, dst.reshape(1), stream);

    syncOutput(dst, _dst, stream);
}

#endif
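// ----------------------------------------------------------------------------
// Usage sketch (illustrative only; variable names are hypothetical and this
// snippet is not part of the module). Host code typically drives the
// element-wise wrappers defined above along these lines:
//
//     cv::cuda::GpuMat src, srcF, dst;
//     src.upload(hostMat);                 // hostMat: any cv::Mat with depth <= CV_64F
//     src.convertTo(srcF, CV_32F);         // CV_32F input selects the <float> kernels
//     cv::cuda::sqrt(srcF, dst);           // dispatches to sqrtMat<float>
//     cv::cuda::pow(srcF, 2.5, dst);       // dispatches to powMat<float>
//
//     cv::cuda::Stream stream;             // asynchronous variant
//     cv::cuda::abs(srcF, dst, stream);
//     stream.waitForCompletion();
// ----------------------------------------------------------------------------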