diff --git a/modules/core/include/opencv2/core/ocl.hpp b/modules/core/include/opencv2/core/ocl.hpp
index f50ed378f9..f2535940f0 100644
--- a/modules/core/include/opencv2/core/ocl.hpp
+++ b/modules/core/include/opencv2/core/ocl.hpp
@@ -286,7 +286,7 @@ public:
     Kernel();
     Kernel(const char* kname, const Program& prog);
     Kernel(const char* kname, const ProgramSource2& prog,
-           const String& buildopts, String* errmsg=0);
+           const String& buildopts = String(), String* errmsg=0);
     ~Kernel();
     Kernel(const Kernel& k);
     Kernel& operator = (const Kernel& k);
diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp
index 64460efb0d..8a0cc279a6 100644
--- a/modules/core/src/ocl.cpp
+++ b/modules/core/src/ocl.cpp
@@ -1893,7 +1893,7 @@ Context2& Context2::getDefault()
         // First, try to retrieve existing context of the same type.
         // In its turn, Platform::getContext() may call Context2::create()
         // if there is no such context.
-        ctx.create(Device::TYPE_ACCELERATOR);
+        ctx.create(Device::TYPE_CPU);
         if(!ctx.p)
             ctx.create(Device::TYPE_DGPU);
         if(!ctx.p)
@@ -2041,6 +2041,7 @@ struct Kernel::Impl
         cl_int retval = 0;
         handle = ph != 0 ? clCreateKernel(ph, kname, &retval) : 0;
+        if (retval != CL_SUCCESS) printf("kernel creation error code: %d\n", retval);
         for( int i = 0; i < MAX_ARRS; i++ ) u[i] = 0;
         haveTempDstUMats = false;
diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp
index 15d7c6a875..e5383be04b 100644
--- a/modules/imgproc/src/imgwarp.cpp
+++ b/modules/imgproc/src/imgwarp.cpp
@@ -4030,16 +4030,76 @@ private:
 };
 #endif
 
+static bool ocl_warpPerspective(InputArray _src, OutputArray _dst, InputArray _M0,
+                                Size dsize, int flags, int borderType, const Scalar& borderValue)
+{
+    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), wdepth = depth;
+    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
+
+    int interpolation = flags & INTER_MAX;
+    if( interpolation == INTER_AREA )
+        interpolation = INTER_LINEAR;
+
+    if ( !(borderType == cv::BORDER_CONSTANT &&
+           (interpolation == cv::INTER_NEAREST || interpolation == cv::INTER_LINEAR || interpolation == cv::INTER_CUBIC)) ||
+         (!doubleSupport && depth == CV_64F) || cn > 4 || cn == 3)
+        return false;
+
+    UMat src = _src.getUMat(), M0;
+    _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
+    UMat dst = _dst.getUMat();
+
+    double M[9];
+    Mat matM(3, 3, doubleSupport ? CV_64F : CV_32F, M), M1 = _M0.getMat();
+    CV_Assert( (M1.type() == CV_32F || M1.type() == CV_64F) && M1.rows == 3 && M1.cols == 3 );
+    M1.convertTo(matM, matM.type());
+    if( !(flags & WARP_INVERSE_MAP) )
+        invert(matM, matM);
+    matM.copyTo(M0);
+
+    const char * const interpolationMap[3] = { "NEAREST", "LINEAR", "CUBIC" };
+    ocl::Kernel k;
+
+    if (interpolation == INTER_NEAREST)
+    {
+        k.create("warpPerspective", ocl::imgproc::warp_perspective_oclsrc,
+                 format("-D INTER_NEAREST -D T=%s%s", ocl::typeToStr(type),
+                        doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
+    }
+    else
+    {
+        char cvt[2][50];
+        wdepth = std::max(CV_32S, depth);
+        k.create("warpPerspective", ocl::imgproc::warp_perspective_oclsrc,
+                 format("-D INTER_%s -D T=%s -D WT=%s -D depth=%d -D convertToWT=%s -D convertToT=%s%s",
+                        interpolationMap[interpolation], ocl::typeToStr(type),
+                        ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)), depth,
+                        ocl::convertTypeStr(depth, wdepth, cn, cvt[0]),
+                        ocl::convertTypeStr(wdepth, depth, cn, cvt[1]),
+                        doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
+    }
+
+    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
+           ocl::KernelArg::PtrReadOnly(M0), ocl::KernelArg::Constant(Mat(1, 1, CV_MAKE_TYPE(wdepth, cn), borderValue)));
+
+    size_t globalThreads[2] = { dst.cols, dst.rows };
+    return k.run(2, globalThreads, NULL, false);
+}
+
 }
 
 void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
                           Size dsize, int flags, int borderType, const Scalar& borderValue )
 {
+    CV_Assert( _src.total() > 0 );
+
+    if (ocl::useOpenCL() && _dst.isUMat() &&
+        ocl_warpPerspective(_src, _dst, _M0, dsize, flags, borderType, borderValue))
+        return;
+
     Mat src = _src.getMat(), M0 = _M0.getMat();
     _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() );
     Mat dst = _dst.getMat();
-    CV_Assert( src.cols > 0 && src.rows > 0 );
 
     if( dst.data == src.data )
         src = src.clone();
diff --git a/modules/imgproc/src/opencl/warp_affine.cl b/modules/imgproc/src/opencl/warp_affine.cl
new file mode 100644
index 0000000000..27f99e005e
--- /dev/null
+++ b/modules/imgproc/src/opencl/warp_affine.cl
@@ -0,0 +1,761 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
+// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// @Authors
+//    Zhang Ying, zhangying913@gmail.com
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors as is and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +//warpAffine kernel +//support data types: CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4, and three interpolation methods: NN, Linear, Cubic. + +#ifdef DOUBLE_SUPPORT +#ifdef cl_amd_fp64 +#pragma OPENCL EXTENSION cl_amd_fp64:enable +#elif defined (cl_khr_fp64) +#pragma OPENCL EXTENSION cl_khr_fp64:enable +#endif +typedef double F; +typedef double4 F4; +#define convert_F4 convert_double4 +#else +typedef float F; +typedef float4 F4; +#define convert_F4 convert_float4 +#endif + +#define INTER_BITS 5 +#define INTER_TAB_SIZE (1 << INTER_BITS) +#define INTER_SCALE 1.f/INTER_TAB_SIZE +#define AB_BITS max(10, (int)INTER_BITS) +#define AB_SCALE (1 << AB_BITS) +#define INTER_REMAP_COEF_BITS 15 +#define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS) + +inline void interpolateCubic( float x, float* coeffs ) +{ + const float A = -0.75f; + + coeffs[0] = ((A*(x + 1.f) - 5.0f*A)*(x + 1.f) + 8.0f*A)*(x + 1.f) - 4.0f*A; + coeffs[1] = ((A + 2.f)*x - (A + 3.f))*x*x + 1.f; + coeffs[2] = ((A + 2.f)*(1.f - x) - (A + 3.f))*(1.f - x)*(1.f - x) + 1.f; + coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2]; +} + + +/**********************************************8UC1********************************************* +***********************************************************************************************/ +__kernel void warpAffineNN_C1_D0(__global uchar const * restrict src, __global uchar * dst, int src_cols, int src_rows, + int dst_cols, int dst_rows, int srcStep, int dstStep, + int src_offset, int dst_offset, __constant F * M, int threadCols ) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if( dx < threadCols && dy < dst_rows) + { + dx = (dx<<2) - (dst_offset&3); + + int round_delta = (AB_SCALE>>1); + + int4 X, Y; + int4 sx, sy; + int4 DX = (int4)(dx, dx+1, dx+2, dx+3); + DX = (DX << AB_BITS); + F4 M0DX, M3DX; + M0DX = M[0] * convert_F4(DX); + M3DX = M[3] * convert_F4(DX); + X = convert_int4(rint(M0DX)); + Y = convert_int4(rint(M3DX)); + int tmp1, tmp2; + tmp1 = rint((M[1]*dy + M[2]) * AB_SCALE); + tmp2 = rint((M[4]*dy + M[5]) * AB_SCALE); + + X += tmp1 + round_delta; + Y += tmp2 + round_delta; + + sx = convert_int4(convert_short4(X >> AB_BITS)); + sy = convert_int4(convert_short4(Y >> AB_BITS)); + + __global uchar4 * d = (__global uchar4 *)(dst+dst_offset+dy*dstStep+dx); + uchar4 dval = *d; + DX = (int4)(dx, dx+1, dx+2, dx+3); + int4 dcon = DX >= 0 && DX < dst_cols && dy >= 0 && dy < dst_rows; + int4 scon = sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows; + int4 spos = src_offset + sy * srcStep + sx; + uchar4 sval; + sval.s0 = scon.s0 ? src[spos.s0] : 0; + sval.s1 = scon.s1 ? src[spos.s1] : 0; + sval.s2 = scon.s2 ? src[spos.s2] : 0; + sval.s3 = scon.s3 ? src[spos.s3] : 0; + dval = convert_uchar4(dcon) != (uchar4)(0,0,0,0) ? 
sval : dval; + *d = dval; + } +} + +__kernel void warpAffineLinear_C1_D0(__global const uchar * restrict src, __global uchar * dst, int src_cols, int src_rows, + int dst_cols, int dst_rows, int srcStep, int dstStep, + int src_offset, int dst_offset, __constant F * M, int threadCols ) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + + if( dx < threadCols && dy < dst_rows) + { + dx = (dx<<2) - (dst_offset&3); + + int round_delta = ((AB_SCALE >> INTER_BITS) >> 1); + + int4 X, Y; + short4 ax, ay; + int4 sx, sy; + int4 DX = (int4)(dx, dx+1, dx+2, dx+3); + DX = (DX << AB_BITS); + F4 M0DX, M3DX; + M0DX = M[0] * convert_F4(DX); + M3DX = M[3] * convert_F4(DX); + X = convert_int4(rint(M0DX)); + Y = convert_int4(rint(M3DX)); + + int tmp1, tmp2; + tmp1 = rint((M[1]*dy + M[2]) * AB_SCALE); + tmp2 = rint((M[4]*dy + M[5]) * AB_SCALE); + + X += tmp1 + round_delta; + Y += tmp2 + round_delta; + + X = X >> (AB_BITS - INTER_BITS); + Y = Y >> (AB_BITS - INTER_BITS); + + sx = convert_int4(convert_short4(X >> INTER_BITS)); + sy = convert_int4(convert_short4(Y >> INTER_BITS)); + ax = convert_short4(X & (INTER_TAB_SIZE-1)); + ay = convert_short4(Y & (INTER_TAB_SIZE-1)); + + uchar4 v0, v1, v2,v3; + int4 scon0, scon1, scon2, scon3; + int4 spos0, spos1, spos2, spos3; + + scon0 = (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows); + scon1 = (sx+1 >= 0 && sx+1 < src_cols && sy >= 0 && sy < src_rows); + scon2 = (sx >= 0 && sx < src_cols && sy+1 >= 0 && sy+1 < src_rows); + scon3 = (sx+1 >= 0 && sx+1 < src_cols && sy+1 >= 0 && sy+1 < src_rows); + spos0 = src_offset + sy * srcStep + sx; + spos1 = src_offset + sy * srcStep + sx + 1; + spos2 = src_offset + (sy+1) * srcStep + sx; + spos3 = src_offset + (sy+1) * srcStep + sx + 1; + + v0.s0 = scon0.s0 ? src[spos0.s0] : 0; + v1.s0 = scon1.s0 ? src[spos1.s0] : 0; + v2.s0 = scon2.s0 ? src[spos2.s0] : 0; + v3.s0 = scon3.s0 ? src[spos3.s0] : 0; + + v0.s1 = scon0.s1 ? src[spos0.s1] : 0; + v1.s1 = scon1.s1 ? src[spos1.s1] : 0; + v2.s1 = scon2.s1 ? src[spos2.s1] : 0; + v3.s1 = scon3.s1 ? src[spos3.s1] : 0; + + v0.s2 = scon0.s2 ? src[spos0.s2] : 0; + v1.s2 = scon1.s2 ? src[spos1.s2] : 0; + v2.s2 = scon2.s2 ? src[spos2.s2] : 0; + v3.s2 = scon3.s2 ? src[spos3.s2] : 0; + + v0.s3 = scon0.s3 ? src[spos0.s3] : 0; + v1.s3 = scon1.s3 ? src[spos1.s3] : 0; + v2.s3 = scon2.s3 ? src[spos2.s3] : 0; + v3.s3 = scon3.s3 ? src[spos3.s3] : 0; + + short4 itab0, itab1, itab2, itab3; + float4 taby, tabx; + taby = INTER_SCALE * convert_float4(ay); + tabx = INTER_SCALE * convert_float4(ax); + + itab0 = convert_short4_sat(( (1.0f-taby)*(1.0f-tabx) * (float4)INTER_REMAP_COEF_SCALE )); + itab1 = convert_short4_sat(( (1.0f-taby)*tabx * (float4)INTER_REMAP_COEF_SCALE )); + itab2 = convert_short4_sat(( taby*(1.0f-tabx) * (float4)INTER_REMAP_COEF_SCALE )); + itab3 = convert_short4_sat(( taby*tabx * (float4)INTER_REMAP_COEF_SCALE )); + + + int4 val; + uchar4 tval; + val = convert_int4(v0) * convert_int4(itab0) + convert_int4(v1) * convert_int4(itab1) + + convert_int4(v2) * convert_int4(itab2) + convert_int4(v3) * convert_int4(itab3); + tval = convert_uchar4_sat ( (val + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ) ; + + __global uchar4 * d =(__global uchar4 *)(dst+dst_offset+dy*dstStep+dx); + uchar4 dval = *d; + DX = (int4)(dx, dx+1, dx+2, dx+3); + int4 dcon = DX >= 0 && DX < dst_cols && dy >= 0 && dy < dst_rows; + dval = convert_uchar4(dcon != 0) ? 
tval : dval; + *d = dval; + } +} + +__kernel void warpAffineCubic_C1_D0(__global uchar * src, __global uchar * dst, int src_cols, int src_rows, + int dst_cols, int dst_rows, int srcStep, int dstStep, + int src_offset, int dst_offset, __constant F * M, int threadCols ) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if( dx < threadCols && dy < dst_rows) + { + int round_delta = ((AB_SCALE>>INTER_BITS)>>1); + + int X0 = rint(M[0] * dx * AB_SCALE); + int Y0 = rint(M[3] * dx * AB_SCALE); + X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta; + Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta; + int X = X0 >> (AB_BITS - INTER_BITS); + int Y = Y0 >> (AB_BITS - INTER_BITS); + + short sx = (short)(X >> INTER_BITS) - 1; + short sy = (short)(Y >> INTER_BITS) - 1; + short ay = (short)(Y & (INTER_TAB_SIZE-1)); + short ax = (short)(X & (INTER_TAB_SIZE-1)); + + uchar v[16]; + int i, j; + +#pragma unroll 4 + for(i=0; i<4; i++) + for(j=0; j<4; j++) + { + v[i*4+j] = (sx+j >= 0 && sx+j < src_cols && sy+i >= 0 && sy+i < src_rows) ? src[src_offset+(sy+i) * srcStep + (sx+j)] : 0; + } + + short itab[16]; + float tab1y[4], tab1x[4]; + float axx, ayy; + + ayy = 1.f/INTER_TAB_SIZE * ay; + axx = 1.f/INTER_TAB_SIZE * ax; + interpolateCubic(ayy, tab1y); + interpolateCubic(axx, tab1x); + int isum = 0; + +#pragma unroll 16 + for( i=0; i<16; i++ ) + { + F v = tab1y[(i>>2)] * tab1x[(i&3)]; + isum += itab[i] = convert_short_sat( rint( v * INTER_REMAP_COEF_SCALE ) ); + } + + if( isum != INTER_REMAP_COEF_SCALE ) + { + int k1, k2; + int diff = isum - INTER_REMAP_COEF_SCALE; + int Mk1=2, Mk2=2, mk1=2, mk2=2; + for( k1 = 2; k1 < 4; k1++ ) + for( k2 = 2; k2 < 4; k2++ ) + { + if( itab[(k1<<2)+k2] < itab[(mk1<<2)+mk2] ) + mk1 = k1, mk2 = k2; + else if( itab[(k1<<2)+k2] > itab[(Mk1<<2)+Mk2] ) + Mk1 = k1, Mk2 = k2; + } + diff<0 ? 
(itab[(Mk1<<2)+Mk2]=(short)(itab[(Mk1<<2)+Mk2]-diff)) : (itab[(mk1<<2)+mk2]=(short)(itab[(mk1<<2)+mk2]-diff));
+        }
+
+        if( dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+        {
+            int sum=0;
+            for ( i =0; i<16; i++ )
+            {
+                sum += v[i] * itab[i] ;
+            }
+            dst[dst_offset+dy*dstStep+dx] = convert_uchar_sat( (sum + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ) ;
+        }
+    }
+}
+
+/**********************************************8UC4*********************************************
+***********************************************************************************************/
+
+__kernel void warpAffineNN_C4_D0(__global uchar4 const * restrict src, __global uchar4 * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta = (AB_SCALE >> 1);
+
+        int X0 = rint(M[0] * dx * AB_SCALE);
+        int Y0 = rint(M[3] * dx * AB_SCALE);
+        X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta;
+        Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta;
+
+        int sx0 = (short)(X0 >> AB_BITS);
+        int sy0 = (short)(Y0 >> AB_BITS);
+
+        if(dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+            dst[(dst_offset>>2)+dy*(dstStep>>2)+dx]= (sx0>=0 && sx0<src_cols && sy0>=0 && sy0<src_rows) ? src[(src_offset>>2)+sy0*(srcStep>>2)+sx0] : (uchar4)0;
+    }
+}
+
+__kernel void warpAffineLinear_C4_D0(__global uchar4 const * restrict src, __global uchar4 * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta = AB_SCALE/INTER_TAB_SIZE/2;
+
+        src_offset = (src_offset>>2);
+        srcStep = (srcStep>>2);
+
+        int tmp = (dx << AB_BITS);
+        int X0 = rint(M[0] * tmp);
+        int Y0 = rint(M[3] * tmp);
+        X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta;
+        Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta;
+        X0 = X0 >> (AB_BITS - INTER_BITS);
+        Y0 = Y0 >> (AB_BITS - INTER_BITS);
+
+        short sx0 = (short)(X0 >> INTER_BITS);
+        short sy0 = (short)(Y0 >> INTER_BITS);
+        short ax0 = (short)(X0 & (INTER_TAB_SIZE-1));
+        short ay0 = (short)(Y0 & (INTER_TAB_SIZE-1));
+
+        int4 v0, v1, v2, v3;
+
+        v0 = (sx0 >= 0 && sx0 < src_cols && sy0 >= 0 && sy0 < src_rows) ? convert_int4(src[src_offset+sy0 * srcStep + sx0]) : 0;
+        v1 = (sx0+1 >= 0 && sx0+1 < src_cols && sy0 >= 0 && sy0 < src_rows) ? convert_int4(src[src_offset+sy0 * srcStep + sx0+1]) : 0;
+        v2 = (sx0 >= 0 && sx0 < src_cols && sy0+1 >= 0 && sy0+1 < src_rows) ? convert_int4(src[src_offset+(sy0+1) * srcStep + sx0]) : 0;
+        v3 = (sx0+1 >= 0 && sx0+1 < src_cols && sy0+1 >= 0 && sy0+1 < src_rows) ? 
convert_int4(src[src_offset+(sy0+1) * srcStep + sx0+1]) : 0; + + int itab0, itab1, itab2, itab3; + float taby, tabx; + taby = 1.f/INTER_TAB_SIZE*ay0; + tabx = 1.f/INTER_TAB_SIZE*ax0; + + itab0 = convert_short_sat(rint( (1.0f-taby)*(1.0f-tabx) * INTER_REMAP_COEF_SCALE )); + itab1 = convert_short_sat(rint( (1.0f-taby)*tabx * INTER_REMAP_COEF_SCALE )); + itab2 = convert_short_sat(rint( taby*(1.0f-tabx) * INTER_REMAP_COEF_SCALE )); + itab3 = convert_short_sat(rint( taby*tabx * INTER_REMAP_COEF_SCALE )); + + int4 val; + val = v0 * itab0 + v1 * itab1 + v2 * itab2 + v3 * itab3; + + if(dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows) + dst[(dst_offset>>2)+dy*(dstStep>>2)+dx] = convert_uchar4_sat ( (val + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ) ; + } +} + +__kernel void warpAffineCubic_C4_D0(__global uchar4 const * restrict src, __global uchar4 * dst, int src_cols, int src_rows, + int dst_cols, int dst_rows, int srcStep, int dstStep, + int src_offset, int dst_offset, __constant F * M, int threadCols ) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if( dx < threadCols && dy < dst_rows) + { + int round_delta = ((AB_SCALE>>INTER_BITS)>>1); + + src_offset = (src_offset>>2); + srcStep = (srcStep>>2); + dst_offset = (dst_offset>>2); + dstStep = (dstStep>>2); + + int tmp = (dx << AB_BITS); + int X0 = rint(M[0] * tmp); + int Y0 = rint(M[3] * tmp); + X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta; + Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta; + X0 = X0 >> (AB_BITS - INTER_BITS); + Y0 = Y0 >> (AB_BITS - INTER_BITS); + + int sx = (short)(X0 >> INTER_BITS) - 1; + int sy = (short)(Y0 >> INTER_BITS) - 1; + int ay = (short)(Y0 & (INTER_TAB_SIZE-1)); + int ax = (short)(X0 & (INTER_TAB_SIZE-1)); + + uchar4 v[16]; + int i,j; +#pragma unroll 4 + for(i=0; i<4; i++) + for(j=0; j<4; j++) + { + v[i*4+j] = (sx+j >= 0 && sx+j < src_cols && sy+i >= 0 && sy+i < src_rows) ? (src[src_offset+(sy+i) * srcStep + (sx+j)]) : (uchar4)0; + } + int itab[16]; + float tab1y[4], tab1x[4]; + float axx, ayy; + + ayy = INTER_SCALE * ay; + axx = INTER_SCALE * ax; + interpolateCubic(ayy, tab1y); + interpolateCubic(axx, tab1x); + int isum = 0; + +#pragma unroll 16 + for( i=0; i<16; i++ ) + { + float tmp; + tmp = tab1y[(i>>2)] * tab1x[(i&3)] * INTER_REMAP_COEF_SCALE; + itab[i] = rint(tmp); + isum += itab[i]; + } + + if( isum != INTER_REMAP_COEF_SCALE ) + { + int k1, k2; + int diff = isum - INTER_REMAP_COEF_SCALE; + int Mk1=2, Mk2=2, mk1=2, mk2=2; + + for( k1 = 2; k1 < 4; k1++ ) + for( k2 = 2; k2 < 4; k2++ ) + { + + if( itab[(k1<<2)+k2] < itab[(mk1<<2)+mk2] ) + mk1 = k1, mk2 = k2; + else if( itab[(k1<<2)+k2] > itab[(Mk1<<2)+Mk2] ) + Mk1 = k1, Mk2 = k2; + } + + diff<0 ? 
(itab[(Mk1<<2)+Mk2]=(short)(itab[(Mk1<<2)+Mk2]-diff)) : (itab[(mk1<<2)+mk2]=(short)(itab[(mk1<<2)+mk2]-diff));
+        }
+
+        if( dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+        {
+            int4 sum=0;
+            for ( i =0; i<16; i++ )
+            {
+                sum += convert_int4(v[i]) * itab[i];
+            }
+            dst[dst_offset+dy*dstStep+dx] = convert_uchar4_sat( (sum + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ) ;
+        }
+    }
+}
+
+
+/**********************************************32FC1********************************************
+***********************************************************************************************/
+
+__kernel void warpAffineNN_C1_D5(__global float * src, __global float * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta = AB_SCALE/2;
+
+        int X0 = rint(M[0] * dx * AB_SCALE);
+        int Y0 = rint(M[3] * dx * AB_SCALE);
+        X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta;
+        Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta;
+
+        short sx0 = (short)(X0 >> AB_BITS);
+        short sy0 = (short)(Y0 >> AB_BITS);
+
+        if(dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+            dst[(dst_offset>>2)+dy*dstStep+dx]= (sx0>=0 && sx0<src_cols && sy0>=0 && sy0<src_rows) ? src[(src_offset>>2)+sy0*srcStep+sx0] : 0;
+    }
+}
+
+__kernel void warpAffineLinear_C1_D5(__global float * src, __global float * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta = AB_SCALE/INTER_TAB_SIZE/2;
+
+        src_offset = (src_offset>>2);
+
+        int X0 = rint(M[0] * dx * AB_SCALE);
+        int Y0 = rint(M[3] * dx * AB_SCALE);
+        X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta;
+        Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta;
+        X0 = X0 >> (AB_BITS - INTER_BITS);
+        Y0 = Y0 >> (AB_BITS - INTER_BITS);
+
+        short sx0 = (short)(X0 >> INTER_BITS);
+        short sy0 = (short)(Y0 >> INTER_BITS);
+        short ax0 = (short)(X0 & (INTER_TAB_SIZE-1));
+        short ay0 = (short)(Y0 & (INTER_TAB_SIZE-1));
+
+        float v0, v1, v2, v3;
+
+        v0 = (sx0 >= 0 && sx0 < src_cols && sy0 >= 0 && sy0 < src_rows) ? src[src_offset+sy0 * srcStep + sx0] : 0;
+        v1 = (sx0+1 >= 0 && sx0+1 < src_cols && sy0 >= 0 && sy0 < src_rows) ? src[src_offset+sy0 * srcStep + sx0+1] : 0;
+        v2 = (sx0 >= 0 && sx0 < src_cols && sy0+1 >= 0 && sy0+1 < src_rows) ? src[src_offset+(sy0+1) * srcStep + sx0] : 0;
+        v3 = (sx0+1 >= 0 && sx0+1 < src_cols && sy0+1 >= 0 && sy0+1 < src_rows) ? src[src_offset+(sy0+1) * srcStep + sx0+1] : 0;
+
+        float tab[4];
+        float taby[2], tabx[2];
+        taby[0] = 1.0f - 1.f/INTER_TAB_SIZE*ay0;
+        taby[1] = 1.f/INTER_TAB_SIZE*ay0;
+        tabx[0] = 1.0f - 1.f/INTER_TAB_SIZE*ax0;
+        tabx[1] = 1.f/INTER_TAB_SIZE*ax0;
+
+        tab[0] = taby[0] * tabx[0];
+        tab[1] = taby[0] * tabx[1];
+        tab[2] = taby[1] * tabx[0];
+        tab[3] = taby[1] * tabx[1];
+
+        float sum = 0;
+        sum += v0 * tab[0] + v1 * tab[1] + v2 * tab[2] + v3 * tab[3];
+        if(dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+            dst[(dst_offset>>2)+dy*dstStep+dx] = sum;
+    }
+}
+
+__kernel void warpAffineCubic_C1_D5(__global float * src, __global float * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta = AB_SCALE/INTER_TAB_SIZE/2;
+
+        src_offset = (src_offset>>2);
+        dst_offset = (dst_offset>>2);
+
+        int X0 = rint(M[0] * dx * AB_SCALE);
+        int Y0 = rint(M[3] * dx * AB_SCALE);
+        X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta;
+        Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta;
+        X0 = X0 >> (AB_BITS - INTER_BITS);
+        Y0 = Y0 >> (AB_BITS - INTER_BITS);
+
+        short sx = (short)(X0 >> INTER_BITS) - 1;
+        short sy = (short)(Y0 >> INTER_BITS) - 1;
+        short ay = (short)(Y0 & (INTER_TAB_SIZE-1));
+        short ax = (short)(X0 & (INTER_TAB_SIZE-1));
+
+        float v[16];
+        int i;
+
+        for(i=0; i<16; i++)
+            v[i] = (sx+(i&3) >= 0 && sx+(i&3) < src_cols && sy+(i>>2) >= 0 && sy+(i>>2) < src_rows) ? src[src_offset+(sy+(i>>2)) * srcStep + (sx+(i&3))] : 0;
+
+        float tab[16];
+        float tab1y[4], tab1x[4];
+        float axx, ayy;
+
+        ayy = 1.f/INTER_TAB_SIZE * ay;
+        axx = 1.f/INTER_TAB_SIZE * ax;
+        interpolateCubic(ayy, tab1y);
+        interpolateCubic(axx, tab1x);
+
+#pragma unroll 4
+        for( i=0; i<16; i++ )
+        {
+            tab[i] = tab1y[(i>>2)] * tab1x[(i&3)];
+        }
+
+        if( dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+        {
+            float sum = 0;
+#pragma unroll 4
+            for ( i =0; i<16; i++ )
+            {
+                sum += v[i] * tab[i];
+            }
+            dst[dst_offset+dy*dstStep+dx] = sum;
+
+        }
+    }
+}
+
+
+/**********************************************32FC4********************************************
+***********************************************************************************************/
+
+__kernel void warpAffineNN_C4_D5(__global float4 * src, __global float4 * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta = AB_SCALE/2;
+
+        int X0 = rint(M[0] * dx * AB_SCALE);
+        int Y0 = rint(M[3] * dx * AB_SCALE);
+        X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta;
+        Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta;
+
+        short sx0 = (short)(X0 >> AB_BITS);
+        short sy0 = (short)(Y0 >> AB_BITS);
+
+        if(dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows)
+            dst[(dst_offset>>4)+dy*(dstStep>>2)+dx]= (sx0>=0 && sx0<src_cols && sy0>=0 && sy0<src_rows) ? src[(src_offset>>4)+sy0*(srcStep>>2)+sx0] : (float4)0;
+    }
+}
+
+__kernel void warpAffineLinear_C4_D5(__global float4 * src, __global float4 * dst, int src_cols, int src_rows,
+                            int dst_cols, int dst_rows, int srcStep, int dstStep,
+                            int src_offset, int dst_offset, __constant F * M, int threadCols )
+{
+    int dx = get_global_id(0);
+    int dy = get_global_id(1);
+
+    if( dx < threadCols && dy < dst_rows)
+    {
+        int round_delta 
= AB_SCALE/INTER_TAB_SIZE/2; + + src_offset = (src_offset>>4); + dst_offset = (dst_offset>>4); + srcStep = (srcStep>>2); + dstStep = (dstStep>>2); + + int X0 = rint(M[0] * dx * AB_SCALE); + int Y0 = rint(M[3] * dx * AB_SCALE); + X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta; + Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta; + X0 = X0 >> (AB_BITS - INTER_BITS); + Y0 = Y0 >> (AB_BITS - INTER_BITS); + + short sx0 = (short)(X0 >> INTER_BITS); + short sy0 = (short)(Y0 >> INTER_BITS); + short ax0 = (short)(X0 & (INTER_TAB_SIZE-1)); + short ay0 = (short)(Y0 & (INTER_TAB_SIZE-1)); + + float4 v0, v1, v2, v3; + + v0 = (sx0 >= 0 && sx0 < src_cols && sy0 >= 0 && sy0 < src_rows) ? src[src_offset+sy0 * srcStep + sx0] : (float4)0; + v1 = (sx0+1 >= 0 && sx0+1 < src_cols && sy0 >= 0 && sy0 < src_rows) ? src[src_offset+sy0 * srcStep + sx0+1] : (float4)0; + v2 = (sx0 >= 0 && sx0 < src_cols && sy0+1 >= 0 && sy0+1 < src_rows) ? src[src_offset+(sy0+1) * srcStep + sx0] : (float4)0; + v3 = (sx0+1 >= 0 && sx0+1 < src_cols && sy0+1 >= 0 && sy0+1 < src_rows) ? src[src_offset+(sy0+1) * srcStep + sx0+1] : (float4)0; + + float tab[4]; + float taby[2], tabx[2]; + taby[0] = 1.0f - 1.f/INTER_TAB_SIZE*ay0; + taby[1] = 1.f/INTER_TAB_SIZE*ay0; + tabx[0] = 1.0f - 1.f/INTER_TAB_SIZE*ax0; + tabx[1] = 1.f/INTER_TAB_SIZE*ax0; + + tab[0] = taby[0] * tabx[0]; + tab[1] = taby[0] * tabx[1]; + tab[2] = taby[1] * tabx[0]; + tab[3] = taby[1] * tabx[1]; + + float4 sum = 0; + sum += v0 * tab[0] + v1 * tab[1] + v2 * tab[2] + v3 * tab[3]; + if(dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows) + dst[dst_offset+dy*dstStep+dx] = sum; + } +} + +__kernel void warpAffineCubic_C4_D5(__global float4 * src, __global float4 * dst, int src_cols, int src_rows, + int dst_cols, int dst_rows, int srcStep, int dstStep, + int src_offset, int dst_offset, __constant F * M, int threadCols ) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if( dx < threadCols && dy < dst_rows) + { + int round_delta = AB_SCALE/INTER_TAB_SIZE/2; + + src_offset = (src_offset>>4); + dst_offset = (dst_offset>>4); + srcStep = (srcStep>>2); + dstStep = (dstStep>>2); + + int X0 = rint(M[0] * dx * AB_SCALE); + int Y0 = rint(M[3] * dx * AB_SCALE); + X0 += rint((M[1]*dy + M[2]) * AB_SCALE) + round_delta; + Y0 += rint((M[4]*dy + M[5]) * AB_SCALE) + round_delta; + X0 = X0 >> (AB_BITS - INTER_BITS); + Y0 = Y0 >> (AB_BITS - INTER_BITS); + + short sx = (short)(X0 >> INTER_BITS) - 1; + short sy = (short)(Y0 >> INTER_BITS) - 1; + short ay = (short)(Y0 & (INTER_TAB_SIZE-1)); + short ax = (short)(X0 & (INTER_TAB_SIZE-1)); + + float4 v[16]; + int i; + + for(i=0; i<16; i++) + v[i] = (sx+(i&3) >= 0 && sx+(i&3) < src_cols && sy+(i>>2) >= 0 && sy+(i>>2) < src_rows) ? 
src[src_offset+(sy+(i>>2)) * srcStep + (sx+(i&3))] : (float4)0; + + float tab[16]; + float tab1y[4], tab1x[4]; + float axx, ayy; + + ayy = 1.f/INTER_TAB_SIZE * ay; + axx = 1.f/INTER_TAB_SIZE * ax; + interpolateCubic(ayy, tab1y); + interpolateCubic(axx, tab1x); + +#pragma unroll 4 + for( i=0; i<16; i++ ) + { + tab[i] = tab1y[(i>>2)] * tab1x[(i&3)]; + } + + if( dx >= 0 && dx < dst_cols && dy >= 0 && dy < dst_rows) + { + float4 sum = 0; +#pragma unroll 4 + for ( i =0; i<16; i++ ) + { + sum += v[i] * tab[i]; + } + dst[dst_offset+dy*dstStep+dx] = sum; + + } + } +} diff --git a/modules/imgproc/src/opencl/warp_perspective.cl b/modules/imgproc/src/opencl/warp_perspective.cl new file mode 100644 index 0000000000..211433e709 --- /dev/null +++ b/modules/imgproc/src/opencl/warp_perspective.cl @@ -0,0 +1,223 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved. +// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Zhang Ying, zhangying913@gmail.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifdef DOUBLE_SUPPORT +#ifdef cl_amd_fp64 +#pragma OPENCL EXTENSION cl_amd_fp64:enable +#elif defined (cl_khr_fp64) +#pragma OPENCL EXTENSION cl_khr_fp64:enable +#endif +#define CT double +#else +#define CT float +#endif + +#define INTER_BITS 5 +#define INTER_TAB_SIZE (1 << INTER_BITS) +#define INTER_SCALE 1.f / INTER_TAB_SIZE +#define AB_BITS max(10, (int)INTER_BITS) +#define AB_SCALE (1 << AB_BITS) +#define INTER_REMAP_COEF_BITS 15 +#define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS) + +#define noconvert + +#ifdef INTER_NEAREST + +__kernel void warpPerspective(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols, + __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols, + __constant CT * M, T scalar) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if (dx < dst_cols && dy < dst_rows) + { + CT X0 = M[0] * dx + M[1] * dy + M[2]; + CT Y0 = M[3] * dx + M[4] * dy + M[5]; + CT W = M[6] * dx + M[7] * dy + M[8]; + W = W != 0.0f ? 1.f / W : 0.0f; + short sx = convert_short_sat_rte(X0*W); + short sy = convert_short_sat_rte(Y0*W); + + int dst_index = mad24(dy, dst_step, dx * (int)sizeof(T) + dst_offset); + __global T * dst = (__global T *)(dstptr + dst_index); + + if (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows) + { + int src_index = mad24(sy, src_step, sx * (int)sizeof(T) + src_offset); + __global const T * src = (__global const T *)(srcptr + src_index); + dst[0] = src[0]; + } + else + dst[0] = scalar; + } +} + +#elif defined INTER_LINEAR + +__kernel void warpPerspective(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols, + __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols, + __constant CT * M, WT scalar) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if (dx < dst_cols && dy < dst_rows) + { + CT X0 = M[0] * dx + M[1] * dy + M[2]; + CT Y0 = M[3] * dx + M[4] * dy + M[5]; + CT W = M[6] * dx + M[7] * dy + M[8]; + W = W != 0.0f ? INTER_TAB_SIZE / W : 0.0f; + int X = rint(X0 * W), Y = rint(Y0 * W); + + short sx = convert_short_sat(X >> INTER_BITS); + short sy = convert_short_sat(Y >> INTER_BITS); + short ay = (short)(Y & (INTER_TAB_SIZE - 1)); + short ax = (short)(X & (INTER_TAB_SIZE - 1)); + + WT v0 = (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows) ? + convertToWT(*(__global const T *)(srcptr + mad24(sy, src_step, src_offset + sx * (int)sizeof(T)))) : scalar; + WT v1 = (sx+1 >= 0 && sx+1 < src_cols && sy >= 0 && sy < src_rows) ? + convertToWT(*(__global const T *)(srcptr + mad24(sy, src_step, src_offset + (sx+1) * (int)sizeof(T)))) : scalar; + WT v2 = (sx >= 0 && sx < src_cols && sy+1 >= 0 && sy+1 < src_rows) ? + convertToWT(*(__global const T *)(srcptr + mad24(sy+1, src_step, src_offset + sx * (int)sizeof(T)))) : scalar; + WT v3 = (sx+1 >= 0 && sx+1 < src_cols && sy+1 >= 0 && sy+1 < src_rows) ? 
+ convertToWT(*(__global const T *)(srcptr + mad24(sy+1, src_step, src_offset + (sx+1) * (int)sizeof(T)))) : scalar; + + float taby = 1.f/INTER_TAB_SIZE*ay; + float tabx = 1.f/INTER_TAB_SIZE*ax; + + int dst_index = mad24(dy, dst_step, dst_offset + dx * (int)sizeof(T)); + __global T * dst = (__global T *)(dstptr + dst_index); + +#if depth <= 4 + int itab0 = convert_short_sat_rte( (1.0f-taby)*(1.0f-tabx) * INTER_REMAP_COEF_SCALE ); + int itab1 = convert_short_sat_rte( (1.0f-taby)*tabx * INTER_REMAP_COEF_SCALE ); + int itab2 = convert_short_sat_rte( taby*(1.0f-tabx) * INTER_REMAP_COEF_SCALE ); + int itab3 = convert_short_sat_rte( taby*tabx * INTER_REMAP_COEF_SCALE ); + + WT val = v0 * itab0 + v1 * itab1 + v2 * itab2 + v3 * itab3; + dst[0] = convertToT((val + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS); +#else + float tabx2 = 1.0f - tabx, taby2 = 1.0f - taby; + WT val = v0 * tabx2 * taby2 + v1 * tabx * taby2 + v2 * tabx2 * taby + v3 * tabx * taby; + dst[0] = convertToT(val); +#endif + } +} + +#elif defined INTER_CUBIC + +inline void interpolateCubic( float x, float* coeffs ) +{ + const float A = -0.75f; + + coeffs[0] = ((A*(x + 1.f) - 5.0f*A)*(x + 1.f) + 8.0f*A)*(x + 1.f) - 4.0f*A; + coeffs[1] = ((A + 2.f)*x - (A + 3.f))*x*x + 1.f; + coeffs[2] = ((A + 2.f)*(1.f - x) - (A + 3.f))*(1.f - x)*(1.f - x) + 1.f; + coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2]; +} + +__kernel void warpPerspective(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols, + __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols, + __constant CT * M, WT scalar) +{ + int dx = get_global_id(0); + int dy = get_global_id(1); + + if (dx < dst_cols && dy < dst_rows) + { + CT X0 = M[0] * dx + M[1] * dy + M[2]; + CT Y0 = M[3] * dx + M[4] * dy + M[5]; + CT W = M[6] * dx + M[7] * dy + M[8]; + W = W != 0.0f ? INTER_TAB_SIZE / W : 0.0f; + int X = rint(X0 * W), Y = rint(Y0 * W); + + short sx = convert_short_sat(X >> INTER_BITS) - 1; + short sy = convert_short_sat(Y >> INTER_BITS) - 1; + short ay = (short)(Y & (INTER_TAB_SIZE-1)); + short ax = (short)(X & (INTER_TAB_SIZE-1)); + + WT v[16]; + #pragma unroll + for (int y = 0; y < 4; y++) + #pragma unroll + for (int x = 0; x < 4; x++) + v[mad24(y, 4, x)] = (sx+x >= 0 && sx+x < src_cols && sy+y >= 0 && sy+y < src_rows) ? 
+ convertToWT(*(__global const T *)(srcptr + mad24(sy+y, src_step, src_offset + (sx+x) * (int)sizeof(T)))) : scalar; + + float tab1y[4], tab1x[4]; + + float ayy = INTER_SCALE * ay; + float axx = INTER_SCALE * ax; + interpolateCubic(ayy, tab1y); + interpolateCubic(axx, tab1x); + + int dst_index = mad24(dy, dst_step, dst_offset + dx * (int)sizeof(T)); + __global T * dst = (__global T *)(dstptr + dst_index); + + WT sum = (WT)(0); +#if depth <= 4 + int itab[16]; + + #pragma unroll + for (int i = 0; i < 16; i++) + itab[i] = rint(tab1y[(i>>2)] * tab1x[(i&3)] * INTER_REMAP_COEF_SCALE); + + #pragma unroll + for (int i = 0; i < 16; i++) + sum += v[i] * itab[i]; + dst[0] = convertToT( (sum + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ); +#else + #pragma unroll + for (int i = 0; i < 16; i++) + sum += v[i] * tab1y[(i>>2)] * tab1x[(i&3)]; + dst[0] = convertToT( sum ); +#endif + } +} + +#endif diff --git a/modules/imgproc/test/ocl/test_warp.cpp b/modules/imgproc/test/ocl/test_warp.cpp index 6e549a4eca..fece5e7e45 100644 --- a/modules/imgproc/test/ocl/test_warp.cpp +++ b/modules/imgproc/test/ocl/test_warp.cpp @@ -61,7 +61,99 @@ namespace cvtest { namespace ocl { ///////////////////////////////////////////////////////////////////////////////////////////////// -// resize +// warpAffine & warpPerspective + +PARAM_TEST_CASE(WarpTestBase, MatType, Interpolation, bool, bool) +{ + int type, interpolation; + Size dsize; + bool useRoi, mapInverse; + + TEST_DECLARE_INPUT_PARATEMER(src) + TEST_DECLARE_OUTPUT_PARATEMER(dst) + + virtual void SetUp() + { + type = GET_PARAM(0); + interpolation = GET_PARAM(1); + mapInverse = GET_PARAM(2); + useRoi = GET_PARAM(3); + + if (mapInverse) + interpolation |= WARP_INVERSE_MAP; + } + + void random_roi() + { + dsize = randomSize(1, MAX_VALUE); + + Size roiSize = randomSize(1, MAX_VALUE); + Border srcBorder = randomBorder(0, useRoi ? MAX_VALUE : 0); + randomSubMat(src, src_roi, roiSize, srcBorder, type, -MAX_VALUE, MAX_VALUE); + + Border dstBorder = randomBorder(0, useRoi ? 
MAX_VALUE : 0);
+        randomSubMat(dst, dst_roi, dsize, dstBorder, type, -MAX_VALUE, MAX_VALUE);
+
+        UMAT_UPLOAD_INPUT_PARAMETER(src)
+        UMAT_UPLOAD_OUTPUT_PARAMETER(dst)
+    }
+
+    void Near(double threshold = 0.0)
+    {
+        EXPECT_MAT_NEAR(dst, udst, threshold);
+        EXPECT_MAT_NEAR(dst_roi, udst_roi, threshold);
+    }
+};
+
+/////warpAffine
+
+typedef WarpTestBase WarpAffine;
+
+OCL_TEST_P(WarpAffine, Mat)
+{
+    for (int j = 0; j < test_loop_times; j++)
+    {
+        random_roi();
+
+        Mat M = getRotationMatrix2D(Point2f(src_roi.cols / 2.0f, src_roi.rows / 2.0f),
+                                    rng.uniform(-180.f, 180.f), rng.uniform(0.4f, 2.0f));
+
+        OCL_OFF(cv::warpAffine(src_roi, dst_roi, M, dsize, interpolation));
+        OCL_ON(cv::warpAffine(usrc_roi, udst_roi, M, dsize, interpolation));
+
+        Near(1.0);
+    }
+}
+
+//// warpPerspective
+
+typedef WarpTestBase WarpPerspective;
+
+OCL_TEST_P(WarpPerspective, Mat)
+{
+    for (int j = 0; j < test_loop_times; j++)
+    {
+        random_roi();
+
+        float cols = static_cast<float>(src_roi.cols), rows = static_cast<float>(src_roi.rows);
+        float cols2 = cols / 2.0f, rows2 = rows / 2.0f;
+        Point2f sp[] = { Point2f(0.0f, 0.0f), Point2f(cols, 0.0f), Point2f(0.0f, rows), Point2f(cols, rows) };
+        Point2f dp[] = { Point2f(rng.uniform(0.0f, cols2), rng.uniform(0.0f, rows2)),
+                         Point2f(rng.uniform(cols2, cols), rng.uniform(0.0f, rows2)),
+                         Point2f(rng.uniform(0.0f, cols2), rng.uniform(rows2, rows)),
+                         Point2f(rng.uniform(cols2, cols), rng.uniform(rows2, rows)) };
+        Mat M = getPerspectiveTransform(sp, dp);
+
+        OCL_OFF(cv::warpPerspective(src_roi, dst_roi, M, dsize, interpolation));
+        OCL_ON(cv::warpPerspective(usrc_roi, udst_roi, M, dsize, interpolation));
+
+        Near(1.0);
+    }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+//// resize
 
 PARAM_TEST_CASE(Resize, MatType, double, double, Interpolation, bool)
 {
@@ -127,10 +219,22 @@ OCL_TEST_P(Resize, Mat)
 
 /////////////////////////////////////////////////////////////////////////////////////
 
-OCL_INSTANTIATE_TEST_CASE_P(ImgprocWarpResize, Resize, Combine(
-                            Values((MatType)CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4),
-                            Values(0.7, 0.4, 2.0),
-                            Values(0.3, 0.6, 2.0),
+OCL_INSTANTIATE_TEST_CASE_P(ImgprocWarp, WarpAffine, Combine(
+                            Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),
+                            Values((Interpolation)INTER_NEAREST, (Interpolation)INTER_LINEAR, (Interpolation)INTER_CUBIC),
+                            Bool(),
+                            Bool()));
+
+OCL_INSTANTIATE_TEST_CASE_P(ImgprocWarp, WarpPerspective, Combine(
+                            Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),
+                            Values((Interpolation)INTER_NEAREST, (Interpolation)INTER_LINEAR, (Interpolation)INTER_CUBIC),
+                            Bool(),
+                            Bool()));
+
+OCL_INSTANTIATE_TEST_CASE_P(ImgprocWarp, Resize, Combine(
+                            Values(CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, CV_32FC4),
+                            Values(0.5, 1.5, 2.0),
+                            Values(0.5, 1.5, 2.0),
                             Values((Interpolation)INTER_NEAREST, (Interpolation)INTER_LINEAR),
                             Bool()));
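
Usage note (illustrative sketch, not part of the patch): as the dispatch in cv::warpPerspective above shows, the OpenCL branch is only taken when OpenCL is usable and the caller passes a UMat destination; otherwise the existing CPU implementation runs unchanged. The snippet below shows how the accelerated path would typically be exercised through the T-API. The file names and matrix values are hypothetical; only the OpenCV calls themselves come from the API touched by this patch.

    #include <opencv2/core/ocl.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>   // imread/imwrite in the 3.0-dev layout assumed here

    int main()
    {
        cv::Mat src = cv::imread("input.png");   // hypothetical input image
        if (src.empty())
            return 1;

        // With a UMat destination the OpenCL kernel is tried first; if the type/border
        // check or kernel creation fails, OpenCV silently falls back to the CPU code.
        cv::UMat usrc = src.getUMat(cv::ACCESS_READ), udst;

        // 3x3 CV_64F homography, as required by the CV_Assert in ocl_warpPerspective().
        cv::Mat M = (cv::Mat_<double>(3, 3) << 1.0,  0.05, 0.0,
                                               0.0,  1.0,  0.0,
                                               1e-4, 0.0,  1.0);

        // cv::ocl::useOpenCL() can be checked (or set) beforehand to see which path runs.
        cv::warpPerspective(usrc, udst, M, src.size(), cv::INTER_LINEAR,
                            cv::BORDER_CONSTANT, cv::Scalar::all(0));

        cv::imwrite("warped.png", udst.getMat(cv::ACCESS_READ));
        return 0;
    }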