/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencl_kernels_core.hpp"

#ifdef __APPLE__
#undef  CV_NEON
#define CV_NEON 0
#endif

#define CV_SPLIT_MERGE_MAX_BLOCK_SIZE(cn) ((INT_MAX/4)/cn) // HAL implementation accepts 'int' len, so INT_MAX doesn't work here
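// Note on the cap above: the HAL split/merge kernels receive the block
// length as an 'int', and each block spans len*cn elements, so the limit is
// (INT_MAX/4)/cn rather than INT_MAX to keep element counts and the derived
// byte offsets safely within 'int' range.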

/****************************************************************************************\
*                                       split & merge                                    *
\****************************************************************************************/

typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn);

static SplitFunc getSplitFunc(int depth)
{
    static SplitFunc splitTab[] =
    {
        (SplitFunc)GET_OPTIMIZED(cv::hal::split8u), (SplitFunc)GET_OPTIMIZED(cv::hal::split8u), (SplitFunc)GET_OPTIMIZED(cv::hal::split16u), (SplitFunc)GET_OPTIMIZED(cv::hal::split16u),
        (SplitFunc)GET_OPTIMIZED(cv::hal::split32s), (SplitFunc)GET_OPTIMIZED(cv::hal::split32s), (SplitFunc)GET_OPTIMIZED(cv::hal::split64s), 0
    };

    return splitTab[depth];
}
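// The table above is indexed by CV_MAT_DEPTH. Depths of equal element size
// share one kernel (8u/8s, 16u/16s, 32s/32f, and 64f reuses the 64-bit
// integer variant), since splitting only moves bytes and never interprets
// the values; the trailing 0 marks the one unsupported depth slot.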

typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);

static MergeFunc getMergeFunc(int depth)
{
    static MergeFunc mergeTab[] =
    {
        (MergeFunc)GET_OPTIMIZED(cv::hal::merge8u), (MergeFunc)GET_OPTIMIZED(cv::hal::merge8u), (MergeFunc)GET_OPTIMIZED(cv::hal::merge16u), (MergeFunc)GET_OPTIMIZED(cv::hal::merge16u),
        (MergeFunc)GET_OPTIMIZED(cv::hal::merge32s), (MergeFunc)GET_OPTIMIZED(cv::hal::merge32s), (MergeFunc)GET_OPTIMIZED(cv::hal::merge64s), 0
    };

    return mergeTab[depth];
}

void cv::split(const Mat& src, Mat* mv)
{
    CV_INSTRUMENT_REGION()

    int k, depth = src.depth(), cn = src.channels();
    if( cn == 1 )
    {
        src.copyTo(mv[0]);
        return;
    }

    SplitFunc func = getSplitFunc(depth);
    CV_Assert( func != 0 );

    size_t esz = src.elemSize(), esz1 = src.elemSize1();
    size_t blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &src;
    for( k = 0; k < cn; k++ )
    {
        mv[k].create(src.dims, src.size, depth);
        arrays[k+1] = &mv[k];
    }

    NAryMatIterator it(arrays, ptrs, cn+1);
    size_t total = it.size;
    size_t blocksize = std::min((size_t)CV_SPLIT_MERGE_MAX_BLOCK_SIZE(cn), cn <= 4 ? total : std::min(total, blocksize0));

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( size_t j = 0; j < total; j += blocksize )
        {
            size_t bsz = std::min(total - j, blocksize);
            func( ptrs[0], &ptrs[1], (int)bsz, cn );

            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
            }
        }
    }
}
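// A minimal usage sketch for the raw-pointer overload above (illustrative
// only, not part of this translation unit):
//
//     cv::Mat bgr(480, 640, CV_8UC3, cv::Scalar(10, 20, 30));
//     std::vector<cv::Mat> planes(bgr.channels());
//     cv::split(bgr, &planes[0]);   // planes[0..2] become CV_8UC1 B, G, R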

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_split( InputArray _m, OutputArrayOfArrays _mv )
{
    int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
            rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;

    String dstargs, processelem, indexdecl;
    for (int i = 0; i < cn; ++i)
    {
        dstargs += format("DECLARE_DST_PARAM(%d)", i);
        indexdecl += format("DECLARE_INDEX(%d)", i);
        processelem += format("PROCESS_ELEM(%d)", i);
    }

    ocl::Kernel k("split", ocl::core::split_merge_oclsrc,
                  format("-D T=%s -D OP_SPLIT -D cn=%d -D DECLARE_DST_PARAMS=%s"
                         " -D PROCESS_ELEMS_N=%s -D DECLARE_INDEX_N=%s",
                         ocl::memopTypeToStr(depth), cn, dstargs.c_str(),
                         processelem.c_str(), indexdecl.c_str()));
    if (k.empty())
        return false;

    Size size = _m.size();
    _mv.create(cn, 1, depth);
    for (int i = 0; i < cn; ++i)
        _mv.create(size, depth, i);

    std::vector<UMat> dst;
    _mv.getUMatVector(dst);

    int argidx = k.set(0, ocl::KernelArg::ReadOnly(_m.getUMat()));
    for (int i = 0; i < cn; ++i)
        argidx = k.set(argidx, ocl::KernelArg::WriteOnlyNoSize(dst[i]));
    k.set(argidx, rowsPerWI);

    size_t globalsize[2] = { (size_t)size.width, ((size_t)size.height + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::split(InputArray _m, OutputArrayOfArrays _mv)
{
    CV_INSTRUMENT_REGION()

    CV_OCL_RUN(_m.dims() <= 2 && _mv.isUMatVector(),
               ocl_split(_m, _mv))

    Mat m = _m.getMat();
    if( m.empty() )
    {
        _mv.release();
        return;
    }

    CV_Assert( !_mv.fixedType() || _mv.empty() || _mv.type() == m.depth() );

    int depth = m.depth(), cn = m.channels();
    _mv.create(cn, 1, depth);
    for (int i = 0; i < cn; ++i)
        _mv.create(m.dims, m.size.p, depth, i);

    std::vector<Mat> dst;
    _mv.getMatVector(dst);

    split(m, &dst[0]);
}
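// Dispatch note for the wrapper above: CV_OCL_RUN attempts ocl_split() first
// and returns early when the OpenCL kernel builds and runs successfully;
// otherwise execution falls through to the Mat-based path.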

void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
{
    CV_INSTRUMENT_REGION()

    CV_Assert( mv && n > 0 );

    int depth = mv[0].depth();
    bool allch1 = true;
    int k, cn = 0;
    size_t i;

    for( i = 0; i < n; i++ )
    {
        CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
        allch1 = allch1 && mv[i].channels() == 1;
        cn += mv[i].channels();
    }

    CV_Assert( 0 < cn && cn <= CV_CN_MAX );
    _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
    Mat dst = _dst.getMat();

    if( n == 1 )
    {
        mv[0].copyTo(dst);
        return;
    }

    if( !allch1 )
    {
        AutoBuffer<int> pairs(cn*2);
        int j, ni=0;

        for( i = 0, j = 0; i < n; i++, j += ni )
        {
            ni = mv[i].channels();
            for( k = 0; k < ni; k++ )
            {
                pairs[(j+k)*2] = j + k;
                pairs[(j+k)*2+1] = j + k;
            }
        }
        mixChannels( mv, n, &dst, 1, &pairs[0], cn );
        return;
    }

    MergeFunc func = getMergeFunc(depth);
    CV_Assert( func != 0 );

    size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
    size_t blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &dst;
    for( k = 0; k < cn; k++ )
        arrays[k+1] = &mv[k];

    NAryMatIterator it(arrays, ptrs, cn+1);
    size_t total = it.size;
    size_t blocksize = std::min((size_t)CV_SPLIT_MERGE_MAX_BLOCK_SIZE(cn), cn <= 4 ? total : std::min(total, blocksize0));

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( size_t j = 0; j < total; j += blocksize )
        {
            size_t bsz = std::min(total - j, blocksize);
            func( (const uchar**)&ptrs[1], ptrs[0], (int)bsz, cn );

            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( int t = 0; t < cn; t++ )
                    ptrs[t+1] += bsz*esz1;
            }
        }
    }
}
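// When the inputs are not all single-channel, merge() above delegates to
// mixChannels() with an identity pair list (pairs[2*k] == pairs[2*k+1] == k
// for every output channel k), which concatenates the input channels into
// dst in order without needing a per-depth merge kernel.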

#ifdef HAVE_OPENCL

namespace cv {

static bool ocl_merge( InputArrayOfArrays _mv, OutputArray _dst )
{
    std::vector<UMat> src, ksrc;
    _mv.getUMatVector(src);
    CV_Assert(!src.empty());

    int type = src[0].type(), depth = CV_MAT_DEPTH(type),
            rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
    Size size = src[0].size();

    for (size_t i = 0, srcsize = src.size(); i < srcsize; ++i)
    {
        int itype = src[i].type(), icn = CV_MAT_CN(itype), idepth = CV_MAT_DEPTH(itype),
                esz1 = CV_ELEM_SIZE1(idepth);
        if (src[i].dims > 2)
            return false;

        CV_Assert(size == src[i].size() && depth == idepth);

        for (int cn = 0; cn < icn; ++cn)
        {
            UMat tsrc = src[i];
            tsrc.offset += cn * esz1;
            ksrc.push_back(tsrc);
        }
    }
    int dcn = (int)ksrc.size();

    String srcargs, processelem, cndecl, indexdecl;
    for (int i = 0; i < dcn; ++i)
    {
        srcargs += format("DECLARE_SRC_PARAM(%d)", i);
        processelem += format("PROCESS_ELEM(%d)", i);
        indexdecl += format("DECLARE_INDEX(%d)", i);
        cndecl += format(" -D scn%d=%d", i, ksrc[i].channels());
    }

    ocl::Kernel k("merge", ocl::core::split_merge_oclsrc,
                  format("-D OP_MERGE -D cn=%d -D T=%s -D DECLARE_SRC_PARAMS_N=%s"
                         " -D DECLARE_INDEX_N=%s -D PROCESS_ELEMS_N=%s%s",
                         dcn, ocl::memopTypeToStr(depth), srcargs.c_str(),
                         indexdecl.c_str(), processelem.c_str(), cndecl.c_str()));
    if (k.empty())
        return false;

    _dst.create(size, CV_MAKE_TYPE(depth, dcn));
    UMat dst = _dst.getUMat();

    int argidx = 0;
    for (int i = 0; i < dcn; ++i)
        argidx = k.set(argidx, ocl::KernelArg::ReadOnlyNoSize(ksrc[i]));
    argidx = k.set(argidx, ocl::KernelArg::WriteOnly(dst));
    k.set(argidx, rowsPerWI);

    size_t globalsize[2] = { (size_t)dst.cols, ((size_t)dst.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::merge(InputArrayOfArrays _mv, OutputArray _dst)
{
    CV_INSTRUMENT_REGION()

    CV_OCL_RUN(_mv.isUMatVector() && _dst.isUMat(),
               ocl_merge(_mv, _dst))

    std::vector<Mat> mv;
    _mv.getMatVector(mv);
    merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
}

/****************************************************************************************\
*                       Generalized split/merge: mixing channels                         *
\****************************************************************************************/

namespace cv
{

template<typename T> static void
mixChannels_( const T** src, const int* sdelta,
              T** dst, const int* ddelta,
              int len, int npairs )
{
    int i, k;
    for( k = 0; k < npairs; k++ )
    {
        const T* s = src[k];
        T* d = dst[k];
        int ds = sdelta[k], dd = ddelta[k];
        if( s )
        {
            for( i = 0; i <= len - 2; i += 2, s += ds*2, d += dd*2 )
            {
                T t0 = s[0], t1 = s[ds];
                d[0] = t0; d[dd] = t1;
            }
            if( i < len )
                d[0] = s[0];
        }
        else
        {
            for( i = 0; i <= len - 2; i += 2, d += dd*2 )
                d[0] = d[dd] = 0;
            if( i < len )
                d[0] = 0;
        }
    }
}
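// In mixChannels_ above, sdelta[k] and ddelta[k] are per-pair element
// strides (the channel counts of the source and destination arrays), so s
// and d each walk one interleaved channel; a NULL source pointer means the
// destination channel is zero-filled. The copy loop is unrolled by two with
// a scalar tail for odd lengths.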

static void mixChannels8u( const uchar** src, const int* sdelta,
                           uchar** dst, const int* ddelta,
                           int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels16u( const ushort** src, const int* sdelta,
                            ushort** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels32s( const int** src, const int* sdelta,
                            int** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels64s( const int64** src, const int* sdelta,
                            int64** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta,
        uchar** dst, const int* ddelta, int len, int npairs );

static MixChannelsFunc getMixchFunc(int depth)
{
    static MixChannelsFunc mixchTab[] =
    {
        (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u,
        (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s,
        (MixChannelsFunc)mixChannels64s, 0
    };

    return mixchTab[depth];
}
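// As with split/merge, this table collapses depths by element size: every
// entry forwards to the same mixChannels_ template, instantiated only for
// 1-, 2-, 4- and 8-byte elements, because channel mixing is a pure copy.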

}

void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
{
    CV_INSTRUMENT_REGION()

    if( npairs == 0 )
        return;
    CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );

    size_t i, j, k, esz1 = dst[0].elemSize1();
    int depth = dst[0].depth();

    AutoBuffer<uchar> buf((nsrcs + ndsts + 1)*(sizeof(Mat*) + sizeof(uchar*)) + npairs*(sizeof(uchar*)*2 + sizeof(int)*6));
    const Mat** arrays = (const Mat**)(uchar*)buf;
    uchar** ptrs = (uchar**)(arrays + nsrcs + ndsts);
    const uchar** srcs = (const uchar**)(ptrs + nsrcs + ndsts + 1);
    uchar** dsts = (uchar**)(srcs + npairs);
    int* tab = (int*)(dsts + npairs);
    int *sdelta = (int*)(tab + npairs*4), *ddelta = sdelta + npairs;

    for( i = 0; i < nsrcs; i++ )
        arrays[i] = &src[i];
    for( i = 0; i < ndsts; i++ )
        arrays[i + nsrcs] = &dst[i];
    ptrs[nsrcs + ndsts] = 0;

    for( i = 0; i < npairs; i++ )
    {
        int i0 = fromTo[i*2], i1 = fromTo[i*2+1];
        if( i0 >= 0 )
        {
            for( j = 0; j < nsrcs; i0 -= src[j].channels(), j++ )
                if( i0 < src[j].channels() )
                    break;
            CV_Assert(j < nsrcs && src[j].depth() == depth);
            tab[i*4] = (int)j; tab[i*4+1] = (int)(i0*esz1);
            sdelta[i] = src[j].channels();
        }
        else
        {
            tab[i*4] = (int)(nsrcs + ndsts); tab[i*4+1] = 0;
            sdelta[i] = 0;
        }

        for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
            if( i1 < dst[j].channels() )
                break;
        CV_Assert(i1 >= 0 && j < ndsts && dst[j].depth() == depth);
        tab[i*4+2] = (int)(j + nsrcs); tab[i*4+3] = (int)(i1*esz1);
        ddelta[i] = dst[j].channels();
    }

    NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts));
    int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1));
    MixChannelsFunc func = getMixchFunc(depth);

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( k = 0; k < npairs; k++ )
        {
            srcs[k] = ptrs[tab[k*4]] + tab[k*4+1];
            dsts[k] = ptrs[tab[k*4+2]] + tab[k*4+3];
        }

        for( int t = 0; t < total; t += blocksize )
        {
            int bsz = std::min(total - t, blocksize);
            func( srcs, sdelta, dsts, ddelta, bsz, (int)npairs );

            if( t + blocksize < total )
                for( k = 0; k < npairs; k++ )
                {
                    srcs[k] += blocksize*sdelta[k]*esz1;
                    dsts[k] += blocksize*ddelta[k]*esz1;
                }
        }
    }
}
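// A short usage sketch for the function above (the classic case from the
// public documentation of this API): splitting a BGRA image into a BGR
// image plus a separate alpha plane. fromTo is read as {src, dst} channel
// pairs, with channels numbered consecutively across all arrays:
//
//     cv::Mat bgra( 100, 100, CV_8UC4, cv::Scalar(255, 0, 0, 255) );
//     cv::Mat bgr( bgra.rows, bgra.cols, CV_8UC3 );
//     cv::Mat alpha( bgra.rows, bgra.cols, CV_8UC1 );
//     cv::Mat out[] = { bgr, alpha };
//     int from_to[] = { 0,0, 1,1, 2,2, 3,3 };  // B->B, G->G, R->R, A->alpha
//     cv::mixChannels( &bgra, 1, out, 2, from_to, 4 );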

#ifdef HAVE_OPENCL

namespace cv {

static void getUMatIndex(const std::vector<UMat> & um, int cn, int & idx, int & cnidx)
{
    int totalChannels = 0;
    for (size_t i = 0, size = um.size(); i < size; ++i)
    {
        int ccn = um[i].channels();
        totalChannels += ccn;

        if (totalChannels == cn)
        {
            idx = (int)(i + 1);
            cnidx = 0;
            return;
        }
        else if (totalChannels > cn)
        {
            idx = (int)i;
            cnidx = i == 0 ? cn : (cn - totalChannels + ccn);
            return;
        }
    }

    idx = cnidx = -1;
}
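// Example of the mapping above: for um with channel counts {3, 1}, a global
// channel index cn == 1 gives idx == 0, cnidx == 1 (second channel of the
// first matrix); cn == 3 gives idx == 1, cnidx == 0 (first channel of the
// second matrix); an index past the last channel gives idx == cnidx == -1.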

static bool ocl_mixChannels(InputArrayOfArrays _src, InputOutputArrayOfArrays _dst,
                            const int* fromTo, size_t npairs)
{
    std::vector<UMat> src, dst;
    _src.getUMatVector(src);
    _dst.getUMatVector(dst);

    size_t nsrc = src.size(), ndst = dst.size();
    CV_Assert(nsrc > 0 && ndst > 0);

    Size size = src[0].size();
    int depth = src[0].depth(), esz = CV_ELEM_SIZE(depth),
            rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;

    for (size_t i = 1, ssize = src.size(); i < ssize; ++i)
        CV_Assert(src[i].size() == size && src[i].depth() == depth);
    for (size_t i = 0, dsize = dst.size(); i < dsize; ++i)
        CV_Assert(dst[i].size() == size && dst[i].depth() == depth);

    String declsrc, decldst, declproc, declcn, indexdecl;
    std::vector<UMat> srcargs(npairs), dstargs(npairs);

    for (size_t i = 0; i < npairs; ++i)
    {
        int scn = fromTo[i<<1], dcn = fromTo[(i<<1) + 1];
        int src_idx, src_cnidx, dst_idx, dst_cnidx;

        getUMatIndex(src, scn, src_idx, src_cnidx);
        getUMatIndex(dst, dcn, dst_idx, dst_cnidx);

        CV_Assert(dst_idx >= 0 && src_idx >= 0);

        srcargs[i] = src[src_idx];
        srcargs[i].offset += src_cnidx * esz;

        dstargs[i] = dst[dst_idx];
        dstargs[i].offset += dst_cnidx * esz;

        declsrc += format("DECLARE_INPUT_MAT(%d)", i);
        decldst += format("DECLARE_OUTPUT_MAT(%d)", i);
        indexdecl += format("DECLARE_INDEX(%d)", i);
        declproc += format("PROCESS_ELEM(%d)", i);
        declcn += format(" -D scn%d=%d -D dcn%d=%d", i, src[src_idx].channels(), i, dst[dst_idx].channels());
    }

    ocl::Kernel k("mixChannels", ocl::core::mixchannels_oclsrc,
                  format("-D T=%s -D DECLARE_INPUT_MAT_N=%s -D DECLARE_OUTPUT_MAT_N=%s"
                         " -D PROCESS_ELEM_N=%s -D DECLARE_INDEX_N=%s%s",
                         ocl::memopTypeToStr(depth), declsrc.c_str(), decldst.c_str(),
                         declproc.c_str(), indexdecl.c_str(), declcn.c_str()));
    if (k.empty())
        return false;

    int argindex = 0;
    for (size_t i = 0; i < npairs; ++i)
        argindex = k.set(argindex, ocl::KernelArg::ReadOnlyNoSize(srcargs[i]));
    for (size_t i = 0; i < npairs; ++i)
        argindex = k.set(argindex, ocl::KernelArg::WriteOnlyNoSize(dstargs[i]));
    argindex = k.set(argindex, size.height);
    argindex = k.set(argindex, size.width);
    k.set(argindex, rowsPerWI);

    size_t globalsize[2] = { (size_t)size.width, ((size_t)size.height + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}

}

#endif

void cv::mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
                     const int* fromTo, size_t npairs)
{
    CV_INSTRUMENT_REGION()

    if (npairs == 0 || fromTo == NULL)
        return;

    CV_OCL_RUN(dst.isUMatVector(),
               ocl_mixChannels(src, dst, fromTo, npairs))

    bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
            src.kind() != _InputArray::STD_VECTOR_VECTOR &&
            src.kind() != _InputArray::STD_VECTOR_UMAT;
    bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
            dst.kind() != _InputArray::STD_VECTOR_VECTOR &&
            dst.kind() != _InputArray::STD_VECTOR_UMAT;
    int i;
    int nsrc = src_is_mat ? 1 : (int)src.total();
    int ndst = dst_is_mat ? 1 : (int)dst.total();

    CV_Assert(nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(src_is_mat ? -1 : i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, fromTo, npairs);
}

void cv::mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
                     const std::vector<int>& fromTo)
{
    CV_INSTRUMENT_REGION()

    if (fromTo.empty())
        return;

    CV_OCL_RUN(dst.isUMatVector(),
               ocl_mixChannels(src, dst, &fromTo[0], fromTo.size()>>1))

    bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
            src.kind() != _InputArray::STD_VECTOR_VECTOR &&
            src.kind() != _InputArray::STD_VECTOR_UMAT;
    bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
            dst.kind() != _InputArray::STD_VECTOR_VECTOR &&
            dst.kind() != _InputArray::STD_VECTOR_UMAT;
    int i;
    int nsrc = src_is_mat ? 1 : (int)src.total();
    int ndst = dst_is_mat ? 1 : (int)dst.total();

    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(src_is_mat ? -1 : i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
}

void cv::extractChannel(InputArray _src, OutputArray _dst, int coi)
{
    CV_INSTRUMENT_REGION()

    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    CV_Assert( 0 <= coi && coi < cn );
    int ch[] = { coi, 0 };

    if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat())
    {
        UMat src = _src.getUMat();
        _dst.create(src.dims, &src.size[0], depth);
        UMat dst = _dst.getUMat();
        mixChannels(std::vector<UMat>(1, src), std::vector<UMat>(1, dst), ch, 1);
        return;
    }

    Mat src = _src.getMat();
    _dst.create(src.dims, &src.size[0], depth);
    Mat dst = _dst.getMat();
    mixChannels(&src, 1, &dst, 1, ch, 1);
}

void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
{
    CV_INSTRUMENT_REGION()

    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype);
    int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype), dcn = CV_MAT_CN(dtype);
    CV_Assert( _src.sameSize(_dst) && sdepth == ddepth );
    CV_Assert( 0 <= coi && coi < dcn && scn == 1 );

    int ch[] = { 0, coi };
    if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat())
    {
        UMat src = _src.getUMat(), dst = _dst.getUMat();
        mixChannels(std::vector<UMat>(1, src), std::vector<UMat>(1, dst), ch, 1);
        return;
    }

    Mat src = _src.getMat(), dst = _dst.getMat();
    mixChannels(&src, 1, &dst, 1, ch, 1);
}
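// Both helpers above are thin wrappers over mixChannels() with a single
// pair: extractChannel() copies source channel coi into a one-channel
// output via {coi, 0}, and insertChannel() writes a one-channel source into
// destination channel coi via {0, coi}.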

/****************************************************************************************\
*                                convertScale[Abs]                                       *
\****************************************************************************************/

namespace cv
{

template<typename T, typename DT, typename WT>
struct cvtScaleAbs_SIMD
{
    int operator () (const T *, DT *, int, WT, WT) const
    {
        return 0;
    }
};
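// SIMD dispatch pattern used throughout this section: this generic functor
// processes nothing and returns 0, while the per-type specializations below
// consume as many elements as fit into whole vectors and return the number
// processed; the scalar code in cvtScaleAbs_ then finishes each row from
// that index.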

#if CV_SSE2

template <>
struct cvtScaleAbs_SIMD<uchar, uchar, float>
{
    int operator () (const uchar * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 16; x += 16)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128i v_src12 = _mm_unpacklo_epi8(v_src, v_zero_i), v_src_34 = _mm_unpackhi_epi8(v_src, v_zero_i);
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src12, v_zero_i)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src12, v_zero_i)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
                __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src_34, v_zero_i)), v_scale), v_shift);
                v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3);
                __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src_34, v_zero_i)), v_scale), v_shift);
                v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)),
                                                   _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4)));
                _mm_storeu_si128((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<schar, uchar, float>
{
    int operator () (const schar * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 16; x += 16)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128i v_src_12 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero_i, v_src), 8),
                        v_src_34 = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero_i, v_src), 8);
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_12), 16)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);
                __m128 v_dst3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpacklo_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift);
                v_dst3 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst3), v_dst3);
                __m128 v_dst4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(
                    _mm_srai_epi32(_mm_unpackhi_epi16(v_zero_i, v_src_34), 16)), v_scale), v_shift);
                v_dst4 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst4), v_dst4);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)),
                                                   _mm_packs_epi32(_mm_cvtps_epi32(v_dst3), _mm_cvtps_epi32(v_dst4)));
                _mm_storeu_si128((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<ushort, uchar, float>
{
    int operator () (const ushort * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 8)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero_i)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero_i)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<short, uchar, float>
{
    int operator () (const short * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 8)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_src, v_src), 16)), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);
                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_src, v_src), 16)), v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), _mm_cvtps_epi32(v_dst2)), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<int, uchar, float>
{
    int operator () (const int * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 4)
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(src + x));
                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);

                __m128i v_dst_i = _mm_packus_epi16(_mm_packs_epi32(_mm_cvtps_epi32(v_dst1), v_zero_i), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), v_dst_i);
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<float, uchar, float>
{
    int operator () (const float * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 4)
            {
                __m128 v_dst = _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(src + x), v_scale), v_shift);
                v_dst = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst), v_dst);

                __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst), v_zero_i);
                _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i));
            }
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<double, uchar, float>
{
    int operator () (const double * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;

        if (USE_SSE2)
        {
            __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift),
                v_zero_f = _mm_setzero_ps();
            __m128i v_zero_i = _mm_setzero_si128();

            for ( ; x <= width - 8; x += 8)
            {
                __m128 v_src1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
                                              _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
                __m128 v_src2 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
                                              _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));

                __m128 v_dst1 = _mm_add_ps(_mm_mul_ps(v_src1, v_scale), v_shift);
                v_dst1 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst1), v_dst1);

                __m128 v_dst2 = _mm_add_ps(_mm_mul_ps(v_src2, v_scale), v_shift);
                v_dst2 = _mm_max_ps(_mm_sub_ps(v_zero_f, v_dst2), v_dst2);

                __m128i v_dst_i = _mm_packs_epi32(_mm_cvtps_epi32(v_dst1),
                                                  _mm_cvtps_epi32(v_dst2));

                _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst_i, v_zero_i));
            }
        }

        return x;
    }
};

#elif CV_NEON

template <>
struct cvtScaleAbs_SIMD<uchar, uchar, float>
{
    int operator () (const uchar * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift);

        for ( ; x <= width - 16; x += 16)
        {
            uint8x16_t v_src = vld1q_u8(src + x);
            uint16x8_t v_half = vmovl_u8(vget_low_u8(v_src));

            uint32x4_t v_quat = vmovl_u16(vget_low_u16(v_half));
            float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
            v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));

            v_quat = vmovl_u16(vget_high_u16(v_half));
            float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
            v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));

            v_half = vmovl_u8(vget_high_u8(v_src));

            v_quat = vmovl_u16(vget_low_u16(v_half));
            float32x4_t v_dst_2 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
            v_dst_2 = vabsq_f32(vaddq_f32(v_dst_2, v_shift));

            v_quat = vmovl_u16(vget_high_u16(v_half));
            float32x4_t v_dst_3 = vmulq_n_f32(vcvtq_f32_u32(v_quat), scale);
            v_dst_3 = vabsq_f32(vaddq_f32(v_dst_3, v_shift));

            uint16x8_t v_dsti_0 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
                                               vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));
            uint16x8_t v_dsti_1 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_2)),
                                               vqmovn_u32(cv_vrndq_u32_f32(v_dst_3)));

            vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(v_dsti_0), vqmovn_u16(v_dsti_1)));
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<schar, uchar, float>
{
    int operator () (const schar * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift);

        for ( ; x <= width - 16; x += 16)
        {
            int8x16_t v_src = vld1q_s8(src + x);
            int16x8_t v_half = vmovl_s8(vget_low_s8(v_src));

            int32x4_t v_quat = vmovl_s16(vget_low_s16(v_half));
            float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
            v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));

            v_quat = vmovl_s16(vget_high_s16(v_half));
            float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
            v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));

            v_half = vmovl_s8(vget_high_s8(v_src));

            v_quat = vmovl_s16(vget_low_s16(v_half));
            float32x4_t v_dst_2 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
            v_dst_2 = vabsq_f32(vaddq_f32(v_dst_2, v_shift));

            v_quat = vmovl_s16(vget_high_s16(v_half));
            float32x4_t v_dst_3 = vmulq_n_f32(vcvtq_f32_s32(v_quat), scale);
            v_dst_3 = vabsq_f32(vaddq_f32(v_dst_3, v_shift));

            uint16x8_t v_dsti_0 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
                                               vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));
            uint16x8_t v_dsti_1 = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_2)),
                                               vqmovn_u32(cv_vrndq_u32_f32(v_dst_3)));

            vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(v_dsti_0), vqmovn_u16(v_dsti_1)));
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<ushort, uchar, float>
{
    int operator () (const ushort * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);

            uint32x4_t v_half = vmovl_u16(vget_low_u16(v_src));
            float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_u32(v_half), scale);
            v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));

            v_half = vmovl_u16(vget_high_u16(v_src));
            float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_u32(v_half), scale);
            v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));

            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<short, uchar, float>
{
    int operator () (const short * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);

            int32x4_t v_half = vmovl_s16(vget_low_s16(v_src));
            float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_s32(v_half), scale);
            v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));

            v_half = vmovl_s16(vget_high_s16(v_src));
            float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_s32(v_half), scale);
            v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst_0)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst_1)));

            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<int, uchar, float>
{
    int operator () (const int * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst_0 = vmulq_n_f32(vcvtq_f32_s32(vld1q_s32(src + x)), scale);
            v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
            uint16x4_t v_dsti_0 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_0));

            float32x4_t v_dst_1 = vmulq_n_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), scale);
            v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
            uint16x4_t v_dsti_1 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_1));

            uint16x8_t v_dst = vcombine_u16(v_dsti_0, v_dsti_1);
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScaleAbs_SIMD<float, uchar, float>
{
    int operator () (const float * src, uchar * dst, int width,
                     float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst_0 = vmulq_n_f32(vld1q_f32(src + x), scale);
            v_dst_0 = vabsq_f32(vaddq_f32(v_dst_0, v_shift));
            uint16x4_t v_dsti_0 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_0));

            float32x4_t v_dst_1 = vmulq_n_f32(vld1q_f32(src + x + 4), scale);
            v_dst_1 = vabsq_f32(vaddq_f32(v_dst_1, v_shift));
            uint16x4_t v_dsti_1 = vqmovn_u32(cv_vrndq_u32_f32(v_dst_1));

            uint16x8_t v_dst = vcombine_u16(v_dsti_0, v_dsti_1);
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

#endif

template<typename T, typename DT, typename WT> static void
cvtScaleAbs_( const T* src, size_t sstep,
              DT* dst, size_t dstep, Size size,
              WT scale, WT shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);
    cvtScaleAbs_SIMD<T, DT, WT> vop;

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = vop(src, dst, size.width, scale, shift);

        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(std::abs(src[x]*scale + shift));
            t1 = saturate_cast<DT>(std::abs(src[x+1]*scale + shift));
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(std::abs(src[x+2]*scale + shift));
            t1 = saturate_cast<DT>(std::abs(src[x+3]*scale + shift));
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(std::abs(src[x]*scale + shift));
    }
}
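// Worked example of the scalar expression above (convertScaleAbs
// semantics): with scale = 0.5 and shift = 0, a CV_16S source value of -300
// becomes |(-300)*0.5 + 0| = 150; a value of 600 with scale = 1 becomes
// |600| = 600, which saturate_cast<uchar> then clamps to 255.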

template <typename T, typename DT, typename WT>
struct cvtScale_SIMD
{
    int operator () (const T *, DT *, int, WT, WT) const
    {
        return 0;
    }
};

#if CV_SSE2

// from uchar

template <>
struct cvtScale_SIMD<uchar, uchar, float>
{
    int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, schar, float>
{
    int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<uchar, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<uchar, short, float>
{
    int operator () (const uchar * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, int, float>
{
    int operator () (const uchar * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, float, float>
{
    int operator () (const uchar * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
|
|
|
|
__m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
|
|
|
|
__m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
|
|
|
|
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
|
|
|
|
|
|
|
|
v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
|
|
|
|
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
|
|
|
|
|
|
|
|
_mm_storeu_ps(dst + x, v_dst_0);
|
|
|
|
_mm_storeu_ps(dst + x + 4, v_dst_1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return x;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
template <>
|
2015-01-12 15:59:29 +08:00
|
|
|
struct cvtScale_SIMD<uchar, double, double>
|
2015-01-12 15:59:29 +08:00
|
|
|
{
|
2015-01-12 15:59:29 +08:00
|
|
|
int operator () (const uchar * src, double * dst, int width, double scale, double shift) const
|
2015-01-12 15:59:29 +08:00
|
|
|
{
|
|
|
|
int x = 0;
|
|
|
|
|
|
|
|
if (!USE_SSE2)
|
|
|
|
return x;
|
|
|
|
|
|
|
|
__m128i v_zero = _mm_setzero_si128();
|
2015-01-12 15:59:29 +08:00
|
|
|
__m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);
|
2015-01-12 15:59:29 +08:00
|
|
|
|
|
|
|
for ( ; x <= width - 8; x += 8)
|
|
|
|
{
|
|
|
|
__m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
|
|
|
|
|
2015-01-12 15:59:29 +08:00
|
|
|
__m128i v_src_s32 = _mm_unpacklo_epi16(v_src, v_zero);
|
|
|
|
__m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
|
|
|
|
__m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
|
|
|
|
_mm_storeu_pd(dst + x, v_dst_0);
|
|
|
|
_mm_storeu_pd(dst + x + 2, v_dst_1);
|
2015-01-12 15:59:29 +08:00
|
|
|
|
2015-01-12 15:59:29 +08:00
|
|
|
v_src_s32 = _mm_unpackhi_epi16(v_src, v_zero);
|
|
|
|
v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
|
|
|
|
v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
|
|
|
|
_mm_storeu_pd(dst + x + 4, v_dst_0);
|
|
|
|
_mm_storeu_pd(dst + x + 6, v_dst_1);
|
2015-01-12 15:59:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return x;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
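
// schar sources need sign extension: bytes are unpacked into the *high* half
// of each 16-bit lane (_mm_unpacklo_epi8(v_zero, ...)) and shifted right
// arithmetically by 8; the same high-half/_mm_srai_epi32 trick then widens
// the 16-bit lanes to sign-correct 32-bit values.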
// from schar

template <>
struct cvtScale_SIMD<schar, uchar, float>
{
    int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, schar, float>
{
    int operator () (const schar * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<schar, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<schar, short, float>
{
    int operator () (const schar * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, int, float>
{
    int operator () (const schar * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, float, float>
{
    int operator () (const schar * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_ps(dst + x, v_dst_0);
            _mm_storeu_ps(dst + x + 4, v_dst_1);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, double, double>
{
    int operator () (const schar * src, double * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x)));
            v_src = _mm_srai_epi16(v_src, 8);

            __m128i v_src_s32 = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16);
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
            _mm_storeu_pd(dst + x, v_dst_0);
            _mm_storeu_pd(dst + x + 2, v_dst_1);

            v_src_s32 = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16);
            v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
            v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
            _mm_storeu_pd(dst + x + 4, v_dst_0);
            _mm_storeu_pd(dst + x + 6, v_dst_1);
        }

        return x;
    }
};
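
// ushort sources are zero-extended by unpacking 16-bit lanes against v_zero;
// the narrow destinations then reuse the saturating pack sequences from the
// uchar group above.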
// from ushort

template <>
struct cvtScale_SIMD<ushort, uchar, float>
{
    int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, schar, float>
{
    int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<ushort, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<ushort, short, float>
{
    int operator () (const ushort * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, int, float>
{
    int operator () (const ushort * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, float, float>
{
    int operator () (const ushort * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_ps(dst + x, v_dst_0);
            _mm_storeu_ps(dst + x + 4, v_dst_1);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, double, double>
{
    int operator () (const ushort * src, double * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));

            __m128i v_src_s32 = _mm_unpacklo_epi16(v_src, v_zero);
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
            _mm_storeu_pd(dst + x, v_dst_0);
            _mm_storeu_pd(dst + x + 2, v_dst_1);

            v_src_s32 = _mm_unpackhi_epi16(v_src, v_zero);
            v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
            v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
            _mm_storeu_pd(dst + x + 4, v_dst_0);
            _mm_storeu_pd(dst + x + 6, v_dst_1);
        }

        return x;
    }
};
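
// short sources restore the sign the same way as schar: each lane is unpacked
// into the high 16 bits of a 32-bit element and shifted right arithmetically
// by 16.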
// from short

template <>
struct cvtScale_SIMD<short, uchar, float>
{
    int operator () (const short * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, schar, float>
{
    int operator () (const short * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<short, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const short * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<short, short, float>
{
    int operator () (const short * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, int, float>
{
    int operator () (const short * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, float, float>
{
    int operator () (const short * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);

            _mm_storeu_ps(dst + x, v_dst_0);
            _mm_storeu_ps(dst + x + 4, v_dst_1);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, double, double>
{
    int operator () (const short * src, double * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));

            __m128i v_src_s32 = _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16);
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
            _mm_storeu_pd(dst + x, v_dst_0);
            _mm_storeu_pd(dst + x + 2, v_dst_1);

            v_src_s32 = _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16);
            v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src_s32), v_scale), v_shift);
            v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(_mm_srli_si128(v_src_s32, 8)), v_scale), v_shift);
            _mm_storeu_pd(dst + x + 4, v_dst_0);
            _mm_storeu_pd(dst + x + 6, v_dst_1);
        }

        return x;
    }
};
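
// int sources convert directly with _mm_cvtepi32_ps for the narrow
// destinations. For int/float/double destinations the working type is double
// (two _mm_cvtepi32_pd halves): a double represents every 32-bit int exactly,
// so no precision is lost before the final rounding.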
// from int

template <>
struct cvtScale_SIMD<int, uchar, float>
{
    int operator () (const int * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, schar, float>
{
    int operator () (const int * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<int, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const int * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<int, short, float>
{
    int operator () (const int * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, int, double>
{
    int operator () (const int * src, int * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift);

            v_src = _mm_srli_si128(v_src, 8);
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift);

            __m128 v_dst = _mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_dst_0)),
                                         _mm_castsi128_ps(_mm_cvtpd_epi32(v_dst_1)));

            _mm_storeu_si128((__m128i *)(dst + x), _mm_castps_si128(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, float, double>
{
    int operator () (const int * src, float * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift);

            v_src = _mm_srli_si128(v_src, 8);
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift);

            _mm_storeu_ps(dst + x, _mm_movelh_ps(_mm_cvtpd_ps(v_dst_0),
                                                 _mm_cvtpd_ps(v_dst_1)));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, double, double>
{
    int operator () (const int * src, double * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift);

            v_src = _mm_srli_si128(v_src, 8);
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtepi32_pd(v_src), v_scale), v_shift);

            _mm_storeu_pd(dst + x, v_dst_0);
            _mm_storeu_pd(dst + x + 2, v_dst_1);
        }

        return x;
    }
};
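
// float sources need no widening: two 4-lane loads cover each 8-element step,
// and only the destination-side rounding and packing differ.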
// from float

template <>
struct cvtScale_SIMD<float, uchar, float>
{
    int operator () (const float * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_loadu_ps(src + x + 4);
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, schar, float>
{
    int operator () (const float * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_loadu_ps(src + x + 4);
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<float, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const float * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_loadu_ps(src + x + 4);
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<float, short, float>
{
    int operator () (const float * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_loadu_ps(src + x + 4);
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, int, float>
{
    int operator () (const float * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_loadu_ps(src + x + 4);
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, float, float>
{
    int operator () (const float * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128 v_dst = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
            _mm_storeu_ps(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, double, double>
{
    int operator () (const float * src, double * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128 v_src = _mm_loadu_ps(src + x);
            __m128d v_dst_0 = _mm_add_pd(_mm_mul_pd(_mm_cvtps_pd(v_src), v_scale), v_shift);
            v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8));
            __m128d v_dst_1 = _mm_add_pd(_mm_mul_pd(_mm_cvtps_pd(v_src), v_scale), v_shift);

            _mm_storeu_pd(dst + x, v_dst_0);
            _mm_storeu_pd(dst + x + 2, v_dst_1);
        }

        return x;
    }
};
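
// double sources are narrowed pairwise with _mm_cvtpd_ps and recombined with
// _mm_movelh_ps before entering the float pipeline; the double->int and
// double->double paths stay entirely in double precision.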
// from double

template <>
struct cvtScale_SIMD<double, uchar, float>
{
    int operator () (const double * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
                                         _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
                                  _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<double, schar, float>
{
    int operator () (const double * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128i v_zero = _mm_setzero_si128();
        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
                                         _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
                                  _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
        }

        return x;
    }
};

#if CV_SSE4_1

template <>
struct cvtScale_SIMD<double, ushort, float>
{
    cvtScale_SIMD()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator () (const double * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!haveSSE)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
                                         _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
                                  _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
                                             _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }

    bool haveSSE;
};

#endif

template <>
struct cvtScale_SIMD<double, short, float>
{
    int operator () (const double * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
                                         _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
                                  _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));
            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
                                            _mm_cvtps_epi32(v_dst_1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<double, int, double>
{
    int operator () (const double * src, int * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128d v_src = _mm_loadu_pd(src + x);
            __m128d v_dst0 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift);

            v_src = _mm_loadu_pd(src + x + 2);
            __m128d v_dst1 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift);

            __m128 v_dst = _mm_movelh_ps(_mm_castsi128_ps(_mm_cvtpd_epi32(v_dst0)),
                                         _mm_castsi128_ps(_mm_cvtpd_epi32(v_dst1)));

            _mm_storeu_si128((__m128i *)(dst + x), _mm_castps_si128(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<double, float, double>
{
    int operator () (const double * src, float * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 4; x += 4)
        {
            __m128d v_src = _mm_loadu_pd(src + x);
            __m128d v_dst0 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift);

            v_src = _mm_loadu_pd(src + x + 2);
            __m128d v_dst1 = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift);

            __m128 v_dst = _mm_movelh_ps(_mm_cvtpd_ps(v_dst0),
                                         _mm_cvtpd_ps(v_dst1));

            _mm_storeu_ps(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<double, double, double>
{
    int operator () (const double * src, double * dst, int width, double scale, double shift) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        __m128d v_scale = _mm_set1_pd(scale), v_shift = _mm_set1_pd(shift);

        for ( ; x <= width - 2; x += 2)
        {
            __m128d v_src = _mm_loadu_pd(src + x);
            __m128d v_dst = _mm_add_pd(_mm_mul_pd(v_src, v_scale), v_shift);
            _mm_storeu_pd(dst + x, v_dst);
        }

        return x;
    }
};
#elif CV_NEON
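
// The NEON branch mirrors the SSE2 pipeline: vmovl_* widens 8 elements, the
// arithmetic runs as vmulq/vaddq on float32x4 vectors, the
// cv_vrndq_{u32,s32}_f32 rounding helpers (defined elsewhere in OpenCV's NEON
// support code) convert back to integers, and vqmovn_* narrows with
// saturation. There is no run-time guard here: CV_NEON is a compile-time
// switch.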
// from uchar
|
|
|
|
|
|
|
|
template <>
|
|
|
|
struct cvtScale_SIMD<uchar, uchar, float>
|
|
|
|
{
|
|
|
|
int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const
|
|
|
|
{
|
|
|
|
int x = 0;
|
|
|
|
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
|
|
|
|
|
|
|
|
for ( ; x <= width - 8; x += 8)
|
|
|
|
{
|
|
|
|
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
|
|
|
|
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
|
|
|
|
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
|
|
|
|
|
            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, schar, float>
{
    int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1_s8(dst + x, vqmovn_s16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, ushort, float>
{
    int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1q_u16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, short, float>
{
    int operator () (const uchar * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1q_s16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, int, float>
{
    int operator () (const uchar * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<uchar, float, float>
{
    int operator () (const uchar * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
        }

        return x;
    }
};
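
// All cvtScale_SIMD specializations above and below follow the same NEON
// recipe: load 8 source elements, widen them to 32-bit lanes, convert to
// float32, apply the affine map v*scale + shift on two 4-lane halves, round
// with cv_vrndq_*_f32 and narrow back with saturation. Each operator()
// returns how many elements it processed, so the caller only has to run a
// scalar tail loop over the remaining (width % 8) elements. The scalar
// equivalent of one lane is simply:
//
//     dst[x] = saturate_cast<DT>(src[x]*scale + shift);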

// from schar

template <>
struct cvtScale_SIMD<schar, uchar, float>
{
    int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, schar, float>
{
    int operator () (const schar * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1_s8(dst + x, vqmovn_s16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, ushort, float>
{
    int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1q_u16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, short, float>
{
    int operator () (const schar * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1q_s16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, int, float>
{
    int operator () (const schar * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<schar, float, float>
{
    int operator () (const schar * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
        }

        return x;
    }
};

// from ushort

template <>
struct cvtScale_SIMD<ushort, uchar, float>
{
    int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, schar, float>
{
    int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1_s8(dst + x, vqmovn_s16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, ushort, float>
{
    int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1q_u16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, short, float>
{
    int operator () (const ushort * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1q_s16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, int, float>
{
    int operator () (const ushort * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);

            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<ushort, float, float>
{
    int operator () (const ushort * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
        }

        return x;
    }
};

// from short

template <>
struct cvtScale_SIMD<short, uchar, float>
{
    int operator () (const short * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, schar, float>
{
    int operator () (const short * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1_s8(dst + x, vqmovn_s16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, ushort, float>
{
    int operator () (const short * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1q_u16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<short, float, float>
{
    int operator () (const short * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
            vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
        }

        return x;
    }
};
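
// Note: there are no cvtScale_SIMD<short, short, float> and
// cvtScale_SIMD<short, int, float> specializations here; those two cases are
// handled by the whole-function cvtScale_ specializations further below.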

// from int

template <>
struct cvtScale_SIMD<int, uchar, float>
{
    int operator () (const int * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, schar, float>
{
    int operator () (const int * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1_s8(dst + x, vqmovn_s16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, ushort, float>
{
    int operator () (const int * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1q_u16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<int, short, float>
{
    int operator () (const int * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1q_s16(dst + x, v_dst);
        }

        return x;
    }
};

// from float

template <>
struct cvtScale_SIMD<float, uchar, float>
{
    int operator () (const float * src, uchar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1_u8(dst + x, vqmovn_u16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, schar, float>
{
    int operator () (const float * src, schar * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1_s8(dst + x, vqmovn_s16(v_dst));
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, ushort, float>
{
    int operator () (const float * src, ushort * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);

            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
            vst1q_u16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, short, float>
{
    int operator () (const float * src, short * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 8; x += 8)
        {
            float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
            float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);

            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
            vst1q_s16(dst + x, v_dst);
        }

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, int, float>
{
    int operator () (const float * src, int * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 4; x += 4)
            vst1q_s32(dst + x, cv_vrndq_s32_f32(vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift)));

        return x;
    }
};

template <>
struct cvtScale_SIMD<float, float, float>
{
    int operator () (const float * src, float * dst, int width, float scale, float shift) const
    {
        int x = 0;
        float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);

        for ( ; x <= width - 4; x += 4)
            vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift));

        return x;
    }
};

#endif
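
// cvtScale_ drives one whole 2D plane: the SIMD functor above converts as
// many elements per row as it can and reports how far it got; an (optionally
// unrolled) scalar loop then finishes the row. For example, with width == 10
// and an 8-lane kernel, vop() returns 8 and the scalar loop converts the
// last two elements.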

template<typename T, typename DT, typename WT> static void
cvtScale_( const T* src, size_t sstep,
           DT* dst, size_t dstep, Size size,
           WT scale, WT shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    cvtScale_SIMD<T, DT, WT> vop;

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = vop(src, dst, size.width, scale, shift);

        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(src[x]*scale + shift);
            t1 = saturate_cast<DT>(src[x+1]*scale + shift);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(src[x+2]*scale + shift);
            t1 = saturate_cast<DT>(src[x+3]*scale + shift);
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif

        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(src[x]*scale + shift);
    }
}
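
// A minimal sketch of how such a kernel is typically invoked (hypothetical
// caller, not part of this file): a convertTo-style 8-bit -> float conversion
// with alpha/beta on a dense single-row buffer would boil down to
//
//     // assumption: dense storage, so the byte step is width * element size
//     cvtScale_<uchar, float, float>(src8u, width*sizeof(uchar),
//                                    dst32f, width*sizeof(float),
//                                    Size(width, 1), alpha, beta);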

//vz optimized template specialization
template<> void
cvtScale_<short, short, float>( const short* src, size_t sstep,
                                short* dst, size_t dstep, Size size,
                                float scale, float shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_SSE2
        if(USE_SSE2)
        {
            __m128 scale128 = _mm_set1_ps (scale);
            __m128 shift128 = _mm_set1_ps (shift);
            for(; x <= size.width - 8; x += 8 )
            {
                __m128i r0 = _mm_loadl_epi64((const __m128i*)(src + x));
                __m128i r1 = _mm_loadl_epi64((const __m128i*)(src + x + 4));
                __m128 rf0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
                __m128 rf1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r1, r1), 16));
                rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
                rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);
                r0 = _mm_cvtps_epi32(rf0);
                r1 = _mm_cvtps_epi32(rf1);
                r0 = _mm_packs_epi32(r0, r1);
                _mm_storeu_si128((__m128i*)(dst + x), r0);
            }
        }
        #elif CV_NEON
        float32x4_t v_shift = vdupq_n_f32(shift);
        for(; x <= size.width - 8; x += 8 )
        {
            int16x8_t v_src = vld1q_s16(src + x);
            float32x4_t v_tmp1 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src)));
            float32x4_t v_tmp2 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src)));

            v_tmp1 = vaddq_f32(vmulq_n_f32(v_tmp1, scale), v_shift);
            v_tmp2 = vaddq_f32(vmulq_n_f32(v_tmp2, scale), v_shift);

            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_tmp1)),
                                            vqmovn_s32(cv_vrndq_s32_f32(v_tmp2))));
        }
        #endif

        for(; x < size.width; x++ )
            dst[x] = saturate_cast<short>(src[x]*scale + shift);
    }
}
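
// The SSE2 block above sign-extends 16-bit lanes without SSE4.1's
// _mm_cvtepi16_epi32: _mm_unpacklo_epi16(r0, r0) duplicates each 16-bit
// value into both halves of a 32-bit lane, and the arithmetic shift
// _mm_srai_epi32(..., 16) then yields the sign-extended 32-bit value.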

template<> void
cvtScale_<short, int, float>( const short* src, size_t sstep,
                              int* dst, size_t dstep, Size size,
                              float scale, float shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;

        #if CV_AVX2
        if (USE_AVX2)
        {
            __m256 scale256 = _mm256_set1_ps(scale);
            __m256 shift256 = _mm256_set1_ps(shift);
            const int shuffle = 0xD8;

            for ( ; x <= size.width - 16; x += 16)
            {
                __m256i v_src = _mm256_loadu_si256((const __m256i *)(src + x));
                v_src = _mm256_permute4x64_epi64(v_src, shuffle);
                __m256i v_src_lo = _mm256_srai_epi32(_mm256_unpacklo_epi16(v_src, v_src), 16);
                __m256i v_src_hi = _mm256_srai_epi32(_mm256_unpackhi_epi16(v_src, v_src), 16);
                __m256 v_dst0 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_lo), scale256), shift256);
                __m256 v_dst1 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_hi), scale256), shift256);
                _mm256_storeu_si256((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0));
                _mm256_storeu_si256((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1));
            }
        }
        #endif
        #if CV_SSE2
        if (USE_SSE2) // ~5X faster than the scalar loop
        {
            __m128 scale128 = _mm_set1_ps (scale);
            __m128 shift128 = _mm_set1_ps (shift);
            for(; x <= size.width - 8; x += 8 )
            {
                __m128i r0 = _mm_loadu_si128((const __m128i*)(src + x));

                __m128 rf0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(r0, r0), 16));
                __m128 rf1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(r0, r0), 16));
                rf0 = _mm_add_ps(_mm_mul_ps(rf0, scale128), shift128);
                rf1 = _mm_add_ps(_mm_mul_ps(rf1, scale128), shift128);

                _mm_storeu_si128((__m128i*)(dst + x), _mm_cvtps_epi32(rf0));
                _mm_storeu_si128((__m128i*)(dst + x + 4), _mm_cvtps_epi32(rf1));
            }
        }
        #elif CV_NEON
        float32x4_t v_shift = vdupq_n_f32(shift);
        for(; x <= size.width - 8; x += 8 )
        {
            int16x8_t v_src = vld1q_s16(src + x);
            float32x4_t v_tmp1 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src)));
            float32x4_t v_tmp2 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src)));

            v_tmp1 = vaddq_f32(vmulq_n_f32(v_tmp1, scale), v_shift);
            v_tmp2 = vaddq_f32(vmulq_n_f32(v_tmp2, scale), v_shift);

            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_tmp1));
            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_tmp2));
        }
        #endif

        for(; x < size.width; x++ )
            dst[x] = saturate_cast<int>(src[x]*scale + shift);
    }
}
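
// In the AVX2 block, _mm256_unpacklo/unpackhi_epi16 interleave within each
// 128-bit lane, which would scramble the element order across the two lanes.
// The preceding _mm256_permute4x64_epi64(v_src, 0xD8) swaps the two middle
// 64-bit quarters (0xD8 encodes the quarter order 0,2,1,3), so after the
// lane-wise unpack the 32-bit results come out in source order again.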

template <typename T, typename DT>
struct Cvt_SIMD
{
    int operator() (const T *, DT *, int) const
    {
        return 0;
    }
};
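
// Cvt_SIMD is the plain-conversion counterpart of cvtScale_SIMD (no scale or
// shift). The generic cvt_ template at the end of this section uses it the
// same way: the functor converts what it can, a scalar tail finishes the row.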

#if CV_SSE2

// from double

template <>
struct Cvt_SIMD<double, uchar>
{
    int operator() (const double * src, uchar * dst, int width) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
            __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));
            __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4));
            __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6));

            v_src0 = _mm_movelh_ps(v_src0, v_src1);
            v_src1 = _mm_movelh_ps(v_src2, v_src3);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0),
                                            _mm_cvtps_epi32(v_src1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_dst));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<double, schar>
{
    int operator() (const double * src, schar * dst, int width) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
            __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));
            __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4));
            __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6));

            v_src0 = _mm_movelh_ps(v_src0, v_src1);
            v_src1 = _mm_movelh_ps(v_src2, v_src3);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0),
                                            _mm_cvtps_epi32(v_src1));
            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_dst));
        }

        return x;
    }
};

#if CV_SSE4_1
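
// _mm_packus_epi32 (pack 32-bit ints to unsigned 16-bit with saturation)
// only exists since SSE4.1, so the double -> ushort specialization needs its
// own guard plus a runtime check instead of the plain USE_SSE2 test.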

template <>
struct Cvt_SIMD<double, ushort>
{
    bool haveSIMD;
    Cvt_SIMD() { haveSIMD = checkHardwareSupport(CV_CPU_SSE4_1); }

    int operator() (const double * src, ushort * dst, int width) const
    {
        int x = 0;

        if (!haveSIMD)
            return x;

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
            __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));
            __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4));
            __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6));

            v_src0 = _mm_movelh_ps(v_src0, v_src1);
            v_src1 = _mm_movelh_ps(v_src2, v_src3);

            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_src0),
                                             _mm_cvtps_epi32(v_src1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

#endif // CV_SSE4_1

template <>
struct Cvt_SIMD<double, short>
{
    int operator() (const double * src, short * dst, int width) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        for ( ; x <= width - 8; x += 8)
        {
            __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
            __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));
            __m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4));
            __m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6));

            v_src0 = _mm_movelh_ps(v_src0, v_src1);
            v_src1 = _mm_movelh_ps(v_src2, v_src3);

            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_src0),
                                            _mm_cvtps_epi32(v_src1));
            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<double, int>
{
    int operator() (const double * src, int * dst, int width) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        for ( ; x <= width - 4; x += 4)
        {
            __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
            __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));
            v_src0 = _mm_movelh_ps(v_src0, v_src1);

            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_src0));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<double, float>
{
    int operator() (const double * src, float * dst, int width) const
    {
        int x = 0;

        if (!USE_SSE2)
            return x;

        for ( ; x <= width - 4; x += 4)
        {
            __m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
            __m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));

            _mm_storeu_ps(dst + x, _mm_movelh_ps(v_src0, v_src1));
        }

        return x;
    }
};
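
// On x86 only the double-source conversions get hand-written SIMD here; the
// NEON branch below covers the integer and float source types instead and
// leaves double to the scalar loop, since ARMv7 NEON offers no
// double-precision vectors.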

#elif CV_NEON

// from uchar

template <>
struct Cvt_SIMD<uchar, schar>
{
    int operator() (const uchar * src, schar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
            vst1_s8(dst + x, vqmovn_s16(vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + x)))));

        return x;
    }
};

template <>
struct Cvt_SIMD<uchar, ushort>
{
    int operator() (const uchar * src, ushort * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
            vst1q_u16(dst + x, vmovl_u8(vld1_u8(src + x)));

        return x;
    }
};

template <>
struct Cvt_SIMD<uchar, short>
{
    int operator() (const uchar * src, short * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
            vst1q_s16(dst + x, vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + x))));

        return x;
    }
};

template <>
struct Cvt_SIMD<uchar, int>
{
    int operator() (const uchar * src, int * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            vst1q_s32(dst + x, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src))));
            vst1q_s32(dst + x + 4, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src))));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<uchar, float>
{
    int operator() (const uchar * src, float * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
            vst1q_f32(dst + x, vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))));
            vst1q_f32(dst + x + 4, vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))));
        }

        return x;
    }
};

// from schar

template <>
struct Cvt_SIMD<schar, uchar>
{
    int operator() (const schar * src, uchar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
            vst1_u8(dst + x, vqmovun_s16(vmovl_s8(vld1_s8(src + x))));

        return x;
    }
};

template <>
struct Cvt_SIMD<schar, short>
{
    int operator() (const schar * src, short * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
            vst1q_s16(dst + x, vmovl_s8(vld1_s8(src + x)));

        return x;
    }
};

template <>
struct Cvt_SIMD<schar, ushort>
{
    int operator() (const schar * src, ushort * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(vmovl_s16(vget_low_s16(v_src))),
                                            vqmovun_s32(vmovl_s16(vget_high_s16(v_src)))));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<schar, int>
{
    int operator() (const schar * src, int * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            vst1q_s32(dst + x, vmovl_s16(vget_low_s16(v_src)));
            vst1q_s32(dst + x + 4, vmovl_s16(vget_high_s16(v_src)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<schar, float>
{
    int operator() (const schar * src, float * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
            vst1q_f32(dst + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))));
            vst1q_f32(dst + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))));
        }

        return x;
    }
};

// from ushort

template <>
struct Cvt_SIMD<ushort, uchar>
{
    int operator() (const ushort * src, uchar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            uint16x8_t v_src1 = vld1q_u16(src + x), v_src2 = vld1q_u16(src + x + 8);
            vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(v_src1), vqmovn_u16(v_src2)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<ushort, schar>
{
    int operator() (const ushort * src, schar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            uint16x8_t v_src1 = vld1q_u16(src + x), v_src2 = vld1q_u16(src + x + 8);
            int32x4_t v_dst10 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src1)));
            int32x4_t v_dst11 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src1)));
            int32x4_t v_dst20 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src2)));
            int32x4_t v_dst21 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src2)));

            vst1q_s8(dst + x, vcombine_s8(vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst10), vqmovn_s32(v_dst11))),
                                          vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst20), vqmovn_s32(v_dst21)))));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<ushort, short>
{
    int operator() (const ushort * src, short * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            int32x4_t v_dst0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src)));
            int32x4_t v_dst1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src)));

            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_dst0), vqmovn_s32(v_dst1)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<ushort, int>
{
    int operator() (const ushort * src, int * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            vst1q_s32(dst + x, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src))));
            vst1q_s32(dst + x + 4, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src))));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<ushort, float>
{
    int operator() (const ushort * src, float * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            uint16x8_t v_src = vld1q_u16(src + x);
            vst1q_f32(dst + x, vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))));
            vst1q_f32(dst + x + 4, vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))));
        }

        return x;
    }
};
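
// Widening an unsigned source into a wider signed destination (e.g. ushort ->
// int above) needs no saturation at all: every source value is representable
// in the destination, so a plain vreinterpret of the zero-extended lanes is
// enough.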

// from short

template <>
struct Cvt_SIMD<short, uchar>
{
    int operator() (const short * src, uchar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            int16x8_t v_src1 = vld1q_s16(src + x), v_src2 = vld1q_s16(src + x + 8);
            vst1q_u8(dst + x, vcombine_u8(vqmovun_s16(v_src1), vqmovun_s16(v_src2)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<short, schar>
{
    int operator() (const short * src, schar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            int16x8_t v_src1 = vld1q_s16(src + x), v_src2 = vld1q_s16(src + x + 8);
            vst1q_s8(dst + x, vcombine_s8(vqmovn_s16(v_src1), vqmovn_s16(v_src2)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<short, ushort>
{
    int operator() (const short * src, ushort * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            uint16x4_t v_dst1 = vqmovun_s32(vmovl_s16(vget_low_s16(v_src)));
            uint16x4_t v_dst2 = vqmovun_s32(vmovl_s16(vget_high_s16(v_src)));
            vst1q_u16(dst + x, vcombine_u16(v_dst1, v_dst2));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<short, int>
{
    int operator() (const short * src, int * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            vst1q_s32(dst + x, vmovl_s16(vget_low_s16(v_src)));
            vst1q_s32(dst + x + 4, vmovl_s16(vget_high_s16(v_src)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<short, float>
{
    int operator() (const short * src, float * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int16x8_t v_src = vld1q_s16(src + x);
            vst1q_f32(dst + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))));
            vst1q_f32(dst + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))));
        }

        return x;
    }
};

// from int

template <>
struct Cvt_SIMD<int, uchar>
{
    int operator() (const int * src, uchar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
            int32x4_t v_src3 = vld1q_s32(src + x + 8), v_src4 = vld1q_s32(src + x + 12);
            uint8x8_t v_dst1 = vqmovn_u16(vcombine_u16(vqmovun_s32(v_src1), vqmovun_s32(v_src2)));
            uint8x8_t v_dst2 = vqmovn_u16(vcombine_u16(vqmovun_s32(v_src3), vqmovun_s32(v_src4)));
            vst1q_u8(dst + x, vcombine_u8(v_dst1, v_dst2));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<int, schar>
{
    int operator() (const int * src, schar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
            int32x4_t v_src3 = vld1q_s32(src + x + 8), v_src4 = vld1q_s32(src + x + 12);
            int8x8_t v_dst1 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src1), vqmovn_s32(v_src2)));
            int8x8_t v_dst2 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src3), vqmovn_s32(v_src4)));
            vst1q_s8(dst + x, vcombine_s8(v_dst1, v_dst2));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<int, ushort>
{
    int operator() (const int * src, ushort * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
            vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(v_src1), vqmovun_s32(v_src2)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<int, short>
{
    int operator() (const int * src, short * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            int32x4_t v_src1 = vld1q_s32(src + x), v_src2 = vld1q_s32(src + x + 4);
            vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_src1), vqmovn_s32(v_src2)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<int, float>
{
    int operator() (const int * src, float * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 4; x += 4)
            vst1q_f32(dst + x, vcvtq_f32_s32(vld1q_s32(src + x)));

        return x;
    }
};

// from float

template <>
struct Cvt_SIMD<float, uchar>
{
    int operator() (const float * src, uchar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            uint32x4_t v_src1 = cv_vrndq_u32_f32(vld1q_f32(src + x));
            uint32x4_t v_src2 = cv_vrndq_u32_f32(vld1q_f32(src + x + 4));
            uint32x4_t v_src3 = cv_vrndq_u32_f32(vld1q_f32(src + x + 8));
            uint32x4_t v_src4 = cv_vrndq_u32_f32(vld1q_f32(src + x + 12));
            uint8x8_t v_dst1 = vqmovn_u16(vcombine_u16(vqmovn_u32(v_src1), vqmovn_u32(v_src2)));
            uint8x8_t v_dst2 = vqmovn_u16(vcombine_u16(vqmovn_u32(v_src3), vqmovn_u32(v_src4)));
            vst1q_u8(dst + x, vcombine_u8(v_dst1, v_dst2));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<float, schar>
{
    int operator() (const float * src, schar * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 16; x += 16)
        {
            int32x4_t v_src1 = cv_vrndq_s32_f32(vld1q_f32(src + x));
            int32x4_t v_src2 = cv_vrndq_s32_f32(vld1q_f32(src + x + 4));
            int32x4_t v_src3 = cv_vrndq_s32_f32(vld1q_f32(src + x + 8));
            int32x4_t v_src4 = cv_vrndq_s32_f32(vld1q_f32(src + x + 12));
            int8x8_t v_dst1 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src1), vqmovn_s32(v_src2)));
            int8x8_t v_dst2 = vqmovn_s16(vcombine_s16(vqmovn_s32(v_src3), vqmovn_s32(v_src4)));
            vst1q_s8(dst + x, vcombine_s8(v_dst1, v_dst2));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<float, ushort>
{
    int operator() (const float * src, ushort * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 8; x += 8)
        {
            uint32x4_t v_src1 = cv_vrndq_u32_f32(vld1q_f32(src + x));
            uint32x4_t v_src2 = cv_vrndq_u32_f32(vld1q_f32(src + x + 4));
            vst1q_u16(dst + x, vcombine_u16(vqmovn_u32(v_src1), vqmovn_u32(v_src2)));
        }

        return x;
    }
};

template <>
struct Cvt_SIMD<float, int>
{
    int operator() (const float * src, int * dst, int width) const
    {
        int x = 0;

        for ( ; x <= width - 4; x += 4)
            vst1q_s32(dst + x, cv_vrndq_s32_f32(vld1q_f32(src + x)));

        return x;
    }
};

#endif
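
// FP16 <-> FP32 conversion. On ARM with a new enough GCC the compiler's
// native __fp16 type does the work; everywhere else the software routines
// below implement IEEE 754 binary16 <-> binary32 by hand, including
// round-to-nearest-even and the Inf/NaN/subnormal corner cases.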

#if !( ( defined (__arm__) || defined (__aarch64__) ) && ( defined (__GNUC__) && ( ( ( 4 <= __GNUC__ ) && ( 7 <= __GNUC_MINOR__ ) ) || ( 5 <= __GNUC__ ) ) ) )
// constants describing the binary16 / binary32 floating-point formats
const unsigned int kShiftSignificand = 13;
const unsigned int kMaskFp16Significand = 0x3ff;
const unsigned int kBiasFp16Exponent = 15;
const unsigned int kBiasFp32Exponent = 127;
#endif

#if ( defined (__arm__) || defined (__aarch64__) ) && ( defined (__GNUC__) && ( ( ( 4 <= __GNUC__ ) && ( 7 <= __GNUC_MINOR__ ) ) || ( 5 <= __GNUC__ ) ) )
static float convertFp16SW(short fp16)
{
    // Fp16 -> Fp32
    Cv16suf a;
    a.i = fp16;
    return (float)a.h;
}
#else
static float convertFp16SW(short fp16)
{
    // Fp16 -> Fp32
    Cv16suf b;
    b.i = fp16;
    int exponent = b.fmt.exponent - kBiasFp16Exponent;
    int significand = b.fmt.significand;

    Cv32suf a;
    a.i = 0;
    a.fmt.sign = b.fmt.sign; // sign bit
    if( exponent == 16 )
    {
        // Inf or NaN
        a.i = a.i | 0x7F800000;
        if( significand != 0 )
        {
            // NaN
#if defined(__x86_64__) || defined(_M_X64)
            // 64bit
            a.i = a.i | 0x7FC00000;
#endif
            a.fmt.significand = a.fmt.significand | (significand << kShiftSignificand);
        }
        return a.f;
    }
    else if ( exponent == -15 )
    {
        // subnormal in Fp16
        if( significand == 0 )
        {
            // zero
            return a.f;
        }
        else
        {
            int shift = -1;
            while( ( significand & 0x400 ) == 0 )
            {
                significand = significand << 1;
                shift++;
            }
            significand = significand & kMaskFp16Significand;
            exponent -= shift;
        }
    }

    a.fmt.exponent = (exponent + kBiasFp32Exponent);
    a.fmt.significand = significand << kShiftSignificand;
    return a.f;
}
#endif
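
// A few reference encodings useful for sanity-checking the software path
// (standard IEEE 754 binary16 values):
//
//     0x3C00 -> 1.0f        0x7BFF -> 65504.0f (largest finite fp16)
//     0x0001 -> 2^-24       (smallest fp16 subnormal)
//     0x7C00 -> +Inf        0x7E00 -> a quiet NaN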

#if ( defined (__arm__) || defined (__aarch64__) ) && ( defined (__GNUC__) && ( ( ( 4 <= __GNUC__ ) && ( 7 <= __GNUC_MINOR__ ) ) || ( 5 <= __GNUC__ ) ) )
static short convertFp16SW(float fp32)
{
    // Fp32 -> Fp16
    Cv16suf a;
    a.h = (__fp16)fp32;
    return a.i;
}
#else
static short convertFp16SW(float fp32)
{
    // Fp32 -> Fp16
    Cv32suf a;
    a.f = fp32;
    int exponent = a.fmt.exponent - kBiasFp32Exponent;
    int significand = a.fmt.significand;

    Cv16suf result;
    result.i = 0;
    unsigned int absolute = a.i & 0x7fffffff;
    if( 0x477ff000 <= absolute )
    {
        // Inf in Fp16
        result.i = result.i | 0x7C00;
        if( exponent == 128 && significand != 0 )
        {
            // NaN
            result.i = (short)( result.i | 0x200 | ( significand >> kShiftSignificand ) );
        }
    }
    else if ( absolute < 0x33000001 )
    {
        // too small for fp16
        result.i = 0;
    }
    else if ( absolute < 0x33c00000 )
    {
        result.i = 1;
    }
    else if ( absolute < 0x34200001 )
    {
        result.i = 2;
    }
    else if ( absolute < 0x387fe000 )
    {
        // subnormal in Fp16
        int fp16Significand = significand | 0x800000;
        int bitShift = (-exponent) - 1;
        fp16Significand = fp16Significand >> bitShift;

        // special cases to round up
        bitShift = exponent + 24;
        int threshold = ( ( 0x400000 >> bitShift ) | ( ( ( significand & ( 0x800000 >> bitShift ) ) >> ( 126 - a.fmt.exponent ) ) ^ 1 ) );
        if( threshold <= ( significand & ( 0xffffff >> ( exponent + 25 ) ) ) )
        {
            fp16Significand++;
        }
        result.i = (short)fp16Significand;
    }
    else
    {
        // normalized number in Fp16
        // exponent
        result.fmt.exponent = ( exponent + kBiasFp16Exponent );

        // significand
        short fp16Significand = (short)(significand >> kShiftSignificand);
        result.fmt.significand = fp16Significand;

        // special cases to round up
        short lsb10bitsFp32 = (significand & 0x1fff);
        short threshold = 0x1000 + ( ( fp16Significand & 0x1 ) ? 0 : 1 );
        if( threshold <= lsb10bitsFp32 )
        {
            result.i++;
        }
        else if ( fp16Significand == 0x3ff && exponent == -15)
        {
            result.i++;
        }
    }

    // sign bit
    result.fmt.sign = a.fmt.sign;
    return result.i;
}
#endif

// template for FP16 HW conversion function
template<typename T, typename DT> static void
cvtScaleHalf_( const T* src, size_t sstep, DT* dst, size_t dstep, Size size);

template<> void
cvtScaleHalf_<float, short>( const float* src, size_t sstep, short* dst, size_t dstep, Size size)
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    if( checkHardwareSupport(CV_CPU_FP16) )
    {
        for( ; size.height--; src += sstep, dst += dstep )
        {
            int x = 0;

#if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(i386)
            if ( ( (intptr_t)dst & 0xf ) == 0 )
#endif
            {
#if CV_FP16
                for ( ; x <= size.width - 4; x += 4)
                {
                    v_float32x4 v_src = v_load(src + x);

                    v_float16x4 v_dst = v_cvt_f16(v_src);

                    v_store_f16(dst + x, v_dst);
                }
#endif
            }
            for ( ; x < size.width; x++ )
            {
                dst[x] = convertFp16SW(src[x]);
            }
        }
    }
    else
    {
        for( ; size.height--; src += sstep, dst += dstep )
        {
            int x = 0;
            for ( ; x < size.width; x++ )
            {
                dst[x] = convertFp16SW(src[x]);
            }
        }
    }
}

template<> void
cvtScaleHalf_<short, float>( const short* src, size_t sstep, float* dst, size_t dstep, Size size)
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    if( checkHardwareSupport(CV_CPU_FP16) )
    {
        for( ; size.height--; src += sstep, dst += dstep )
        {
            int x = 0;

#if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(i386)
            if ( ( (intptr_t)src & 0xf ) == 0 )
#endif
            {
#if CV_FP16
                for ( ; x <= size.width - 4; x += 4)
                {
                    v_float16x4 v_src = v_load_f16(src + x);

                    v_float32x4 v_dst = v_cvt_f32(v_src);

                    v_store(dst + x, v_dst);
                }
#endif
            }
            for ( ; x < size.width; x++ )
            {
                dst[x] = convertFp16SW(src[x]);
            }
        }
    }
    else
    {
        for( ; size.height--; src += sstep, dst += dstep )
        {
            int x = 0;
            for ( ; x < size.width; x++ )
            {
                dst[x] = convertFp16SW(src[x]);
            }
        }
    }
}
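
/* Editor's note (added commentary, not upstream code): both specializations
   above share one shape: a runtime checkHardwareSupport(CV_CPU_FP16) test
   selects the vectorized path, which on x86 additionally requires the fp16
   pointer to be 16-byte aligned before the universal-intrinsic 4-element
   load/convert/store is used; everything else (the row tail, unaligned rows,
   or the whole image when FP16 hardware is absent) falls through to scalar
   convertFp16SW, whose fp16->fp32 overload is presumably defined alongside
   the fp32->fp16 one earlier in this file. */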

template<typename T, typename DT> static void
cvt_( const T* src, size_t sstep,
      DT* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);
    Cvt_SIMD<T, DT> vop;

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = vop(src, dst, size.width);
        #if CV_ENABLE_UNROLLED
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(src[x]);
            t1 = saturate_cast<DT>(src[x+1]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(src[x+2]);
            t1 = saturate_cast<DT>(src[x+3]);
            dst[x+2] = t0; dst[x+3] = t1;
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(src[x]);
    }
}
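
/* Editor's note (added commentary, not upstream code): saturate_cast<> is
   what makes these conversions clamp instead of wrap. For example, with
   DT = uchar:
     saturate_cast<uchar>(300)  == 255   // clamped, not 300 % 256 == 44
     saturate_cast<uchar>(-5)   == 0
     saturate_cast<uchar>(3.7f) == 4     // floating-point input is rounded
*/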

//vz optimized template specialization, test Core_ConvertScale/ElemWiseTest
template<> void
cvt_<float, short>( const float* src, size_t sstep,
                    short* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        #if CV_SSE2
        if(USE_SSE2)
        {
            for( ; x <= size.width - 8; x += 8 )
            {
                __m128 src128 = _mm_loadu_ps (src + x);
                __m128i src_int128 = _mm_cvtps_epi32 (src128);

                src128 = _mm_loadu_ps (src + x + 4);
                __m128i src1_int128 = _mm_cvtps_epi32 (src128);

                src1_int128 = _mm_packs_epi32(src_int128, src1_int128);
                _mm_storeu_si128((__m128i*)(dst + x),src1_int128);
            }
        }
        #elif CV_NEON
        for( ; x <= size.width - 8; x += 8 )
        {
            float32x4_t v_src1 = vld1q_f32(src + x), v_src2 = vld1q_f32(src + x + 4);
            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_src1)),
                                           vqmovn_s32(cv_vrndq_s32_f32(v_src2)));
            vst1q_s16(dst + x, v_dst);
        }
        #endif
        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<short>(src[x]);
    }
}
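
/* Editor's note (added commentary, not upstream code): the SIMD branches get
   rounding and saturation from the instruction set itself. On SSE2,
   _mm_cvtps_epi32 rounds under the current rounding mode (round-to-nearest-even
   by default) and _mm_packs_epi32 narrows two int32 vectors to int16 with
   signed saturation; on NEON, cv_vrndq_s32_f32 rounds and vqmovn_s32 is a
   saturating narrow. Both therefore match saturate_cast<short> in the scalar
   tail loop. */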

template<typename T> static void
cpy_( const T* src, size_t sstep, T* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
        memcpy(dst, src, size.width*sizeof(src[0]));
}

#define DEF_CVT_SCALE_ABS_FUNC(suffix, tfunc, stype, dtype, wtype) \
static void cvtScaleAbs##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                                 dtype* dst, size_t dstep, Size size, double* scale) \
{ \
    tfunc(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}

#define DEF_CVT_SCALE_FP16_FUNC(suffix, stype, dtype) \
static void cvtScaleHalf##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                                  dtype* dst, size_t dstep, Size size, double*) \
{ \
    cvtScaleHalf_<stype,dtype>(src, sstep, dst, dstep, size); \
}

#define DEF_CVT_SCALE_FUNC(suffix, stype, dtype, wtype) \
static void cvtScale##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                              dtype* dst, size_t dstep, Size size, double* scale) \
{ \
    cvtScale_(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}

#if defined(HAVE_IPP)
#define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    CV_IPP_RUN(src && dst, CV_INSTRUMENT_FUN_IPP(ippiConvert_##ippFavor, src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height)) >= 0) \
    cvt_(src, sstep, dst, dstep, size); \
}

#define DEF_CVT_FUNC_F2(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    CV_IPP_RUN(src && dst, CV_INSTRUMENT_FUN_IPP(ippiConvert_##ippFavor, src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height), ippRndFinancial, 0) >= 0) \
    cvt_(src, sstep, dst, dstep, size); \
}
#else
#define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    cvt_(src, sstep, dst, dstep, size); \
}
#define DEF_CVT_FUNC_F2 DEF_CVT_FUNC_F
#endif

#define DEF_CVT_FUNC(suffix, stype, dtype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    cvt_(src, sstep, dst, dstep, size); \
}

#define DEF_CPY_FUNC(suffix, stype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         stype* dst, size_t dstep, Size size, double*) \
{ \
    cpy_(src, sstep, dst, dstep, size); \
}
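
/* Editor's note (added commentary, not upstream code): for reference, one
   expansion of the plain macro, DEF_CVT_FUNC(64f8u, double, uchar), produces:

     static void cvt64f8u( const double* src, size_t sstep, const uchar*, size_t,
                           uchar* dst, size_t dstep, Size size, double*)
     {
         cvt_(src, sstep, dst, dstep, size);
     }

   Every generated function shares this BinaryFunc-compatible signature; the
   unused uchar*/size_t/double* parameters exist only to keep the signature
   uniform for the dispatch tables below. */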

DEF_CVT_SCALE_ABS_FUNC(8u, cvtScaleAbs_, uchar, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(8s8u, cvtScaleAbs_, schar, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(16u8u, cvtScaleAbs_, ushort, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(16s8u, cvtScaleAbs_, short, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(32s8u, cvtScaleAbs_, int, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(32f8u, cvtScaleAbs_, float, uchar, float)
DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float)

DEF_CVT_SCALE_FP16_FUNC(32f16f, float, short)
DEF_CVT_SCALE_FP16_FUNC(16f32f, short, float)

DEF_CVT_SCALE_FUNC(8u, uchar, uchar, float)
DEF_CVT_SCALE_FUNC(8s8u, schar, uchar, float)
DEF_CVT_SCALE_FUNC(16u8u, ushort, uchar, float)
DEF_CVT_SCALE_FUNC(16s8u, short, uchar, float)
DEF_CVT_SCALE_FUNC(32s8u, int, uchar, float)
DEF_CVT_SCALE_FUNC(32f8u, float, uchar, float)
DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float)

DEF_CVT_SCALE_FUNC(8u8s, uchar, schar, float)
DEF_CVT_SCALE_FUNC(8s, schar, schar, float)
DEF_CVT_SCALE_FUNC(16u8s, ushort, schar, float)
DEF_CVT_SCALE_FUNC(16s8s, short, schar, float)
DEF_CVT_SCALE_FUNC(32s8s, int, schar, float)
DEF_CVT_SCALE_FUNC(32f8s, float, schar, float)
DEF_CVT_SCALE_FUNC(64f8s, double, schar, float)

DEF_CVT_SCALE_FUNC(8u16u, uchar, ushort, float)
DEF_CVT_SCALE_FUNC(8s16u, schar, ushort, float)
DEF_CVT_SCALE_FUNC(16u, ushort, ushort, float)
DEF_CVT_SCALE_FUNC(16s16u, short, ushort, float)
DEF_CVT_SCALE_FUNC(32s16u, int, ushort, float)
DEF_CVT_SCALE_FUNC(32f16u, float, ushort, float)
DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float)

DEF_CVT_SCALE_FUNC(8u16s, uchar, short, float)
DEF_CVT_SCALE_FUNC(8s16s, schar, short, float)
DEF_CVT_SCALE_FUNC(16u16s, ushort, short, float)
DEF_CVT_SCALE_FUNC(16s, short, short, float)
DEF_CVT_SCALE_FUNC(32s16s, int, short, float)
DEF_CVT_SCALE_FUNC(32f16s, float, short, float)
DEF_CVT_SCALE_FUNC(64f16s, double, short, float)

DEF_CVT_SCALE_FUNC(8u32s, uchar, int, float)
DEF_CVT_SCALE_FUNC(8s32s, schar, int, float)
DEF_CVT_SCALE_FUNC(16u32s, ushort, int, float)
DEF_CVT_SCALE_FUNC(16s32s, short, int, float)
DEF_CVT_SCALE_FUNC(32s, int, int, double)
DEF_CVT_SCALE_FUNC(32f32s, float, int, float)
DEF_CVT_SCALE_FUNC(64f32s, double, int, double)

DEF_CVT_SCALE_FUNC(8u32f, uchar, float, float)
DEF_CVT_SCALE_FUNC(8s32f, schar, float, float)
DEF_CVT_SCALE_FUNC(16u32f, ushort, float, float)
DEF_CVT_SCALE_FUNC(16s32f, short, float, float)
DEF_CVT_SCALE_FUNC(32s32f, int, float, double)
DEF_CVT_SCALE_FUNC(32f, float, float, float)
DEF_CVT_SCALE_FUNC(64f32f, double, float, double)

DEF_CVT_SCALE_FUNC(8u64f, uchar, double, double)
DEF_CVT_SCALE_FUNC(8s64f, schar, double, double)
DEF_CVT_SCALE_FUNC(16u64f, ushort, double, double)
DEF_CVT_SCALE_FUNC(16s64f, short, double, double)
DEF_CVT_SCALE_FUNC(32s64f, int, double, double)
DEF_CVT_SCALE_FUNC(32f64f, float, double, double)
DEF_CVT_SCALE_FUNC(64f, double, double, double)

DEF_CPY_FUNC(8u, uchar)
DEF_CVT_FUNC_F(8s8u, schar, uchar, 8s8u_C1Rs)
DEF_CVT_FUNC_F(16u8u, ushort, uchar, 16u8u_C1R)
DEF_CVT_FUNC_F(16s8u, short, uchar, 16s8u_C1R)
DEF_CVT_FUNC_F(32s8u, int, uchar, 32s8u_C1R)
DEF_CVT_FUNC_F2(32f8u, float, uchar, 32f8u_C1RSfs)
DEF_CVT_FUNC(64f8u, double, uchar)

DEF_CVT_FUNC_F2(8u8s, uchar, schar, 8u8s_C1RSfs)
DEF_CVT_FUNC_F2(16u8s, ushort, schar, 16u8s_C1RSfs)
DEF_CVT_FUNC_F2(16s8s, short, schar, 16s8s_C1RSfs)
DEF_CVT_FUNC_F(32s8s, int, schar, 32s8s_C1R)
DEF_CVT_FUNC_F2(32f8s, float, schar, 32f8s_C1RSfs)
DEF_CVT_FUNC(64f8s, double, schar)

DEF_CVT_FUNC_F(8u16u, uchar, ushort, 8u16u_C1R)
DEF_CVT_FUNC_F(8s16u, schar, ushort, 8s16u_C1Rs)
DEF_CPY_FUNC(16u, ushort)
DEF_CVT_FUNC_F(16s16u, short, ushort, 16s16u_C1Rs)
DEF_CVT_FUNC_F2(32s16u, int, ushort, 32s16u_C1RSfs)
DEF_CVT_FUNC_F2(32f16u, float, ushort, 32f16u_C1RSfs)
DEF_CVT_FUNC(64f16u, double, ushort)

DEF_CVT_FUNC_F(8u16s, uchar, short, 8u16s_C1R)
DEF_CVT_FUNC_F(8s16s, schar, short, 8s16s_C1R)
DEF_CVT_FUNC_F2(16u16s, ushort, short, 16u16s_C1RSfs)
DEF_CVT_FUNC_F2(32s16s, int, short, 32s16s_C1RSfs)
DEF_CVT_FUNC(32f16s, float, short)
DEF_CVT_FUNC(64f16s, double, short)

DEF_CVT_FUNC_F(8u32s, uchar, int, 8u32s_C1R)
DEF_CVT_FUNC_F(8s32s, schar, int, 8s32s_C1R)
DEF_CVT_FUNC_F(16u32s, ushort, int, 16u32s_C1R)
DEF_CVT_FUNC_F(16s32s, short, int, 16s32s_C1R)
DEF_CPY_FUNC(32s, int)
DEF_CVT_FUNC_F2(32f32s, float, int, 32f32s_C1RSfs)
DEF_CVT_FUNC(64f32s, double, int)

DEF_CVT_FUNC_F(8u32f, uchar, float, 8u32f_C1R)
DEF_CVT_FUNC_F(8s32f, schar, float, 8s32f_C1R)
DEF_CVT_FUNC_F(16u32f, ushort, float, 16u32f_C1R)
DEF_CVT_FUNC_F(16s32f, short, float, 16s32f_C1R)
DEF_CVT_FUNC_F(32s32f, int, float, 32s32f_C1R)
DEF_CVT_FUNC(64f32f, double, float)

DEF_CVT_FUNC(8u64f, uchar, double)
DEF_CVT_FUNC(8s64f, schar, double)
DEF_CVT_FUNC(16u64f, ushort, double)
DEF_CVT_FUNC(16s64f, short, double)
DEF_CVT_FUNC(32s64f, int, double)
DEF_CVT_FUNC(32f64f, float, double)
DEF_CPY_FUNC(64s, int64)

static BinaryFunc getCvtScaleAbsFunc(int depth)
{
    static BinaryFunc cvtScaleAbsTab[] =
    {
        (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u,
        (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u,
        (BinaryFunc)cvtScaleAbs64f8u, 0
    };

    return cvtScaleAbsTab[depth];
}

BinaryFunc getConvertFuncFp16(int ddepth)
{
    static BinaryFunc cvtTab[] =
    {
        0, 0, 0,
        (BinaryFunc)(cvtScaleHalf32f16f), 0, (BinaryFunc)(cvtScaleHalf16f32f),
        0, 0,
    };
    return cvtTab[CV_MAT_DEPTH(ddepth)];
}

BinaryFunc getConvertFunc(int sdepth, int ddepth)
{
    static BinaryFunc cvtTab[][8] =
    {
        {
            (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u),
            (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u),
            (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s),
            (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s),
            (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u,
            (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u),
            (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s),
            (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s),
            (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s),
            (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s),
            (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f),
            (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s,
            (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f),
            (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f),
            (BinaryFunc)(cvt64s), 0
        },
        {
            0, 0, 0, 0, 0, 0, 0, 0
        }
    };

    return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}
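
/* Editor's note (added commentary, not upstream code): cvtTab is indexed as
   [destination depth][source depth]. Its diagonal deliberately reuses the
   memcpy-based copy kernels by element size rather than by exact type:
   cvt8u also serves 8s->8s, cvt16u also serves 16s->16s, and cvt32s also
   serves 32f->32f, since a same-size copy is indifferent to signedness or
   to the int/float interpretation of the bytes. */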

static BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
{
    static BinaryFunc cvtScaleTab[][8] =
    {
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u),
            (BinaryFunc)cvtScale64f8u, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s),
            (BinaryFunc)cvtScale64f8s, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u),
            (BinaryFunc)cvtScale64f16u, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s),
            (BinaryFunc)cvtScale64f16s, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s),
            (BinaryFunc)cvtScale64f32s, 0
        },
        {
            (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f),
            (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f),
            (BinaryFunc)cvtScale64f32f, 0
        },
        {
            (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f,
            (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f,
            (BinaryFunc)cvtScale64f, 0
        },
        {
            0, 0, 0, 0, 0, 0, 0, 0
        }
    };

    return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}

#ifdef HAVE_OPENCL

static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
    const ocl::Device & d = ocl::Device::getDefault();

    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    bool doubleSupport = d.doubleFPConfig() > 0;
    if (!doubleSupport && depth == CV_64F)
        return false;

    _dst.create(_src.size(), CV_8UC(cn));
    int kercn = 1;
    if (d.isIntel())
    {
        static const int vectorWidths[] = {4, 4, 4, 4, 4, 4, 4, -1};
        kercn = ocl::checkOptimalVectorWidth( vectorWidths, _src, _dst,
                                              noArray(), noArray(), noArray(),
                                              noArray(), noArray(), noArray(),
                                              noArray(), ocl::OCL_VECTOR_MAX);
    }
    else
        kercn = ocl::predictOptimalVectorWidthMax(_src, _dst);

    int rowsPerWI = d.isIntel() ? 4 : 1;
    char cvt[2][50];
    int wdepth = std::max(depth, CV_32F);
    String build_opt = format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D srcT1=%s"
                              " -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s"
                              " -D workT1=%s -D rowsPerWI=%d%s",
                              ocl::typeToStr(CV_8UC(kercn)),
                              ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
                              ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)), wdepth,
                              ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
                              ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
                              ocl::typeToStr(wdepth), rowsPerWI,
                              doubleSupport ? " -D DOUBLE_SUPPORT" : "");
    ocl::Kernel k("KF", ocl::core::arithm_oclsrc, build_opt);
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    UMat dst = _dst.getUMat();

    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
            dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);

    if (wdepth == CV_32F)
        k.args(srcarg, dstarg, (float)alpha, (float)beta);
    else if (wdepth == CV_64F)
        k.args(srcarg, dstarg, alpha, beta);

    size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}

#endif

}

void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
    CV_INSTRUMENT_REGION()

    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
               ocl_convertScaleAbs(_src, _dst, alpha, beta))

    Mat src = _src.getMat();
    int cn = src.channels();
    double scale[] = {alpha, beta};
    _dst.create( src.dims, src.size, CV_8UC(cn) );
    Mat dst = _dst.getMat();
    BinaryFunc func = getCvtScaleAbsFunc(src.depth());
    CV_Assert( func != 0 );

    if( src.dims <= 2 )
    {
        Size sz = getContinuousSize(src, dst, cn);
        func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
    }
    else
    {
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)it.size*cn, 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
    }
}
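
/* Editor's note (added commentary, not upstream code): a minimal usage sketch.
   convertScaleAbs computes saturate_cast<uchar>(|alpha*src + beta|) per
   element, so a typical use is visualizing signed filter responses:

     cv::Mat gray = cv::Mat::zeros(480, 640, CV_8U);  // some input image
     cv::Mat grad, vis;
     cv::Sobel(gray, grad, CV_16S, 1, 0);   // signed gradients (imgproc)
     cv::convertScaleAbs(grad, vis);        // |grad|, saturated to CV_8U
*/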

void cv::convertFp16( InputArray _src, OutputArray _dst)
{
    CV_INSTRUMENT_REGION()

    Mat src = _src.getMat();
    int ddepth = 0;

    switch( src.depth() )
    {
    case CV_32F:
        ddepth = CV_16S;
        break;
    case CV_16S:
        ddepth = CV_32F;
        break;
    default:
        CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
        return;
    }

    int type = CV_MAKETYPE(ddepth, src.channels());
    _dst.create( src.dims, src.size, type );
    Mat dst = _dst.getMat();
    BinaryFunc func = getConvertFuncFp16(ddepth);
    int cn = src.channels();
    CV_Assert( func != 0 );

    if( src.dims <= 2 )
    {
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, 0);
    }
    else
    {
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)(it.size*cn), 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, 0);
    }
}
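
/* Editor's note (added commentary, not upstream code): convertFp16 converts
   in both directions depending on the input depth; the fp16 bit patterns
   travel in a CV_16S matrix because this version has no dedicated 16-bit
   float type:

     cv::Mat src = cv::Mat::ones(4, 4, CV_32F), half, restored;
     cv::convertFp16(src, half);        // CV_32F -> CV_16S (fp16 bits)
     cv::convertFp16(half, restored);   // CV_16S (fp16 bits) -> CV_32F
*/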

void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
    CV_INSTRUMENT_REGION()

    bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;

    if( _type < 0 )
        _type = _dst.fixedType() ? _dst.type() : type();
    else
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());

    int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
    if( sdepth == ddepth && noScale )
    {
        copyTo(_dst);
        return;
    }

    Mat src = *this;

    BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
    double scale[] = {alpha, beta};
    int cn = channels();
    CV_Assert( func != 0 );

    if( dims <= 2 )
    {
        _dst.create( size(), _type );
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
    }
    else
    {
        _dst.create( dims, size, _type );
        Mat dst = _dst.getMat();
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)(it.size*cn), 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, scale);
    }
}
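
/* Editor's note (added commentary, not upstream code): a minimal usage sketch
   of convertTo, which computes dst(i) = saturate_cast<rtype>(alpha*src(i) + beta):

     cv::Mat m32f(3, 3, CV_32F, cv::Scalar(0.5));
     cv::Mat m8u;
     m32f.convertTo(m8u, CV_8U, 255.0);   // 0.5f*255 = 127.5 -> 128 (rounded, saturated)
*/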

/****************************************************************************************\
*                                    LUT Transform                                       *
\****************************************************************************************/

namespace cv
{

template<typename T> static void
LUT8u_( const uchar* src, const T* lut, T* dst, int len, int cn, int lutcn )
{
    if( lutcn == 1 )
    {
        for( int i = 0; i < len*cn; i++ )
            dst[i] = lut[src[i]];
    }
    else
    {
        for( int i = 0; i < len*cn; i += cn )
            for( int k = 0; k < cn; k++ )
                dst[i+k] = lut[src[i+k]*cn+k];
    }
}
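
/* Editor's note (added commentary, not upstream code): indexing in the
   multi-channel branch above, for cn == lutcn == 3: a pixel (b, g, r) becomes
   (lut[b*3+0], lut[g*3+1], lut[r*3+2]), i.e. channel k reads an interleaved
   256-entry table at offsets k, 3+k, 6+k, ... With lutcn == 1 the single
   table is applied to every channel alike. */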

static void LUT8u_8u( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_8s( const uchar* src, const schar* lut, schar* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_16u( const uchar* src, const ushort* lut, ushort* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_16s( const uchar* src, const short* lut, short* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_32s( const uchar* src, const int* lut, int* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_32f( const uchar* src, const float* lut, float* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_64f( const uchar* src, const double* lut, double* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

typedef void (*LUTFunc)( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn );

static LUTFunc lutTab[] =
{
    (LUTFunc)LUT8u_8u, (LUTFunc)LUT8u_8s, (LUTFunc)LUT8u_16u, (LUTFunc)LUT8u_16s,
    (LUTFunc)LUT8u_32s, (LUTFunc)LUT8u_32f, (LUTFunc)LUT8u_64f, 0
};

#ifdef HAVE_OPENCL

static bool ocl_LUT(InputArray _src, InputArray _lut, OutputArray _dst)
{
    int lcn = _lut.channels(), dcn = _src.channels(), ddepth = _lut.depth();

    UMat src = _src.getUMat(), lut = _lut.getUMat();
    _dst.create(src.size(), CV_MAKETYPE(ddepth, dcn));
    UMat dst = _dst.getUMat();
    int kercn = lcn == 1 ? std::min(4, ocl::predictOptimalVectorWidth(_src, _dst)) : dcn;

    ocl::Kernel k("LUT", ocl::core::lut_oclsrc,
                  format("-D dcn=%d -D lcn=%d -D srcT=%s -D dstT=%s", kercn, lcn,
                         ocl::typeToStr(src.depth()), ocl::memopTypeToStr(ddepth)));
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::ReadOnlyNoSize(lut),
           ocl::KernelArg::WriteOnly(dst, dcn, kercn));

    size_t globalSize[2] = { (size_t)dst.cols * dcn / kercn, ((size_t)dst.rows + 3) / 4 };
    return k.run(2, globalSize, NULL, false);
}

#endif

#if defined(HAVE_IPP)
namespace ipp {

#if IPP_DISABLE_BLOCK // there are no performance benefits (PR #2653)
class IppLUTParallelBody_LUTC1 : public ParallelLoopBody
{
public:
    bool* ok;
    const Mat& src_;
    const Mat& lut_;
    Mat& dst_;

    typedef IppStatus (*IppFn)(const Ipp8u* pSrc, int srcStep, void* pDst, int dstStep,
                          IppiSize roiSize, const void* pTable, int nBitSize);
    IppFn fn;

    int width;

    IppLUTParallelBody_LUTC1(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
        : ok(_ok), src_(src), lut_(lut), dst_(dst)
    {
        width = dst.cols * dst.channels();

        size_t elemSize1 = CV_ELEM_SIZE1(dst.depth());

        fn =
                elemSize1 == 1 ? (IppFn)ippiLUTPalette_8u_C1R :
                elemSize1 == 4 ? (IppFn)ippiLUTPalette_8u32u_C1R :
                NULL;

        *ok = (fn != NULL);
    }

    void operator()( const cv::Range& range ) const
    {
        if (!*ok)
            return;

        const int row0 = range.start;
        const int row1 = range.end;

        Mat src = src_.rowRange(row0, row1);
        Mat dst = dst_.rowRange(row0, row1);

        IppiSize sz = { width, dst.rows };

        CV_DbgAssert(fn != NULL);
        if (fn(src.data, (int)src.step[0], dst.data, (int)dst.step[0], sz, lut_.data, 8) < 0)
        {
            setIppErrorStatus();
            *ok = false;
        }
        CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
    }
private:
    IppLUTParallelBody_LUTC1(const IppLUTParallelBody_LUTC1&);
    IppLUTParallelBody_LUTC1& operator=(const IppLUTParallelBody_LUTC1&);
};
#endif

class IppLUTParallelBody_LUTCN : public ParallelLoopBody
{
public:
    bool *ok;
    const Mat& src_;
    const Mat& lut_;
    Mat& dst_;

    int lutcn;

    uchar* lutBuffer;
    uchar* lutTable[4];

    IppLUTParallelBody_LUTCN(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
        : ok(_ok), src_(src), lut_(lut), dst_(dst), lutBuffer(NULL)
    {
        lutcn = lut.channels();
        IppiSize sz256 = {256, 1};

        size_t elemSize1 = dst.elemSize1();
        CV_DbgAssert(elemSize1 == 1);
        lutBuffer = (uchar*)ippMalloc(256 * (int)elemSize1 * 4);
        lutTable[0] = lutBuffer + 0;
        lutTable[1] = lutBuffer + 1 * 256 * elemSize1;
        lutTable[2] = lutBuffer + 2 * 256 * elemSize1;
        lutTable[3] = lutBuffer + 3 * 256 * elemSize1;

        CV_DbgAssert(lutcn == 3 || lutcn == 4);
        if (lutcn == 3)
        {
            IppStatus status = CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C3P3R, lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
            if (status < 0)
            {
                setIppErrorStatus();
                return;
            }
            CV_IMPL_ADD(CV_IMPL_IPP);
        }
        else if (lutcn == 4)
        {
            IppStatus status = CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C4P4R, lut.ptr(), (int)lut.step[0], lutTable, (int)lut.step[0], sz256);
            if (status < 0)
            {
                setIppErrorStatus();
                return;
            }
            CV_IMPL_ADD(CV_IMPL_IPP);
        }

        *ok = true;
    }

    ~IppLUTParallelBody_LUTCN()
    {
        if (lutBuffer != NULL)
            ippFree(lutBuffer);
        lutBuffer = NULL;
        lutTable[0] = NULL;
    }

    void operator()( const cv::Range& range ) const
    {
        if (!*ok)
            return;

        const int row0 = range.start;
        const int row1 = range.end;

        Mat src = src_.rowRange(row0, row1);
        Mat dst = dst_.rowRange(row0, row1);

        if (lutcn == 3)
        {
            if (CV_INSTRUMENT_FUN_IPP(ippiLUTPalette_8u_C3R,
                    src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
                    ippiSize(dst.size()), lutTable, 8) >= 0)
            {
                CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
                return;
            }
        }
        else if (lutcn == 4)
        {
            if (CV_INSTRUMENT_FUN_IPP(ippiLUTPalette_8u_C4R,
                    src.ptr(), (int)src.step[0], dst.ptr(), (int)dst.step[0],
                    ippiSize(dst.size()), lutTable, 8) >= 0)
            {
                CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
                return;
            }
        }
        setIppErrorStatus();
        *ok = false;
    }
private:
    IppLUTParallelBody_LUTCN(const IppLUTParallelBody_LUTCN&);
    IppLUTParallelBody_LUTCN& operator=(const IppLUTParallelBody_LUTCN&);
};
} // namespace ipp

static bool ipp_lut(Mat &src, Mat &lut, Mat &dst)
{
    CV_INSTRUMENT_REGION_IPP()

    int lutcn = lut.channels();

    if(src.dims > 2)
        return false;

    bool ok = false;
    Ptr<ParallelLoopBody> body;

    size_t elemSize1 = CV_ELEM_SIZE1(dst.depth());
#if IPP_DISABLE_BLOCK // there are no performance benefits (PR #2653)
    if (lutcn == 1)
    {
        ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTC1(src, lut, dst, &ok);
        body.reset(p);
    }
    else
#endif
    if ((lutcn == 3 || lutcn == 4) && elemSize1 == 1)
    {
        ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTCN(src, lut, dst, &ok);
        body.reset(p);
    }

    if (body != NULL && ok)
    {
        Range all(0, dst.rows);
        if (dst.total()>>18)
            parallel_for_(all, *body, (double)std::max((size_t)1, dst.total()>>16));
        else
            (*body)(all);
        if (ok)
            return true;
    }

    return false;
}
#endif // IPP

class LUTParallelBody : public ParallelLoopBody
{
public:
    bool* ok;
    const Mat& src_;
    const Mat& lut_;
    Mat& dst_;

    LUTFunc func;

    LUTParallelBody(const Mat& src, const Mat& lut, Mat& dst, bool* _ok)
        : ok(_ok), src_(src), lut_(lut), dst_(dst)
    {
        func = lutTab[lut.depth()];
        *ok = (func != NULL);
    }

    void operator()( const cv::Range& range ) const
    {
        CV_DbgAssert(*ok);

        const int row0 = range.start;
        const int row1 = range.end;

        Mat src = src_.rowRange(row0, row1);
        Mat dst = dst_.rowRange(row0, row1);

        int cn = src.channels();
        int lutcn = lut_.channels();

        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        int len = (int)it.size;

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], lut_.ptr(), ptrs[1], len, cn, lutcn);
    }
private:
    LUTParallelBody(const LUTParallelBody&);
    LUTParallelBody& operator=(const LUTParallelBody&);
};

}

void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst )
{
    CV_INSTRUMENT_REGION()

    int cn = _src.channels(), depth = _src.depth();
    int lutcn = _lut.channels();

    CV_Assert( (lutcn == cn || lutcn == 1) &&
        _lut.total() == 256 && _lut.isContinuous() &&
        (depth == CV_8U || depth == CV_8S) );

    CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
               ocl_LUT(_src, _lut, _dst))

    Mat src = _src.getMat(), lut = _lut.getMat();
    _dst.create(src.dims, src.size, CV_MAKETYPE(_lut.depth(), cn));
    Mat dst = _dst.getMat();

    CV_IPP_RUN(_src.dims() <= 2, ipp_lut(src, lut, dst));

    if (_src.dims() <= 2)
    {
        bool ok = false;
        Ptr<ParallelLoopBody> body;

        // the IPP branch was factored out into ipp_lut() above, so this
        // condition always holds here and a LUTParallelBody is constructed
        if (body == NULL || ok == false)
        {
            ok = false;
            ParallelLoopBody* p = new LUTParallelBody(src, lut, dst, &ok);
            body.reset(p);
        }
        if (body != NULL && ok)
        {
            Range all(0, dst.rows);
            if (dst.total()>>18)
                parallel_for_(all, *body, (double)std::max((size_t)1, dst.total()>>16));
            else
                (*body)(all);
            if (ok)
                return;
        }
    }

    LUTFunc func = lutTab[lut.depth()];
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    int len = (int)it.size;

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        func(ptrs[0], lut.ptr(), ptrs[1], len, cn, lutcn);
}
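
/* Editor's note (added commentary, not upstream code): a minimal usage
   sketch, the classic gamma-correction table (lutcn == 1, so the same table
   is applied to all channels):

     cv::Mat table(1, 256, CV_8U);
     for( int i = 0; i < 256; i++ )
         table.at<uchar>(i) = cv::saturate_cast<uchar>(std::pow(i/255.0, 0.5)*255.0);

     cv::Mat img = cv::Mat::zeros(480, 640, CV_8UC3), out;
     cv::LUT(img, table, out);
*/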

namespace cv {

#ifdef HAVE_OPENCL

static bool ocl_normalize( InputArray _src, InputOutputArray _dst, InputArray _mask, int dtype,
                           double scale, double delta )
{
    UMat src = _src.getUMat();

    if( _mask.empty() )
        src.convertTo( _dst, dtype, scale, delta );
    else if (src.channels() <= 4)
    {
        const ocl::Device & dev = ocl::Device::getDefault();

        int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
                ddepth = CV_MAT_DEPTH(dtype), wdepth = std::max(CV_32F, std::max(sdepth, ddepth)),
                rowsPerWI = dev.isIntel() ? 4 : 1;

        float fscale = static_cast<float>(scale), fdelta = static_cast<float>(delta);
        bool haveScale = std::fabs(scale - 1) > DBL_EPSILON,
                haveZeroScale = !(std::fabs(scale) > DBL_EPSILON),
                haveDelta = std::fabs(delta) > DBL_EPSILON,
                doubleSupport = dev.doubleFPConfig() > 0;

        if (!haveScale && !haveDelta && stype == dtype)
        {
            _src.copyTo(_dst, _mask);
            return true;
        }
        if (haveZeroScale)
        {
            _dst.setTo(Scalar(delta), _mask);
            return true;
        }

        if ((sdepth == CV_64F || ddepth == CV_64F) && !doubleSupport)
            return false;

        char cvt[2][40];
        String opts = format("-D srcT=%s -D dstT=%s -D convertToWT=%s -D cn=%d -D rowsPerWI=%d"
                             " -D convertToDT=%s -D workT=%s%s%s%s -D srcT1=%s -D dstT1=%s",
                             ocl::typeToStr(stype), ocl::typeToStr(dtype),
                             ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]), cn,
                             rowsPerWI, ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
                             ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
                             doubleSupport ? " -D DOUBLE_SUPPORT" : "",
                             haveScale ? " -D HAVE_SCALE" : "",
                             haveDelta ? " -D HAVE_DELTA" : "",
                             ocl::typeToStr(sdepth), ocl::typeToStr(ddepth));

        ocl::Kernel k("normalizek", ocl::core::normalize_oclsrc, opts);
        if (k.empty())
            return false;

        UMat mask = _mask.getUMat(), dst = _dst.getUMat();

        ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
                maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
                dstarg = ocl::KernelArg::ReadWrite(dst);

        if (haveScale)
        {
            if (haveDelta)
                k.args(srcarg, maskarg, dstarg, fscale, fdelta);
            else
                k.args(srcarg, maskarg, dstarg, fscale);
        }
        else
        {
            if (haveDelta)
                k.args(srcarg, maskarg, dstarg, fdelta);
            else
                k.args(srcarg, maskarg, dstarg);
        }

        size_t globalsize[2] = { (size_t)src.cols, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
        return k.run(2, globalsize, NULL, false);
    }
    else
    {
        UMat temp;
        src.convertTo( temp, dtype, scale, delta );
        temp.copyTo( _dst, _mask );
    }

    return true;
}

#endif

}

void cv::normalize( InputArray _src, InputOutputArray _dst, double a, double b,
                    int norm_type, int rtype, InputArray _mask )
{
    CV_INSTRUMENT_REGION()

    double scale = 1, shift = 0;
    if( norm_type == CV_MINMAX )
    {
        double smin = 0, smax = 0;
        double dmin = MIN( a, b ), dmax = MAX( a, b );
        minMaxLoc( _src, &smin, &smax, 0, 0, _mask );
        scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
        shift = dmin - smin*scale;
    }
    else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
    {
        scale = norm( _src, norm_type, _mask );
        scale = scale > DBL_EPSILON ? a/scale : 0.;
        shift = 0;
    }
    else
        CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );

    int type = _src.type(), depth = CV_MAT_DEPTH(type);
    if( rtype < 0 )
        rtype = _dst.fixedType() ? _dst.depth() : depth;

    CV_OCL_RUN(_dst.isUMat(),
               ocl_normalize(_src, _dst, _mask, rtype, scale, shift))

    Mat src = _src.getMat();
    if( _mask.empty() )
        src.convertTo( _dst, rtype, scale, shift );
    else
    {
        Mat temp;
        src.convertTo( temp, rtype, scale, shift );
        temp.copyTo( _dst, _mask );
    }
}
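
/* Editor's note (added commentary, not upstream code): a minimal usage sketch
   of the NORM_MINMAX branch above, which maps the observed [min, max] of src
   onto [a, b] via scale = (dmax - dmin)/(smax - smin) and
   shift = dmin - smin*scale:

     cv::Mat depth16u(4, 4, CV_16U), vis;
     cv::randu(depth16u, 0, 4096);
     cv::normalize(depth16u, vis, 0, 255, cv::NORM_MINMAX, CV_8U);
*/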

CV_IMPL void
cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 )
{
    void* dptrs[] = { dstarr0, dstarr1, dstarr2, dstarr3 };
    cv::Mat src = cv::cvarrToMat(srcarr);
    int i, j, nz = 0;
    for( i = 0; i < 4; i++ )
        nz += dptrs[i] != 0;
    CV_Assert( nz > 0 );
    std::vector<cv::Mat> dvec(nz);
    std::vector<int> pairs(nz*2);

    for( i = j = 0; i < 4; i++ )
    {
        if( dptrs[i] != 0 )
        {
            dvec[j] = cv::cvarrToMat(dptrs[i]);
            CV_Assert( dvec[j].size() == src.size() );
            CV_Assert( dvec[j].depth() == src.depth() );
            CV_Assert( dvec[j].channels() == 1 );
            CV_Assert( i < src.channels() );
            pairs[j*2] = i;
            pairs[j*2+1] = j;
            j++;
        }
    }
    if( nz == src.channels() )
        cv::split( src, dvec );
    else
    {
        cv::mixChannels( &src, 1, &dvec[0], nz, &pairs[0], nz );
    }
}


CV_IMPL void
cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2,
         const void* srcarr3, void* dstarr )
{
    const void* sptrs[] = { srcarr0, srcarr1, srcarr2, srcarr3 };
    cv::Mat dst = cv::cvarrToMat(dstarr);
    int i, j, nz = 0;
    for( i = 0; i < 4; i++ )
        nz += sptrs[i] != 0;
    CV_Assert( nz > 0 );
    std::vector<cv::Mat> svec(nz);
    std::vector<int> pairs(nz*2);

    for( i = j = 0; i < 4; i++ )
    {
        if( sptrs[i] != 0 )
        {
            svec[j] = cv::cvarrToMat(sptrs[i]);
            CV_Assert( svec[j].size == dst.size &&
                svec[j].depth() == dst.depth() &&
                svec[j].channels() == 1 && i < dst.channels() );
            pairs[j*2] = j;
            pairs[j*2+1] = i;
            j++;
        }
    }

    if( nz == dst.channels() )
        cv::merge( svec, dst );
    else
    {
        cv::mixChannels( &svec[0], nz, &dst, 1, &pairs[0], nz );
    }
}


CV_IMPL void
cvMixChannels( const CvArr** src, int src_count,
               CvArr** dst, int dst_count,
               const int* from_to, int pair_count )
{
    cv::AutoBuffer<cv::Mat> buf(src_count + dst_count);

    int i;
    for( i = 0; i < src_count; i++ )
        buf[i] = cv::cvarrToMat(src[i]);
    for( i = 0; i < dst_count; i++ )
        buf[i+src_count] = cv::cvarrToMat(dst[i]);
    cv::mixChannels(&buf[0], src_count, &buf[src_count], dst_count, from_to, pair_count);
}

CV_IMPL void
cvConvertScaleAbs( const void* srcarr, void* dstarr,
                   double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size == dst.size && dst.type() == CV_8UC(src.channels()));
    cv::convertScaleAbs( src, dst, scale, shift );
}


CV_IMPL void
cvConvertScale( const void* srcarr, void* dstarr,
                double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( src.size == dst.size && src.channels() == dst.channels() );
    src.convertTo(dst, dst.type(), scale, shift);
}


CV_IMPL void cvLUT( const void* srcarr, void* dstarr, const void* lutarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), lut = cv::cvarrToMat(lutarr);

    CV_Assert( dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels()) );
    cv::LUT( src, lut, dst );
}


CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr,
                          double a, double b, int norm_type, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    CV_Assert( dst.size() == src.size() && src.channels() == dst.channels() );
    cv::normalize( src, dst, a, b, norm_type, dst.type(), mask );
}

/* End of file. */