opencv/modules/core/src/sum.simd.hpp
HAN Liutong 0dd7769bb1
Merge pull request #23980 from hanliutong:rewrite-core
Rewrite Universal Intrinsic code by using new API: Core module. #23980

The goal of this PR is to find all SIMD code blocks guarded by the `CV_SIMD` macro in the `opencv/modules/core` folder and rewrite them using the new Universal Intrinsic API.

The patch is almost entirely auto-generated using the [rewriter](https://github.com/hanliutong/rewriter); see the related PR #23885.
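
For a rough idea of what the rewrite looks like (a hypothetical sketch, not a diff from the patch): operator overloads and the compile-time `nlanes` constant are replaced by explicit function calls and run-time `VTraits` queries, which also work for variable-length backends.

```cpp
// Hypothetical example (not taken from the patch): summing a uint32 buffer.

// Old style, fixed-width backends only:
//     v_uint32 s = vx_setzero_u32();
//     for (int x = 0; x <= len - v_uint32::nlanes; x += v_uint32::nlanes)
//         s += vx_load(buf + x);

// New style, also valid for scalable backends such as RVV:
unsigned sum_u32(const unsigned* buf, int len)
{
    v_uint32 s = vx_setzero_u32();
    int x = 0;
    for (; x <= len - VTraits<v_uint32>::vlanes(); x += VTraits<v_uint32>::vlanes())
        s = v_add(s, vx_load(buf + x));
    unsigned total = v_reduce_sum(s);
    for (; x < len; ++x) // scalar tail
        total += buf[x];
    v_cleanup();
    return total;
}
```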

Most of the files have been rewritten, but I marked this PR as a draft because the `CV_SIMD` macro also appears in the following files, which have not been rewritten for the reasons below:

1. ~~Code designed for fixed-size SIMD types (`v_int16x8`, `v_float32x4`, etc.) needs to be rewritten manually.~~ Rewritten
- ./modules/core/src/stat.simd.hpp
- ./modules/core/src/matrix_transform.cpp
- ./modules/core/src/matmul.simd.hpp

2. Vector types are wrapped in another class/struct, which the compiler does not support for variable-length (scalable) backends. These cannot be rewritten directly.
- ./modules/core/src/mathfuncs_core.simd.hpp 
```cpp
struct v_atan_f32
{
    explicit v_atan_f32(const float& scale)
    {
...
    }

    v_float32 compute(const v_float32& y, const v_float32& x)
    {
...
    }

...
    v_float32 val90; // sizeless types cannot be used as class members
    v_float32 val180;
    v_float32 val360;
    v_float32 s;
};
```
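
For reference, a minimal sketch of one possible workaround (with a made-up `ScaleAdd` functor, not code from this PR): keep plain scalar members and broadcast them inside the call, so the struct itself never stores a sizeless vector type.

```cpp
// Sketch only: scalar members plus per-call broadcasts avoid sizeless
// class members, at the cost of extra vx_setall_f32() calls.
struct ScaleAdd
{
    ScaleAdd(float scale_, float bias_) : scale(scale_), bias(bias_) {}

    v_float32 operator()(const v_float32& x) const
    {
        v_float32 vscale = vx_setall_f32(scale); // broadcast per call
        v_float32 vbias  = vx_setall_f32(bias);
        return v_fma(x, vscale, vbias);          // x * scale + bias
    }

    float scale, bias; // scalars are fine as members
};
```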

3. The API does not support the required operation, or the interfaces do not match:

- ./modules/core/src/norm.cpp
  Uses `v_popcount`; ~~waiting for #23966~~ Fixed
- ./modules/core/src/has_non_zero.simd.hpp
  Uses an illegal Universal Intrinsic API: for float types there is no bitwise `|` operation (a possible workaround is sketched after the snippets below). Further discussion is needed.

```cpp
/** @brief Bitwise OR

Only for integer types. */
template<typename _Tp, int n> CV_INLINE v_reg<_Tp, n> operator|(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b);
template<typename _Tp, int n> CV_INLINE v_reg<_Tp, n>& operator|=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b);
```

```cpp
#if CV_SIMD
    typedef v_float32 v_type;
    const v_type v_zero = vx_setzero_f32();
    constexpr const int unrollCount = 8;
    int step = v_type::nlanes * unrollCount;
    int len0 = len & -step;
    const float* srcSimdEnd = src+len0;

    int countSIMD = static_cast<int>((srcSimdEnd-src)/step);
    while(!res && countSIMD--)
    {
        v_type v0 = vx_load(src);
        src += v_type::nlanes;
        v_type v1 = vx_load(src);
        src += v_type::nlanes;
....
        src += v_type::nlanes;
        v0 |= v1; //Illegal ?
....
        //res = v_check_any(((v0 | v4) != v_zero));//beware : (NaN != 0) returns "false" since != is mapped to _CMP_NEQ_OQ and not _CMP_NEQ_UQ
        res = !v_check_all(((v0 | v4) == v_zero));
    }

    v_cleanup();
#endif
```
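
For what it's worth, a legal way to express the same check (a sketch reusing `v0`, `v1` and `v_zero` from the snippet above) is to route the bitwise OR through an integer view and only then compare as floats, so the original semantics for `-0.0f` and NaN are preserved:

```cpp
// Sketch: OR the bit patterns via v_uint32 (allowed by the API), then
// reinterpret back to float so the final comparison keeps float semantics.
v_float32 v01 = v_reinterpret_as_f32(v_or(v_reinterpret_as_u32(v0),
                                          v_reinterpret_as_u32(v1)));
res = !v_check_all(v_eq(v01, v_zero));
```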

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [ ] I agree to contribute to the project under Apache 2 License.
- [ ] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [ ] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
2023-08-11 08:33:33 +03:00


// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include "precomp.hpp"
#include "stat.hpp"

namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN

SumFunc getSumFunc(int depth);

#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY

template <typename T, typename ST>
struct Sum_SIMD
{
    int operator () (const T *, const uchar *, ST *, int, int) const
    {
        return 0;
    }
};
#if (CV_SIMD || CV_SIMD_SCALABLE)
template <>
struct Sum_SIMD<uchar, int>
{
    int operator () (const uchar * src0, const uchar * mask, int * dst, int len, int cn) const
    {
        if (mask || (cn != 1 && cn != 2 && cn != 4))
            return 0;
        len *= cn;
        int x = 0;
        v_uint32 v_sum = vx_setzero_u32();

        // Sum pairs of expanded uchar values into 16-bit lanes in bounded blocks,
        // so the intermediate accumulators cannot overflow before each block's
        // sum is widened into the 32-bit accumulator.
        int len0 = len & -VTraits<v_uint8>::vlanes();
        while (x < len0)
        {
            const int len_tmp = min(x + 256*VTraits<v_uint16>::vlanes(), len0);
            v_uint16 v_sum16 = vx_setzero_u16();
            for (; x < len_tmp; x += VTraits<v_uint8>::vlanes())
            {
                v_uint16 v_src0, v_src1;
                v_expand(vx_load(src0 + x), v_src0, v_src1);
                v_sum16 = v_add(v_sum16, v_add(v_src0, v_src1));
            }
            v_uint32 v_half0, v_half1;
            v_expand(v_sum16, v_half0, v_half1);
            v_sum = v_add(v_sum, v_add(v_half0, v_half1));
        }
        if (x <= len - VTraits<v_uint16>::vlanes())
        {
            v_uint32 v_half0, v_half1;
            v_expand(vx_load_expand(src0 + x), v_half0, v_half1);
            v_sum = v_add(v_sum, v_add(v_half0, v_half1));
            x += VTraits<v_uint16>::vlanes();
        }
        if (x <= len - VTraits<v_uint32>::vlanes())
        {
            v_sum = v_add(v_sum, vx_load_expand_q(src0 + x));
            x += VTraits<v_uint32>::vlanes();
        }

        // Reduce to a single scalar for cn == 1; otherwise scatter the lanes
        // across the 2 or 4 interleaved channels.
        if (cn == 1)
            *dst += v_reduce_sum(v_sum);
        else
        {
            uint32_t CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[VTraits<v_uint32>::max_nlanes];
            v_store_aligned(ar, v_sum);
            for (int i = 0; i < VTraits<v_uint32>::vlanes(); ++i)
                dst[i % cn] += ar[i];
        }
        v_cleanup();

        return x / cn;
    }
};
template <>
struct Sum_SIMD<schar, int>
{
    int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const
    {
        if (mask || (cn != 1 && cn != 2 && cn != 4))
            return 0;
        len *= cn;
        int x = 0;
        v_int32 v_sum = vx_setzero_s32();

        int len0 = len & -VTraits<v_int8>::vlanes();
        while (x < len0)
        {
            const int len_tmp = min(x + 256*VTraits<v_int16>::vlanes(), len0);
            v_int16 v_sum16 = vx_setzero_s16();
            for (; x < len_tmp; x += VTraits<v_int8>::vlanes())
            {
                v_int16 v_src0, v_src1;
                v_expand(vx_load(src0 + x), v_src0, v_src1);
                v_sum16 = v_add(v_sum16, v_add(v_src0, v_src1));
            }
            v_int32 v_half0, v_half1;
            v_expand(v_sum16, v_half0, v_half1);
            v_sum = v_add(v_sum, v_add(v_half0, v_half1));
        }
        if (x <= len - VTraits<v_int16>::vlanes())
        {
            v_int32 v_half0, v_half1;
            v_expand(vx_load_expand(src0 + x), v_half0, v_half1);
            v_sum = v_add(v_sum, v_add(v_half0, v_half1));
            x += VTraits<v_int16>::vlanes();
        }
        if (x <= len - VTraits<v_int32>::vlanes())
        {
            v_sum = v_add(v_sum, vx_load_expand_q(src0 + x));
            x += VTraits<v_int32>::vlanes();
        }

        if (cn == 1)
            *dst += v_reduce_sum(v_sum);
        else
        {
            int32_t CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[VTraits<v_int32>::max_nlanes];
            v_store_aligned(ar, v_sum);
            for (int i = 0; i < VTraits<v_int32>::vlanes(); ++i)
                dst[i % cn] += ar[i];
        }
        v_cleanup();

        return x / cn;
    }
};
template <>
struct Sum_SIMD<ushort, int>
{
    int operator () (const ushort * src0, const uchar * mask, int * dst, int len, int cn) const
    {
        if (mask || (cn != 1 && cn != 2 && cn != 4))
            return 0;
        len *= cn;
        int x = 0;
        v_uint32 v_sum = vx_setzero_u32();

        for (; x <= len - VTraits<v_uint16>::vlanes(); x += VTraits<v_uint16>::vlanes())
        {
            v_uint32 v_src0, v_src1;
            v_expand(vx_load(src0 + x), v_src0, v_src1);
            v_sum = v_add(v_sum, v_add(v_src0, v_src1));
        }
        if (x <= len - VTraits<v_uint32>::vlanes())
        {
            v_sum = v_add(v_sum, vx_load_expand(src0 + x));
            x += VTraits<v_uint32>::vlanes();
        }

        if (cn == 1)
            *dst += v_reduce_sum(v_sum);
        else
        {
            uint32_t CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[VTraits<v_uint32>::max_nlanes];
            v_store_aligned(ar, v_sum);
            for (int i = 0; i < VTraits<v_uint32>::vlanes(); ++i)
                dst[i % cn] += ar[i];
        }
        v_cleanup();

        return x / cn;
    }
};
template <>
struct Sum_SIMD<short, int>
{
    int operator () (const short * src0, const uchar * mask, int * dst, int len, int cn) const
    {
        if (mask || (cn != 1 && cn != 2 && cn != 4))
            return 0;
        len *= cn;
        int x = 0;
        v_int32 v_sum = vx_setzero_s32();

        for (; x <= len - VTraits<v_int16>::vlanes(); x += VTraits<v_int16>::vlanes())
        {
            v_int32 v_src0, v_src1;
            v_expand(vx_load(src0 + x), v_src0, v_src1);
            v_sum = v_add(v_sum, v_add(v_src0, v_src1));
        }
        if (x <= len - VTraits<v_int32>::vlanes())
        {
            v_sum = v_add(v_sum, vx_load_expand(src0 + x));
            x += VTraits<v_int32>::vlanes();
        }

        if (cn == 1)
            *dst += v_reduce_sum(v_sum);
        else
        {
            int32_t CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[VTraits<v_int32>::max_nlanes];
            v_store_aligned(ar, v_sum);
            for (int i = 0; i < VTraits<v_int32>::vlanes(); ++i)
                dst[i % cn] += ar[i];
        }
        v_cleanup();

        return x / cn;
    }
};
#if (CV_SIMD_64F || CV_SIMD_SCALABLE_64F)
template <>
struct Sum_SIMD<int, double>
{
    int operator () (const int * src0, const uchar * mask, double * dst, int len, int cn) const
    {
        if (mask || (cn != 1 && cn != 2 && cn != 4))
            return 0;
        len *= cn;
        int x = 0;
        v_float64 v_sum0 = vx_setzero_f64();
        v_float64 v_sum1 = vx_setzero_f64();

        for (; x <= len - 2 * VTraits<v_int32>::vlanes(); x += 2 * VTraits<v_int32>::vlanes())
        {
            v_int32 v_src0 = vx_load(src0 + x);
            v_int32 v_src1 = vx_load(src0 + x + VTraits<v_int32>::vlanes());
            v_sum0 = v_add(v_sum0, v_add(v_cvt_f64(v_src0), v_cvt_f64(v_src1)));
            v_sum1 = v_add(v_sum1, v_add(v_cvt_f64_high(v_src0), v_cvt_f64_high(v_src1)));
        }

#if CV_SIMD256 || CV_SIMD512
        double CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[VTraits<v_float64>::max_nlanes];
        v_store_aligned(ar, v_add(v_sum0, v_sum1));
        for (int i = 0; i < VTraits<v_float64>::vlanes(); ++i)
            dst[i % cn] += ar[i];
#else
        double CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[2 * VTraits<v_float64>::max_nlanes];
        v_store_aligned(ar, v_sum0);
        v_store_aligned(ar + VTraits<v_float64>::vlanes(), v_sum1);
        for (int i = 0; i < 2 * VTraits<v_float64>::vlanes(); ++i)
            dst[i % cn] += ar[i];
#endif
        v_cleanup();

        return x / cn;
    }
};
template <>
struct Sum_SIMD<float, double>
{
    int operator () (const float * src0, const uchar * mask, double * dst, int len, int cn) const
    {
        if (mask || (cn != 1 && cn != 2 && cn != 4))
            return 0;
        len *= cn;
        int x = 0;
        v_float64 v_sum0 = vx_setzero_f64();
        v_float64 v_sum1 = vx_setzero_f64();

        for (; x <= len - 2 * VTraits<v_float32>::vlanes(); x += 2 * VTraits<v_float32>::vlanes())
        {
            v_float32 v_src0 = vx_load(src0 + x);
            v_float32 v_src1 = vx_load(src0 + x + VTraits<v_float32>::vlanes());
            v_sum0 = v_add(v_sum0, v_add(v_cvt_f64(v_src0), v_cvt_f64(v_src1)));
            v_sum1 = v_add(v_sum1, v_add(v_cvt_f64_high(v_src0), v_cvt_f64_high(v_src1)));
        }

#if CV_SIMD256 || CV_SIMD512
        double CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[VTraits<v_float64>::max_nlanes];
        v_store_aligned(ar, v_add(v_sum0, v_sum1));
        for (int i = 0; i < VTraits<v_float64>::vlanes(); ++i)
            dst[i % cn] += ar[i];
#else
        double CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[2 * VTraits<v_float64>::max_nlanes];
        v_store_aligned(ar, v_sum0);
        v_store_aligned(ar + VTraits<v_float64>::vlanes(), v_sum1);
        for (int i = 0; i < 2 * VTraits<v_float64>::vlanes(); ++i)
            dst[i % cn] += ar[i];
#endif
        v_cleanup();

        return x / cn;
    }
};
#endif
#endif
template<typename T, typename ST>
static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
{
    const T* src = src0;

    if( !mask )
    {
        // Let the SIMD specialization handle the leading part of the row
        // (it returns the number of pixels it processed), then finish the
        // remaining elements with scalar code.
        Sum_SIMD<T, ST> vop;
        int i = vop(src0, mask, dst, len, cn), k = cn % 4;
        src += i * cn;

        if( k == 1 )
        {
            ST s0 = dst[0];

#if CV_ENABLE_UNROLLED
            for(; i <= len - 4; i += 4, src += cn*4 )
                s0 += src[0] + src[cn] + src[cn*2] + src[cn*3];
#endif
            for( ; i < len; i++, src += cn )
                s0 += src[0];
            dst[0] = s0;
        }
        else if( k == 2 )
        {
            ST s0 = dst[0], s1 = dst[1];
            for( ; i < len; i++, src += cn )
            {
                s0 += src[0];
                s1 += src[1];
            }
            dst[0] = s0;
            dst[1] = s1;
        }
        else if( k == 3 )
        {
            ST s0 = dst[0], s1 = dst[1], s2 = dst[2];
            for( ; i < len; i++, src += cn )
            {
                s0 += src[0];
                s1 += src[1];
                s2 += src[2];
            }
            dst[0] = s0;
            dst[1] = s1;
            dst[2] = s2;
        }

        for( ; k < cn; k += 4 )
        {
            src = src0 + i*cn + k;
            ST s0 = dst[k], s1 = dst[k+1], s2 = dst[k+2], s3 = dst[k+3];
            for( ; i < len; i++, src += cn )
            {
                s0 += src[0]; s1 += src[1];
                s2 += src[2]; s3 += src[3];
            }
            dst[k] = s0;
            dst[k+1] = s1;
            dst[k+2] = s2;
            dst[k+3] = s3;
        }
        return len;
    }
    // Masked path: accumulate only pixels whose mask byte is non-zero and
    // return how many such pixels were seen.
    int i, nzm = 0;
    if( cn == 1 )
    {
        ST s = dst[0];
        for( i = 0; i < len; i++ )
            if( mask[i] )
            {
                s += src[i];
                nzm++;
            }
        dst[0] = s;
    }
    else if( cn == 3 )
    {
        ST s0 = dst[0], s1 = dst[1], s2 = dst[2];
        for( i = 0; i < len; i++, src += 3 )
            if( mask[i] )
            {
                s0 += src[0];
                s1 += src[1];
                s2 += src[2];
                nzm++;
            }
        dst[0] = s0;
        dst[1] = s1;
        dst[2] = s2;
    }
    else
    {
        for( i = 0; i < len; i++, src += cn )
            if( mask[i] )
            {
                int k = 0;
#if CV_ENABLE_UNROLLED
                for( ; k <= cn - 4; k += 4 )
                {
                    ST s0, s1;
                    s0 = dst[k] + src[k];
                    s1 = dst[k+1] + src[k+1];
                    dst[k] = s0; dst[k+1] = s1;
                    s0 = dst[k+2] + src[k+2];
                    s1 = dst[k+3] + src[k+3];
                    dst[k+2] = s0; dst[k+3] = s1;
                }
#endif
                for( ; k < cn; k++ )
                    dst[k] += src[k];
                nzm++;
            }
    }
    return nzm;
}
static int sum8u( const uchar* src, const uchar* mask, int* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

static int sum8s( const schar* src, const uchar* mask, int* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

static int sum16u( const ushort* src, const uchar* mask, int* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

static int sum16s( const short* src, const uchar* mask, int* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

static int sum32s( const int* src, const uchar* mask, double* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

static int sum32f( const float* src, const uchar* mask, double* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

static int sum64f( const double* src, const uchar* mask, double* dst, int len, int cn )
{ CV_INSTRUMENT_REGION(); return sum_(src, mask, dst, len, cn); }

SumFunc getSumFunc(int depth)
{
    static SumFunc sumTab[] =
    {
        (SumFunc)GET_OPTIMIZED(sum8u), (SumFunc)sum8s,
        (SumFunc)sum16u, (SumFunc)sum16s,
        (SumFunc)sum32s,
        (SumFunc)GET_OPTIMIZED(sum32f), (SumFunc)sum64f,
        0
    };

    return sumTab[depth];
}

#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace