diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp
index 5dae935cc7..5ef238e46f 100644
--- a/modules/core/src/convert.cpp
+++ b/modules/core/src/convert.cpp
@@ -2035,6 +2035,910 @@ struct cvtScale_SIMD
     }
 };
 
+// from ushort
+
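+// Each specialization below is a SIMD fast path for the generic scalar
+// cvtScale_ loop defined earlier in this file, which is roughly
+//
+//     for (int x = 0; x < width; x++)
+//         dst[x] = saturate_cast<DT>(src[x] * scale + shift);
+//
+// operator () converts 8 elements per iteration and returns how many
+// elements it handled; the caller finishes the remaining tail (and the
+// whole row when SSE is unavailable) with the scalar loop.
+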
+template <>
+struct cvtScale_SIMD<ushort, uchar, float>
+{
+    int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, schar, float>
+{
+    int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+#if CV_SSE4_1
+
+template <>
+struct cvtScale_SIMD<ushort, ushort, float>
+{
+    cvtScale_SIMD()
+    {
+        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
+    }
+
+    int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!haveSSE)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
+                                             _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+
+    bool haveSSE;
+};
+
+#endif
+
+template <>
+struct cvtScale_SIMD<ushort, short, float>
+{
+    int operator () (const ushort * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, int, float>
+{
+    int operator () (const ushort * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
+            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, float, float>
+{
+    int operator () (const ushort * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            _mm_storeu_ps(dst + x, v_dst_0);
+            _mm_storeu_ps(dst + x + 4, v_dst_1);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<ushort, double, float>
+{
+    int operator () (const ushort * src, double * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0));
+            _mm_storeu_pd(dst + x + 2, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_0), 8))));
+
+            _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(v_dst_1));
+            _mm_storeu_pd(dst + x + 6, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_1), 8))));
+        }
+
+        return x;
+    }
+};
+
+// from short
+
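+// Widening signed 16-bit values needs one extra step compared to ushort:
+// _mm_unpacklo_epi16(v_zero, v_src) moves each short into the high half of
+// a 32-bit lane, and _mm_srai_epi32(..., 16) shifts it back down with sign
+// extension. For example, the lane 0xFFFF (-1) becomes 0xFFFF0000 after the
+// unpack and 0xFFFFFFFF (-1) after the arithmetic shift.
+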
+template <>
+struct cvtScale_SIMD<short, uchar, float>
+{
+    int operator () (const short * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, schar, float>
+{
+    int operator () (const short * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+#if CV_SSE4_1
+
+template <>
+struct cvtScale_SIMD<short, ushort, float>
+{
+    cvtScale_SIMD()
+    {
+        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
+    }
+
+    int operator () (const short * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!haveSSE)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
+                                             _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+
+    bool haveSSE;
+};
+
+#endif
+
+template <>
+struct cvtScale_SIMD<short, short, float>
+{
+    int operator () (const short * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, int, float>
+{
+    int operator () (const short * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
+            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, float, float>
+{
+    int operator () (const short * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            _mm_storeu_ps(dst + x, v_dst_0);
+            _mm_storeu_ps(dst + x + 4, v_dst_1);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<short, double, float>
+{
+    int operator () (const short * src, double * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
+
+            _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0));
+            _mm_storeu_pd(dst + x + 2, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_0), 8))));
+
+            _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(v_dst_1));
+            _mm_storeu_pd(dst + x + 6, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_1), 8))));
+        }
+
+        return x;
+    }
+};
+
+// from int
+
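+// 32-bit sources need no unpacking: two 4-element vectors are loaded and
+// converted directly with _mm_cvtepi32_ps. Note that the whole pipeline
+// runs in single precision, so int inputs with magnitude above 2^24 are
+// already rounded by the int -> float conversion; this matches the scalar
+// path, which uses the same float working type for these combinations.
+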
+template <>
+struct cvtScale_SIMD<int, uchar, float>
+{
+    int operator () (const int * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, schar, float>
+{
+    int operator () (const int * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+#if CV_SSE4_1
+
+template <>
+struct cvtScale_SIMD<int, ushort, float>
+{
+    cvtScale_SIMD()
+    {
+        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
+    }
+
+    int operator () (const int * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!haveSSE)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
+                                             _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+
+    bool haveSSE;
+};
+
+#endif
+
+template <>
+struct cvtScale_SIMD<int, short, float>
+{
+    int operator () (const int * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, int, float>
+{
+    int operator () (const int * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
+            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, float, float>
+{
+    int operator () (const int * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            _mm_storeu_ps(dst + x, v_dst_0);
+            _mm_storeu_ps(dst + x + 4, v_dst_1);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<int, double, float>
+{
+    int operator () (const int * src, double * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
+
+            _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0));
+            _mm_storeu_pd(dst + x + 2, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_0), 8))));
+
+            _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(v_dst_1));
+            _mm_storeu_pd(dst + x + 6, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_1), 8))));
+        }
+
+        return x;
+    }
+};
+
+// from float
+
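+// Float sources are the simplest case: load, multiply, add. For integer
+// destinations the results go through _mm_cvtps_epi32, which rounds with
+// the current MXCSR mode (round-to-nearest-even by default), the same
+// rounding cvRound performs in the scalar path.
+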
+template <>
+struct cvtScale_SIMD<float, uchar, float>
+{
+    int operator () (const float * src, uchar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, schar, float>
+{
+    int operator () (const float * src, schar * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst, v_zero));
+        }
+
+        return x;
+    }
+};
+
+#if CV_SSE4_1
+
+template <>
+struct cvtScale_SIMD<float, ushort, float>
+{
+    cvtScale_SIMD()
+    {
+        haveSSE = checkHardwareSupport(CV_CPU_SSE4_1);
+    }
+
+    int operator () (const float * src, ushort * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!haveSSE)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
+                                             _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+
+    bool haveSSE;
+};
+
+#endif
+
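+// The #if CV_SSE4_1 blocks above exist because the unsigned saturating
+// pack _mm_packus_epi32 (32-bit -> ushort) is an SSE4.1 instruction with
+// no direct SSE2 equivalent. The guard is both compile-time (CV_SSE4_1)
+// and run-time (checkHardwareSupport), so on older CPUs the ushort
+// destinations simply fall back to the scalar loop.
+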
+template <>
+struct cvtScale_SIMD<float, short, float>
+{
+    int operator () (const float * src, short * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            __m128i v_dst = _mm_packs_epi32(_mm_cvtps_epi32(v_dst_0),
+                                            _mm_cvtps_epi32(v_dst_1));
+            _mm_storeu_si128((__m128i *)(dst + x), v_dst);
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, int, float>
+{
+    int operator () (const float * src, int * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_cvtps_epi32(v_dst_0));
+            _mm_storeu_si128((__m128i *)(dst + x + 4), _mm_cvtps_epi32(v_dst_1));
+        }
+
+        return x;
+    }
+};
+
+template <>
+struct cvtScale_SIMD<float, float, float>
+{
+    int operator () (const float * src, float * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            _mm_storeu_ps(dst + x, v_dst_0);
+            _mm_storeu_ps(dst + x + 4, v_dst_1);
+        }
+
+        return x;
+    }
+};
+
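+// Storing doubles: _mm_cvtps_pd widens only the two low floats, so each
+// 4-float result is split in half, shifting the high pair down by 8 bytes
+// before the second conversion. An equivalent, plain-SSE formulation that
+// should avoid the cast/shift dance (untested sketch) would be
+//
+//     _mm_storeu_pd(dst + x,     _mm_cvtps_pd(v_dst_0));
+//     _mm_storeu_pd(dst + x + 2, _mm_cvtps_pd(_mm_movehl_ps(v_dst_0, v_dst_0)));
+//
+// since _mm_movehl_ps(v, v) copies the high pair of floats into the low pair.
+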
+template <>
+struct cvtScale_SIMD<float, double, float>
+{
+    int operator () (const float * src, double * dst, int width, float scale, float shift) const
+    {
+        int x = 0;
+
+        if (!USE_SSE2)
+            return x;
+
+        __m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
+
+        for ( ; x <= width - 8; x += 8)
+        {
+            __m128 v_src = _mm_loadu_ps(src + x);
+            __m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            v_src = _mm_loadu_ps(src + x + 4);
+            __m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
+
+            _mm_storeu_pd(dst + x, _mm_cvtps_pd(v_dst_0));
+            _mm_storeu_pd(dst + x + 2, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_0), 8))));
+
+            _mm_storeu_pd(dst + x + 4, _mm_cvtps_pd(v_dst_1));
+            _mm_storeu_pd(dst + x + 6, _mm_cvtps_pd(_mm_castsi128_ps(
+                _mm_srli_si128(_mm_castps_si128(v_dst_1), 8))));
+        }
+
+        return x;
+    }
+};
+
 #elif CV_NEON
 
 // from uchar