From f6b3bc01e51b69ccbbad69bd55f011c58eddd00e Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Mon, 29 Dec 2014 20:32:11 +0300
Subject: [PATCH 1/2] addWeighted

---
 modules/core/src/arithm.cpp | 93 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 92 insertions(+), 1 deletion(-)

diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp
index 68c8979a8d..2f377350e8 100644
--- a/modules/core/src/arithm.cpp
+++ b/modules/core/src/arithm.cpp
@@ -2772,7 +2772,98 @@ struct AddWeighted_SIMD
     }
 };
 
-#if CV_NEON
+#if CV_SSE2
+
+template <>
+struct AddWeighted_SIMD<schar, float>
+{
+    AddWeighted_SIMD()
+    {
+        haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
+    }
+
+    int operator() (const schar * src1, const schar * src2, schar * dst, int width, float alpha, float beta, float gamma) const
+    {
+        int x = 0;
+
+        if (!haveSSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_alpha = _mm_set1_ps(alpha), v_beta = _mm_set1_ps(beta),
+               v_gamma = _mm_set1_ps(gamma);
+
+        for( ; x <= width - 8; x += 8 )
+        {
+            __m128i v_src1 = _mm_loadl_epi64((const __m128i *)(src1 + x));
+            __m128i v_src2 = _mm_loadl_epi64((const __m128i *)(src2 + x));
+
+            __m128i v_src1_p = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src1), 8);
+            __m128i v_src2_p = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src2), 8);
+
+            __m128 v_dstf0 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src1_p), 16)), v_alpha);
+            v_dstf0 = _mm_add_ps(_mm_add_ps(v_dstf0, v_gamma),
+                                 _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2_p), 16)), v_beta));
+
+            __m128 v_dstf1 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src1_p), 16)), v_alpha);
+            v_dstf1 = _mm_add_ps(_mm_add_ps(v_dstf1, v_gamma),
+                                 _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2_p), 16)), v_beta));
+
+            __m128i v_dst16 = _mm_packs_epi32(_mm_cvtps_epi32(v_dstf0),
+                                              _mm_cvtps_epi32(v_dstf1));
+
+            _mm_storel_epi64((__m128i *)(dst + x), _mm_packs_epi16(v_dst16, v_zero));
+        }
+
+        return x;
+    }
+
+    bool haveSSE2;
+};
+
+template <>
+struct AddWeighted_SIMD<short, float>
+{
+    AddWeighted_SIMD()
+    {
+        haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
+    }
+
+    int operator() (const short * src1, const short * src2, short * dst, int width, float alpha, float beta, float gamma) const
+    {
+        int x = 0;
+
+        if (!haveSSE2)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_alpha = _mm_set1_ps(alpha), v_beta = _mm_set1_ps(beta),
+               v_gamma = _mm_set1_ps(gamma);
+
+        for( ; x <= width - 8; x += 8 )
+        {
+            __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x));
+            __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x));
+
+            __m128 v_dstf0 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src1), 16)), v_alpha);
+            v_dstf0 = _mm_add_ps(_mm_add_ps(v_dstf0, v_gamma),
+                                 _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src2), 16)), v_beta));
+
+            __m128 v_dstf1 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src1), 16)), v_alpha);
+            v_dstf1 = _mm_add_ps(_mm_add_ps(v_dstf1, v_gamma),
+                                 _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src2), 16)), v_beta));
+
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_packs_epi32(_mm_cvtps_epi32(v_dstf0),
+                                                                   _mm_cvtps_epi32(v_dstf1)));
+        }
+
+        return x;
+    }
+
+    bool haveSSE2;
+};
+
+#elif CV_NEON
 
 template <>
 struct AddWeighted_SIMD<schar, float>

From 60f2f7898af95b3d815715e4891ba97bb1b9782e Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Tue, 30 Dec 2014 00:20:45 +0300
Subject: [PATCH 2/2] SSE4.1 addWeighted for 16u

---
 modules/core/src/arithm.cpp | 46 +++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp
index 2f377350e8..40bb097711 100644
--- a/modules/core/src/arithm.cpp
+++ b/modules/core/src/arithm.cpp
@@ -2863,6 +2863,52 @@ struct AddWeighted_SIMD
     bool haveSSE2;
 };
 
+#if CV_SSE4_1
+
+template <>
+struct AddWeighted_SIMD<ushort, float>
+{
+    AddWeighted_SIMD()
+    {
+        haveSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1);
+    }
+
+    int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, float alpha, float beta, float gamma) const
+    {
+        int x = 0;
+
+        if (!haveSSE4_1)
+            return x;
+
+        __m128i v_zero = _mm_setzero_si128();
+        __m128 v_alpha = _mm_set1_ps(alpha), v_beta = _mm_set1_ps(beta),
+               v_gamma = _mm_set1_ps(gamma);
+
+        for( ; x <= width - 8; x += 8 )
+        {
+            __m128i v_src1 = _mm_loadu_si128((const __m128i *)(src1 + x));
+            __m128i v_src2 = _mm_loadu_si128((const __m128i *)(src2 + x));
+
+            __m128 v_dstf0 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src1, v_zero)), v_alpha);
+            v_dstf0 = _mm_add_ps(_mm_add_ps(v_dstf0, v_gamma),
+                                 _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src2, v_zero)), v_beta));
+
+            __m128 v_dstf1 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src1, v_zero)), v_alpha);
+            v_dstf1 = _mm_add_ps(_mm_add_ps(v_dstf1, v_gamma),
+                                 _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src2, v_zero)), v_beta));
+
+            _mm_storeu_si128((__m128i *)(dst + x), _mm_packus_epi32(_mm_cvtps_epi32(v_dstf0),
+                                                                    _mm_cvtps_epi32(v_dstf1)));
+        }
+
+        return x;
+    }
+
+    bool haveSSE4_1;
+};
+
+#endif
+
 #elif CV_NEON
 
 template <>
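
A note on the widening used above: SSE2 has no sign-extending conversion such as _mm_cvtepi16_epi32 (that arrived with SSE4.1), so the schar and short paths unpack each value into the high half of a wider lane (with zeros in the low half) and then sign-extend it back down with an arithmetic shift (_mm_srai_epi16 / _mm_srai_epi32). The ushort path only needs zero extension, but it is gated on SSE4.1 because the final unsigned saturating pack, _mm_packus_epi32, is an SSE4.1 instruction. Each operator() returns the number of elements it handled; the remaining tail is presumably finished by the caller's scalar loop. A minimal scalar sketch of the per-element result these kernels compute (addWeightedRowRef is a hypothetical helper, not part of the patches; cv::saturate_cast is OpenCV's clamping conversion):

    #include <opencv2/core.hpp>

    // Hypothetical scalar reference for one row; T stands for schar, short or ushort.
    template <typename T>
    static void addWeightedRowRef(const T * src1, const T * src2, T * dst,
                                  int width, float alpha, float beta, float gamma)
    {
        for (int x = 0; x < width; x++)
            dst[x] = cv::saturate_cast<T>(src1[x] * alpha + src2[x] * beta + gamma);
    }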