From 4b5f948307ce7e1a579b8e998f10d13f35cb271b Mon Sep 17 00:00:00 2001
From: Vadim Pisarevsky
Date: Tue, 7 Aug 2012 17:59:52 +0400
Subject: [PATCH] added SSE2-optimized 3x3 invert by Grigoriy Frolov

---
 modules/core/src/lapack.cpp | 76 ++++++++++++++++++++++++++++---------
 1 file changed, 59 insertions(+), 17 deletions(-)

diff --git a/modules/core/src/lapack.cpp b/modules/core/src/lapack.cpp
index 1c76df6413..13cbdd14c6 100644
--- a/modules/core/src/lapack.cpp
+++ b/modules/core/src/lapack.cpp
@@ -1024,7 +1024,7 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
                 __m128 s0 = _mm_or_ps(t0, t1);
                 __m128 det =_mm_set1_ps((float)d);
                 s0 = _mm_mul_ps(s0, det);
-                const uchar CV_DECL_ALIGNED(16) inv[16] = {0,0,0,0,0,0,0,0x80,0,0,0,0x80,0,0,0,0};
+                static const uchar CV_DECL_ALIGNED(16) inv[16] = {0,0,0,0,0,0,0,0x80,0,0,0,0x80,0,0,0,0};
                 __m128 pattern = _mm_load_ps((const float*)inv);
                 s0 = _mm_xor_ps(s0, pattern);//==-1*s0
                 s0 = _mm_shuffle_ps(s0, s0, _MM_SHUFFLE(0,2,1,3));
@@ -1064,7 +1064,7 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
                 __m128d det = _mm_load1_pd((const double*)&d);
                 sm = _mm_mul_pd(sm, det);
 
-                uchar CV_DECL_ALIGNED(16) inv[8] = {0,0,0,0,0,0,0,0x80};
+                static const uchar CV_DECL_ALIGNED(16) inv[8] = {0,0,0,0,0,0,0,0x80};
                 __m128d pattern = _mm_load1_pd((double*)inv);
                 ss = _mm_mul_pd(ss, det);
                 ss = _mm_xor_pd(ss, pattern);//==-1*ss
@@ -1097,24 +1097,66 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
             double d = det3(Sf);
             if( d != 0. )
             {
+                float CV_DECL_ALIGNED(16) t[12];
+
                 result = true;
                 d = 1./d;
-                float t[9];
-                t[0] = (float)(((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
-                t[1] = (float)(((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
-                t[2] = (float)(((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);
-
-                t[3] = (float)(((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
-                t[4] = (float)(((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
-                t[5] = (float)(((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);
-
-                t[6] = (float)(((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
-                t[7] = (float)(((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
-                t[8] = (float)(((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);
+                #if CV_SSE2
+                if(USE_SSE2)
+                {
+                    __m128 det =_mm_set1_ps((float)d);
+                    __m128 s0 = _mm_loadu_ps((const float*)srcdata);//s0 = Sf(0,0) Sf(0,1) Sf(0,2) ***
+                    __m128 s1 = _mm_loadu_ps((const float*)(srcdata+srcstep));//s1 = Sf(1,0) Sf(1,1) Sf(1,2) ***
+                    __m128 s2 = _mm_set_ps(0.f, Sf(2,2), Sf(2,1), Sf(2,0)); //s2 = Sf(2,0) Sf(2,1) Sf(2,2) ***
 
-                Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
-                Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
-                Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
+                    __m128 r0 = _mm_shuffle_ps(s1,s1,_MM_SHUFFLE(3,0,2,1)); //r0 = Sf(1,1) Sf(1,2) Sf(1,0) ***
+                    __m128 r1 = _mm_shuffle_ps(s2,s2,_MM_SHUFFLE(3,1,0,2)); //r1 = Sf(2,2) Sf(2,0) Sf(2,1) ***
+                    __m128 r2 = _mm_shuffle_ps(s2,s2,_MM_SHUFFLE(3,0,2,1)); //r2 = Sf(2,1) Sf(2,2) Sf(2,0) ***
+
+                    __m128 t0 = _mm_mul_ps(s0, r0);//t0 = Sf(0,0)*Sf(1,1) Sf(0,1)*Sf(1,2) Sf(0,2)*Sf(1,0) ***
+                    __m128 t1 = _mm_mul_ps(s0, r1);//t1 = Sf(0,0)*Sf(2,2) Sf(0,1)*Sf(2,0) Sf(0,2)*Sf(2,1) ***
+                    __m128 t2 = _mm_mul_ps(s1, r2);//t2 = Sf(1,0)*Sf(2,1) Sf(1,1)*Sf(2,2) Sf(1,2)*Sf(2,0) ***
+
+                    __m128 r3 = _mm_shuffle_ps(s0,s0,_MM_SHUFFLE(3,0,2,1));//r3 = Sf(0,1) Sf(0,2) Sf(0,0) ***
+                    __m128 r4 = _mm_shuffle_ps(s0,s0,_MM_SHUFFLE(3,1,0,2));//r4 = Sf(0,2) Sf(0,0) Sf(0,1) ***
+
+                    __m128 t00 = _mm_mul_ps(s1, r3);//t00 = Sf(1,0)*Sf(0,1) Sf(1,1)*Sf(0,2) Sf(1,2)*Sf(0,0) ***
+                    __m128 t11 = _mm_mul_ps(s2, r4);//t11 = Sf(2,0)*Sf(0,2) Sf(2,1)*Sf(0,0) Sf(2,2)*Sf(0,1) ***
+                    __m128 t22 = _mm_mul_ps(s2, r0);//t22 = Sf(2,0)*Sf(1,1) Sf(2,1)*Sf(1,2) Sf(2,2)*Sf(1,0) ***
+
+                    t0 = _mm_mul_ps(_mm_sub_ps(t0,t00), det);//Sf(0,0)*Sf(1,1) Sf(0,1)*Sf(1,2) Sf(0,2)*Sf(1,0) ***
+                                                             //-Sf(1,0)*Sf(0,1) -Sf(1,1)*Sf(0,2) -Sf(1,2)*Sf(0,0)
+                    t1 = _mm_mul_ps(_mm_sub_ps(t1,t11), det);//Sf(0,0)*Sf(2,2) Sf(0,1)*Sf(2,0) Sf(0,2)*Sf(2,1) ***
+                                                             //-Sf(2,0)*Sf(0,2) -Sf(2,1)*Sf(0,0) -Sf(2,2)*Sf(0,1)
+                    t2 = _mm_mul_ps(_mm_sub_ps(t2,t22), det);//Sf(1,0)*Sf(2,1) Sf(1,1)*Sf(2,2) Sf(1,2)*Sf(2,0) ***
+                                                             //-Sf(2,0)*Sf(1,1) -Sf(2,1)*Sf(1,2) -Sf(2,2)*Sf(1,0)
+                    _mm_store_ps(t, t0);
+                    _mm_store_ps(t+4, t1);
+                    _mm_store_ps(t+8, t2);
+
+                    Df(0,0) = t[9]; Df(0,1) = t[6]; Df(0,2) = t[1];
+                    Df(1,0) = t[10]; Df(1,1) = t[4]; Df(1,2) = t[2];
+                    Df(2,0) = t[8]; Df(2,1) = t[5]; Df(2,2) = t[0];
+                }
+                else
+                #endif
+                {
+                    t[0] = (float)(((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
+                    t[1] = (float)(((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
+                    t[2] = (float)(((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);
+
+                    t[3] = (float)(((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
+                    t[4] = (float)(((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
+                    t[5] = (float)(((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);
+
+                    t[6] = (float)(((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
+                    t[7] = (float)(((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
+                    t[8] = (float)(((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);
+
+                    Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
+                    Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
+                    Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
+                }
             }
         }
         else
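--

Note (not part of the patch): the SSE2 branch computes all nine cofactors with three
_mm_sub_ps/_mm_mul_ps pairs instead of nine scalar expressions, and the scattered
t[...] indices in the final Df(...) assignments simply undo the lane order produced
by the shuffles. Below is a minimal standalone sketch that exercises the patched path
through the public cv::invert() entry point, assuming an OpenCV build where the SSE2
branch is taken at runtime; the test matrix and tolerance are arbitrary choices for
illustration.

    #include <opencv2/core/core.hpp>
    #include <cstdio>

    int main()
    {
        // Any non-singular 3x3 CV_32F matrix; det = 6 here.
        float m[9] = { 2.f, 0.f, 1.f,
                       1.f, 3.f, 2.f,
                       1.f, 1.f, 2.f };
        cv::Mat M(3, 3, CV_32F, m), Mi;

        // DECOMP_LU sends 3x3 float input through the direct path patched above;
        // a zero return value means the matrix was treated as singular.
        double ok = cv::invert(M, Mi, cv::DECOMP_LU);

        // M*inv(M) should match the identity up to float rounding.
        cv::Mat P = M*Mi;
        double err = cv::norm(P, cv::Mat::eye(3, 3, CV_32F), cv::NORM_INF);
        std::printf("invert returned %g, |M*inv(M) - I|_inf = %g\n", ok, err);
        return (ok != 0. && err < 1e-5) ? 0 : 1;
    }

Running this on hardware where the SSE2 branch is and is not taken should give errors
of the same order, since both branches evaluate the same adjugate-over-determinant
formula.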