From 8facf61bed81570ed0c93302a06dba5adc9c36fd Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Tue, 28 Jan 2020 15:20:37 +0300
Subject: [PATCH 1/8] objdetect(QR): don't introduce deprecated API, compatibility code

---
 .../objdetect/include/opencv2/objdetect.hpp | 60 ++++++++++++-------
 modules/objdetect/src/qrcode.cpp            | 20 -------
 2 files changed, 37 insertions(+), 43 deletions(-)

diff --git a/modules/objdetect/include/opencv2/objdetect.hpp b/modules/objdetect/include/opencv2/objdetect.hpp
index 5bd6a11002..ea7b1ac801 100644
--- a/modules/objdetect/include/opencv2/objdetect.hpp
+++ b/modules/objdetect/include/opencv2/objdetect.hpp
@@ -743,6 +743,43 @@ public:
                                   OutputArrayOfArrays straight_qrcode = noArray()
                                  ) const;
 
+#ifndef CV_DOXYGEN  // COMPATIBILITY
+    inline bool decodeMulti(
+            InputArray img, InputArray points,
+            CV_OUT std::vector<cv::String>& decoded_info,
+            OutputArrayOfArrays straight_qrcode = noArray()
+    ) const
+    {
+        std::vector<std::string> decoded_info_;
+        bool res = decodeMulti(img, points, decoded_info_, straight_qrcode);
+        decoded_info.resize(decoded_info_.size());
+        for (size_t i = 0; i < decoded_info.size(); ++i)
+        {
+            cv::String s; std::swap(s, decoded_info_[i]);
+            decoded_info[i] = s;
+        }
+        return res;
+    }
+
+    inline bool detectAndDecodeMulti(
+            InputArray img, CV_OUT std::vector<cv::String>& decoded_info,
+            OutputArray points = noArray(),
+            OutputArrayOfArrays straight_qrcode = noArray()
+    ) const
+    {
+        std::vector<std::string> decoded_info_;
+        bool res = detectAndDecodeMulti(img, decoded_info_, points, straight_qrcode);
+        decoded_info.resize(decoded_info_.size());
+        for (size_t i = 0; i < decoded_info.size(); ++i)
+        {
+            cv::String s; std::swap(s, decoded_info_[i]);
+            decoded_info[i] = s;
+        }
+        return res;
+    }
+#endif
+
 protected:
     struct Impl;
     Ptr<Impl> p;
@@ -764,29 +801,6 @@ CV_EXPORTS bool detectQRCode(InputArray in, std::vector<Point> &points, double e
  */
 CV_EXPORTS bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray());
 
-/** @brief Detect QR codes in image and return vector of minimum area of quadrangle that describes QR codes.
-    @param in Matrix of the type CV_8UC1 containing an image where QR codes are detected.
-    @param points Output vector of vertices of quadrangles of minimal area that describes QR codes.
-    @param eps_x Epsilon neighborhood, which allows you to determine the horizontal pattern of the scheme 1:1:3:1:1 according to QR code standard.
-    @param eps_y Epsilon neighborhood, which allows you to determine the vertical pattern of the scheme 1:1:3:1:1 according to QR code standard.
-    */
-CV_EXPORTS
-bool detectQRCodeMulti(
-        InputArray in, std::vector<Point> &points,
-        double eps_x = 0.2, double eps_y = 0.1);
-
-/** @brief Decode QR codes in image and return text that is encrypted in QR code.
-    @param in Matrix of the type CV_8UC1 containing an image where QR code are detected.
-    @param points Input vector of vertices of quadrangles of minimal area that describes QR codes.
-    @param decoded_info vector of String information that is encrypted in QR codes.
-    @param straight_qrcode vector of Matrixes of the type CV_8UC1 containing an binary straight QR codes.
-    */
-CV_EXPORTS
-bool decodeQRCodeMulti(
-        InputArray in, InputArray points,
-        CV_OUT std::vector<cv::String> &decoded_info,
-        OutputArrayOfArrays straight_qrcode = noArray());
-
 //! @} objdetect
 }

diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp
index 3467803a25..6ceb6d4ee4 100644
--- a/modules/objdetect/src/qrcode.cpp
+++ b/modules/objdetect/src/qrcode.cpp
@@ -2234,14 +2234,6 @@ bool QRCodeDetector::detectMulti(InputArray in, OutputArray points) const
     return true;
 }
 
-bool detectQRCodeMulti(InputArray in, vector< Point > &points, double eps_x, double eps_y)
-{
-    QRCodeDetector qrdetector;
-    qrdetector.setEpsX(eps_x);
-    qrdetector.setEpsY(eps_y);
-    return qrdetector.detectMulti(in, points);
-}
-
 class ParallelDecodeProcess : public ParallelLoopBody
 {
 public:
@@ -2385,16 +2377,4 @@ bool QRCodeDetector::detectAndDecodeMulti(
     return ok;
 }
 
-bool decodeQRCodeMulti(
-        InputArray in, InputArray points,
-        vector<cv::String> &decoded_info, OutputArrayOfArrays straight_qrcode)
-{
-    QRCodeDetector qrcode;
-    vector<std::string> info;
-    bool ok = qrcode.decodeMulti(in, points, info, straight_qrcode);
-    for (size_t i = 0; i < info.size(); i++)
-        decoded_info.push_back(info[i]);
-    return ok;
-}
-
 }  // namespace
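For context, this is how the compatibility overload kept by the patch above is called from
user code — a minimal sketch only, not part of the patch; the image path is hypothetical:

    #include <opencv2/objdetect.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat img = cv::imread("qrcodes.png", cv::IMREAD_GRAYSCALE); // hypothetical input
        cv::QRCodeDetector detector;
        std::vector<cv::String> info;     // the cv::String overload kept above
        std::vector<cv::Point2f> points;  // four corners per detected code
        if (detector.detectAndDecodeMulti(img, info, points))
            for (size_t i = 0; i < info.size(); i++)
                std::cout << info[i] << std::endl;
        return 0;
    }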
From bd531bd82852808f7fa403e3ee159bd62b1c08cc Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Tue, 28 Jan 2020 15:16:48 +0200
Subject: [PATCH 2/8] core:vsx fix inline asm constraints

generalize constraints to 'wa' for VSX registers
---
 cmake/checks/cpu_vsx_asm.cpp                |  2 +-
 .../include/opencv2/core/hal/intrin_vsx.hpp |  4 +-
 .../core/include/opencv2/core/vsx_utils.hpp | 50 ++++++++-----------
 3 files changed, 25 insertions(+), 31 deletions(-)

diff --git a/cmake/checks/cpu_vsx_asm.cpp b/cmake/checks/cpu_vsx_asm.cpp
index bb4c25507e..9c1bf7a946 100644
--- a/cmake/checks/cpu_vsx_asm.cpp
+++ b/cmake/checks/cpu_vsx_asm.cpp
@@ -16,6 +16,6 @@ int main()
 {
     __vector float vf;
     __vector signed int vi;
-   __asm__ __volatile__ ("xvcvsxwsp %x0,%x1" : "=wf" (vf) : "wa" (vi));
+   __asm__ __volatile__ ("xvcvsxwsp %x0,%x1" : "=wa" (vf) : "wa" (vi));
    return 0;
 }
\ No newline at end of file

diff --git a/modules/core/include/opencv2/core/hal/intrin_vsx.hpp b/modules/core/include/opencv2/core/hal/intrin_vsx.hpp
index bda1d8558f..6e8b439182 100644
--- a/modules/core/include/opencv2/core/hal/intrin_vsx.hpp
+++ b/modules/core/include/opencv2/core/hal/intrin_vsx.hpp
@@ -1338,7 +1338,7 @@ inline v_float32x4 v_load_expand(const float16_t* ptr)
     return v_float32x4(vec_extract_fp_from_shorth(vf16));
 #elif CV_VSX3 && !defined(CV_COMPILER_VSX_BROKEN_ASM)
     vec_float4 vf32;
-    __asm__ __volatile__ ("xvcvhpsp %x0,%x1" : "=wf" (vf32) : "wa" (vec_mergeh(vf16, vf16)));
+    __asm__ __volatile__ ("xvcvhpsp %x0,%x1" : "=wa" (vf32) : "wa" (vec_mergeh(vf16, vf16)));
     return v_float32x4(vf32);
 #else
     const vec_int4 z = vec_int4_z, delta = vec_int4_sp(0x38000000);
@@ -1363,7 +1363,7 @@ inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
 // fixme: Is there any builtin op or intrinsic that cover "xvcvsphp"?
 #if CV_VSX3 && !defined(CV_COMPILER_VSX_BROKEN_ASM)
     vec_ushort8 vf16;
-    __asm__ __volatile__ ("xvcvsphp %x0,%x1" : "=wa" (vf16) : "wf" (v.val));
+    __asm__ __volatile__ ("xvcvsphp %x0,%x1" : "=wa" (vf16) : "wa" (v.val));
     vec_st_l8(vec_mergesqe(vf16, vf16), ptr);
 #else
     const vec_int4 signmask = vec_int4_sp(0x80000000);

diff --git a/modules/core/include/opencv2/core/vsx_utils.hpp b/modules/core/include/opencv2/core/vsx_utils.hpp
index d7c7140607..bcc97fe529 100644
--- a/modules/core/include/opencv2/core/vsx_utils.hpp
+++ b/modules/core/include/opencv2/core/vsx_utils.hpp
@@ -110,9 +110,9 @@ VSX_FINLINE(rt) fnm(const rg& a, const rg& b) { return fn2(a, b); }
 
 #if defined(__GNUG__) && !defined(__clang__)
 // inline asm helper
-#define VSX_IMPL_1RG(rt, rto, rg, rgo, opc, fnm) \
-VSX_FINLINE(rt) fnm(const rg& a)                 \
-{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "="#rto (rs) : #rgo (a)); return rs; }
+#define VSX_IMPL_1RG(rt, rg, opc, fnm) \
+VSX_FINLINE(rt) fnm(const rg& a)       \
+{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "=wa" (rs) : "wa" (a)); return rs; }
 
 #define VSX_IMPL_1VRG(rt, rg, opc, fnm) \
 VSX_FINLINE(rt) fnm(const rg& a)        \
@@ -257,44 +257,38 @@ VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp)
 VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp)
 
 // converts word and doubleword to double-precision
-#ifdef vec_ctd
-#   undef vec_ctd
-#endif
-VSX_IMPL_1RG(vec_double2, wd, vec_int4,    wa, xvcvsxwdp, vec_ctdo)
-VSX_IMPL_1RG(vec_double2, wd, vec_uint4,   wa, xvcvuxwdp, vec_ctdo)
-VSX_IMPL_1RG(vec_double2, wd, vec_dword2,  wi, xvcvsxddp, vec_ctd)
-VSX_IMPL_1RG(vec_double2, wd, vec_udword2, wi, xvcvuxddp, vec_ctd)
+#undef vec_ctd
+VSX_IMPL_1RG(vec_double2, vec_int4,    xvcvsxwdp, vec_ctdo)
+VSX_IMPL_1RG(vec_double2, vec_uint4,   xvcvuxwdp, vec_ctdo)
+VSX_IMPL_1RG(vec_double2, vec_dword2,  xvcvsxddp, vec_ctd)
+VSX_IMPL_1RG(vec_double2, vec_udword2, xvcvuxddp, vec_ctd)
 
 // converts word and doubleword to single-precision
 #undef vec_ctf
-VSX_IMPL_1RG(vec_float4, wf, vec_int4,    wa, xvcvsxwsp, vec_ctf)
-VSX_IMPL_1RG(vec_float4, wf, vec_uint4,   wa, xvcvuxwsp, vec_ctf)
-VSX_IMPL_1RG(vec_float4, wf, vec_dword2,  wi, xvcvsxdsp, vec_ctfo)
-VSX_IMPL_1RG(vec_float4, wf, vec_udword2, wi, xvcvuxdsp, vec_ctfo)
+VSX_IMPL_1RG(vec_float4, vec_int4,    xvcvsxwsp, vec_ctf)
+VSX_IMPL_1RG(vec_float4, vec_uint4,   xvcvuxwsp, vec_ctf)
+VSX_IMPL_1RG(vec_float4, vec_dword2,  xvcvsxdsp, vec_ctfo)
+VSX_IMPL_1RG(vec_float4, vec_udword2, xvcvuxdsp, vec_ctfo)
 
 // converts single and double precision to signed word
 #undef vec_cts
-VSX_IMPL_1RG(vec_int4, wa, vec_double2, wd, xvcvdpsxws, vec_ctso)
-VSX_IMPL_1RG(vec_int4, wa, vec_float4,  wf, xvcvspsxws, vec_cts)
+VSX_IMPL_1RG(vec_int4, vec_double2, xvcvdpsxws, vec_ctso)
+VSX_IMPL_1RG(vec_int4, vec_float4,  xvcvspsxws, vec_cts)
 
 // converts single and double precision to unsigned word
 #undef vec_ctu
-VSX_IMPL_1RG(vec_uint4, wa, vec_double2, wd, xvcvdpuxws, vec_ctuo)
-VSX_IMPL_1RG(vec_uint4, wa, vec_float4,  wf, xvcvspuxws, vec_ctu)
+VSX_IMPL_1RG(vec_uint4, vec_double2, xvcvdpuxws, vec_ctuo)
+VSX_IMPL_1RG(vec_uint4, vec_float4,  xvcvspuxws, vec_ctu)
 
 // converts single and double precision to signed doubleword
-#ifdef vec_ctsl
-#   undef vec_ctsl
-#endif
-VSX_IMPL_1RG(vec_dword2, wi, vec_double2, wd, xvcvdpsxds, vec_ctsl)
-VSX_IMPL_1RG(vec_dword2, wi, vec_float4,  wf, xvcvspsxds, vec_ctslo)
+#undef vec_ctsl
+VSX_IMPL_1RG(vec_dword2, vec_double2, xvcvdpsxds, vec_ctsl)
+VSX_IMPL_1RG(vec_dword2, vec_float4,  xvcvspsxds, vec_ctslo)
 
 // converts single and double precision to unsigned doubleword
-#ifdef vec_ctul
-#   undef vec_ctul
-#endif
-VSX_IMPL_1RG(vec_udword2, wi, vec_double2, wd, xvcvdpuxds, vec_ctul)
-VSX_IMPL_1RG(vec_udword2, wi, vec_float4,  wf, xvcvspuxds, vec_ctulo)
+#undef vec_ctul
+VSX_IMPL_1RG(vec_udword2, vec_double2, xvcvdpuxds, vec_ctul)
+VSX_IMPL_1RG(vec_udword2, vec_float4,  xvcvspuxds, vec_ctulo)
 
 // just in case if GCC doesn't define it
 #ifndef vec_xl
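As an aside, this is roughly what one of the rewritten macro invocations in the patch above
expands to — an illustrative sketch of the generalized 'wa' constraint in use:

    // VSX_IMPL_1RG(vec_double2, vec_int4, xvcvsxwdp, vec_ctdo) generates:
    VSX_FINLINE(vec_double2) vec_ctdo(const vec_int4& a)
    {
        vec_double2 rs;
        // 'wa' lets GCC pick any VSX register, instead of the narrower wd/wf/wi classes
        __asm__ __volatile__("xvcvsxwdp %x0,%x1" : "=wa" (rs) : "wa" (a));
        return rs;
    }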
From ec033330df10086fa44629154ab9e581fbf29872 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Wed, 29 Jan 2020 14:20:55 +0200
Subject: [PATCH 3/8] core:vsx workaround for the unexpected results of `vec_vbpermq` in gcc4.9

---
 modules/core/include/opencv2/core/vsx_utils.hpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/core/include/opencv2/core/vsx_utils.hpp b/modules/core/include/opencv2/core/vsx_utils.hpp
index bcc97fe529..08ae890175 100644
--- a/modules/core/include/opencv2/core/vsx_utils.hpp
+++ b/modules/core/include/opencv2/core/vsx_utils.hpp
@@ -233,6 +233,10 @@ VSX_FINLINE(rt) fnm(const rg& a, const rg& b) \
 #if __GNUG__ < 5
 // vec_xxpermdi in gcc4 is missing little-endian support, just like clang
 #   define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ (((c) & 1) << 1 | (c) >> 1)))
+// vec_vbpermq misbehaves on gcc4.x too, just like vec_xxpermdi, so reimplement it via inline asm
+#   undef vec_vbpermq
+    VSX_IMPL_2VRG(vec_udword2, vec_uchar16, vbpermq, vec_vbpermq)
+    VSX_IMPL_2VRG(vec_dword2,  vec_char16,  vbpermq, vec_vbpermq)
 #else
 #   define vec_permi vec_xxpermdi
 #endif // __GNUG__ < 5

From 70cbc3d8832913c650461b3860e561d0570c6996 Mon Sep 17 00:00:00 2001
From: Niklas Hambüchen
Date: Wed, 29 Jan 2020 02:39:33 +0100
Subject: [PATCH 4/8] cvdef.h: Don't use C's limits.h under C++

Just like with the other headers in the rest of the file.

See e.g.
https://stackoverflow.com/questions/36831465/what-difference-does-it-make-when-i-include-limits-or-limits-h-in-my-c-cod
for the reasons, the most important one being that limits.h does not respect
namespaces, which can cause problems for downstream consumers of cvdef.h.
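A minimal illustration of the guarantee this gives consumers — a sketch; the file
name is hypothetical:

    // consumer.cpp -- hypothetical downstream file, compiled as C++
    #include "opencv2/core/cvdef.h"  // now pulls in <climits> rather than <limits.h>
    #include <climits>               // no clash: both spellings provide the same limits

    int bits_per_uchar() { return CHAR_BIT; }  // the limit macros remain available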
---
 modules/core/include/opencv2/core/cvdef.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h
index 787be9e730..28a5187b17 100644
--- a/modules/core/include/opencv2/core/cvdef.h
+++ b/modules/core/include/opencv2/core/cvdef.h
@@ -181,7 +181,12 @@ namespace cv {
 #undef abs
 #undef Complex
 
+#if defined __cplusplus
+#include <climits>
+#else
 #include <limits.h>
+#endif
+
 #include "opencv2/core/hal/interface.h"
 
 #if defined __ICL

From ecbba852cff631d62f938c5bfeb5767d25220728 Mon Sep 17 00:00:00 2001
From: Arnaud Brejeon
Date: Wed, 29 Jan 2020 12:55:43 -0800
Subject: [PATCH 5/8] Merge pull request #16415 from arnaudbrejeon:bug_fix_16410

* Fix bug 16410 and add test

* imgproc(connectedcomponents): avoid manual uninitialized allocations

* imgproc(connectedcomponents): force 'odd' chunk range size

* imgproc(connectedcomponents): reuse stripeFirstLabel{4/8}Connectivity

* imgproc(connectedcomponents): extend fix from PR14964

---
 modules/imgproc/src/connectedcomponents.cpp   | 112 ++++++++++--------
 .../imgproc/test/test_connectedcomponents.cpp |  76 ++++++++++++
 2 files changed, 140 insertions(+), 48 deletions(-)

diff --git a/modules/imgproc/src/connectedcomponents.cpp b/modules/imgproc/src/connectedcomponents.cpp
index 7b815d6345..2e06be4f98 100644
--- a/modules/imgproc/src/connectedcomponents.cpp
+++ b/modules/imgproc/src/connectedcomponents.cpp
@@ -265,6 +265,21 @@ namespace cv{
         }
     }
 
+    template <typename LT> static inline
+    LT stripeFirstLabel4Connectivity(int y, int w)
+    {
+        CV_DbgAssert((y & 1) == 0);
+        return (LT(y) * LT(w) /*+ 1*/) / 2 + 1;
+    }
+
+    template <typename LT> static inline
+    LT stripeFirstLabel8Connectivity(int y, int w)
+    {
+        CV_DbgAssert((y & 1) == 0);
+        return LT((y /*+ 1*/) / 2) * LT((w + 1) / 2) + 1;
+    }
+
+
     //Based on "Two Strategies to Speed up Connected Components Algorithms", the SAUF (Scan array union find) variant
     //using decision trees
     //Kesheng Wu, et al
@@ -283,12 +298,14 @@
 
         FirstScan8Connectivity& operator=(const FirstScan8Connectivity& ) { return *this; }
 
-        void operator()(const cv::Range& range) const CV_OVERRIDE
+        void operator()(const cv::Range& range2) const CV_OVERRIDE
         {
+            const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
             int r = range.start;
+
             chunksSizeAndLabels_[r] = range.end;
 
-            LabelT label = LabelT((r + 1) / 2) * LabelT((imgLabels_.cols + 1) / 2) + 1;
+            LabelT label = stripeFirstLabel8Connectivity<LabelT>(r, imgLabels_.cols);
 
             const LabelT firstLabel = label;
             const int w = img_.cols;
@@ -385,12 +402,14 @@
 
         FirstScan4Connectivity& operator=(const FirstScan4Connectivity& ) { return *this; }
 
-        void operator()(const cv::Range& range) const CV_OVERRIDE
+        void operator()(const cv::Range& range2) const CV_OVERRIDE
        {
+            const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
             int r = range.start;
+
             chunksSizeAndLabels_[r] = range.end;
 
-            LabelT label = LabelT((r * imgLabels_.cols + 1) / 2 + 1);
+            LabelT label = stripeFirstLabel4Connectivity<LabelT>(r, imgLabels_.cols);
 
             const LabelT firstLabel = label;
             const int w = img_.cols;
@@ -462,8 +481,9 @@
 
         SecondScan& operator=(const SecondScan& ) { return *this; }
 
-        void operator()(const cv::Range& range) const CV_OVERRIDE
+        void operator()(const cv::Range& range2) const CV_OVERRIDE
         {
+            const Range range(range2.start * 2, std::min(range2.end * 2, imgLabels_.rows));
             int r = range.start;
             const int rowBegin = r;
             const int rowEnd = range.end;
@@ -595,53 +615,51 @@
 
         //Array used to store info and labeled pixel by each thread.
         //Different threads affect different memory location of chunksSizeAndLabels
-        int *chunksSizeAndLabels = (int *)cv::fastMalloc(h * sizeof(int));
+        std::vector<int> chunksSizeAndLabels(roundUp(h, 2));
 
         //Tree of labels
-        LabelT *P = (LabelT *)cv::fastMalloc(Plength * sizeof(LabelT));
+        std::vector<LabelT> P_(Plength, 0);
+        LabelT *P = P_.data();
        //First label is for background
-        P[0] = 0;
+        //P[0] = 0;
 
-        cv::Range range(0, h);
+        cv::Range range2(0, divUp(h, 2));
         const double nParallelStripes = std::max(1, std::min(h / 2, getNumThreads()*4));
 
         LabelT nLabels = 1;
 
         if (connectivity == 8){
             //First scan
-            cv::parallel_for_(range, FirstScan8Connectivity(img, imgLabels, P, chunksSizeAndLabels), nParallelStripes);
+            cv::parallel_for_(range2, FirstScan8Connectivity(img, imgLabels, P, chunksSizeAndLabels.data()), nParallelStripes);
 
             //merge labels of different chunks
-            mergeLabels8Connectivity(imgLabels, P, chunksSizeAndLabels);
+            mergeLabels8Connectivity(imgLabels, P, chunksSizeAndLabels.data());
 
             for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
-                flattenL(P, int((i + 1) / 2) * int((w + 1) / 2) + 1, chunksSizeAndLabels[i + 1], nLabels);
+                flattenL(P, stripeFirstLabel8Connectivity<LabelT>(i, w), chunksSizeAndLabels[i + 1], nLabels);
             }
         }
         else{
             //First scan
-            cv::parallel_for_(range, FirstScan4Connectivity(img, imgLabels, P, chunksSizeAndLabels), nParallelStripes);
+            cv::parallel_for_(range2, FirstScan4Connectivity(img, imgLabels, P, chunksSizeAndLabels.data()), nParallelStripes);
 
             //merge labels of different chunks
-            mergeLabels4Connectivity(imgLabels, P, chunksSizeAndLabels);
+            mergeLabels4Connectivity(imgLabels, P, chunksSizeAndLabels.data());
 
             for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
-                flattenL(P, int(i * w + 1) / 2 + 1, chunksSizeAndLabels[i + 1], nLabels);
+                flattenL(P, stripeFirstLabel4Connectivity<LabelT>(i, w), chunksSizeAndLabels[i + 1], nLabels);
             }
         }
 
         //Array for statistics data of threads
-        StatsOp *sopArray = new StatsOp[h];
+        std::vector<StatsOp> sopArray(h);
 
         sop.init(nLabels);
         //Second scan
-        cv::parallel_for_(range, SecondScan(imgLabels, P, sop, sopArray, nLabels), nParallelStripes);
-        StatsOp::mergeStats(imgLabels, sopArray, sop, nLabels);
+        cv::parallel_for_(range2, SecondScan(imgLabels, P, sop, sopArray.data(), nLabels), nParallelStripes);
+        StatsOp::mergeStats(imgLabels, sopArray.data(), sop, nLabels);
         sop.finish();
 
-        delete[] sopArray;
-        cv::fastFree(chunksSizeAndLabels);
-        cv::fastFree(P);
         return nLabels;
     }
 };//End struct LabelingWuParallel
@@ -671,9 +689,10 @@
         //Obviously, 4-way connectivity upper bound is also good for 8-way connectivity labeling
         const size_t Plength = (size_t(h) * size_t(w) + 1) / 2 + 1;
         //array P for equivalences resolution
-        LabelT *P = (LabelT *)fastMalloc(sizeof(LabelT) *Plength);
+        std::vector<LabelT> P_(Plength, 0);
+        LabelT *P = P_.data();
         //first label is for background pixels
-        P[0] = 0;
+        //P[0] = 0;
         LabelT lunique = 1;
 
         if (connectivity == 8){
@@ -811,7 +830,6 @@
         }
         sop.finish();
-        fastFree(P);
 
         return nLabels;
     }//End function LabelingWu operator()
@@ -836,14 +854,14 @@
 
         FirstScan& operator=(const FirstScan&) { return *this; }
 
-        void operator()(const cv::Range& range) const CV_OVERRIDE
+        void operator()(const cv::Range& range2) const CV_OVERRIDE
         {
+            const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
             int r = range.start;
-            r += (r % 2);
 
-            chunksSizeAndLabels_[r] = range.end + (range.end % 2);
+            chunksSizeAndLabels_[r] = range.end;
 
-            LabelT label = LabelT((r + 1) / 2) * LabelT((imgLabels_.cols + 1) / 2) + 1;
+            LabelT label = stripeFirstLabel8Connectivity<LabelT>(r, imgLabels_.cols);
 
             const LabelT firstLabel = label;
             const int h = img_.rows, w = img_.cols;
@@ -1902,14 +1920,13 @@
         SecondScan(const cv::Mat& img, cv::Mat& imgLabels, LabelT *P, StatsOp& sop, StatsOp *sopArray, LabelT& nLabels)
             : img_(img), imgLabels_(imgLabels), P_(P), sop_(sop), sopArray_(sopArray), nLabels_(nLabels){}
 
-        SecondScan& operator=(const SecondScan& ) { return *this; }
-
-        void operator()(const cv::Range& range) const CV_OVERRIDE
+        void operator()(const cv::Range& range2) const CV_OVERRIDE
         {
+            const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
             int r = range.start;
-            r += (r % 2);
+
             const int rowBegin = r;
-            const int rowEnd = range.end + range.end % 2;
+            const int rowEnd = range.end;
 
             if (rowBegin > 0){
                 sopArray_[rowBegin].initElement(nLabels_);
@@ -2542,36 +2559,35 @@
 
         //Array used to store info and labeled pixel by each thread.
         //Different threads affect different memory location of chunksSizeAndLabels
-        const int chunksSizeAndLabelsSize = h + 1;
-        cv::AutoBuffer<int> chunksSizeAndLabels(chunksSizeAndLabelsSize);
+        const int chunksSizeAndLabelsSize = roundUp(h, 2);
+        std::vector<int> chunksSizeAndLabels(chunksSizeAndLabelsSize);
 
         //Tree of labels
-        cv::AutoBuffer<LabelT> P(Plength);
+        std::vector<LabelT> P(Plength, 0);
         //First label is for background
-        P[0] = 0;
+        //P[0] = 0;
 
-        cv::Range range(0, h);
+        cv::Range range2(0, divUp(h, 2));
         const double nParallelStripes = std::max(1, std::min(h / 2, getNumThreads()*4));
 
-        //First scan, each thread works with chunk of img.rows/nThreads rows
-        //e.g. 300 rows, 4 threads -> each chunks is composed of 75 rows
-        cv::parallel_for_(range, FirstScan(img, imgLabels, P.data(), chunksSizeAndLabels.data()), nParallelStripes);
+        //First scan
+        cv::parallel_for_(range2, FirstScan(img, imgLabels, P.data(), chunksSizeAndLabels.data()), nParallelStripes);
 
         //merge labels of different chunks
         mergeLabels(img, imgLabels, P.data(), chunksSizeAndLabels.data());
 
         LabelT nLabels = 1;
         for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
-            CV_Assert(i + 1 < chunksSizeAndLabelsSize);
-            flattenL(P.data(), LabelT((i + 1) / 2) * LabelT((w + 1) / 2) + 1, chunksSizeAndLabels[i + 1], nLabels);
+            CV_DbgAssert(i + 1 < chunksSizeAndLabelsSize);
+            flattenL(P.data(), stripeFirstLabel8Connectivity<LabelT>(i, w), chunksSizeAndLabels[i + 1], nLabels);
         }
 
         //Array for statistics data
-        cv::AutoBuffer<StatsOp> sopArray(h);
+        std::vector<StatsOp> sopArray(h);
 
         sop.init(nLabels);
         //Second scan
-        cv::parallel_for_(range, SecondScan(img, imgLabels, P.data(), sop, sopArray.data(), nLabels), nParallelStripes);
+        cv::parallel_for_(range2, SecondScan(img, imgLabels, P.data(), sop, sopArray.data(), nLabels), nParallelStripes);
         StatsOp::mergeStats(imgLabels, sopArray.data(), sop, nLabels);
         sop.finish();
@@ -2602,8 +2618,9 @@
         //............
         const size_t Plength = size_t(((h + 1) / 2) * size_t((w + 1) / 2)) + 1;
 
-        LabelT *P = (LabelT *)fastMalloc(sizeof(LabelT) *Plength);
-        P[0] = 0;
+        std::vector<LabelT> P_(Plength, 0);
+        LabelT *P = P_.data();
+        //P[0] = 0;
         LabelT lunique = 1;
 
         // First scan
@@ -3911,7 +3928,6 @@
         }
         sop.finish();
-        fastFree(P);
 
         return nLabels;

diff --git a/modules/imgproc/test/test_connectedcomponents.cpp b/modules/imgproc/test/test_connectedcomponents.cpp
index 3817f6d172..5952577691 100644
--- a/modules/imgproc/test/test_connectedcomponents.cpp
+++ b/modules/imgproc/test/test_connectedcomponents.cpp
@@ -150,4 +150,80 @@ TEST(Imgproc_ConnectedComponents, grana_buffer_overflow)
     EXPECT_EQ(1, nbComponents);
 }
 
+
+static cv::Mat createCrashMat(int numThreads) {
+    const int h = numThreads * 4 * 2 + 8;
+    const double nParallelStripes = std::max(1, std::min(h / 2, numThreads * 4));
+    const int w = 4;
+
+    const int nstripes = cvRound(nParallelStripes <= 0 ? h : MIN(MAX(nParallelStripes, 1.), h));
+    const cv::Range stripeRange(0, nstripes);
+    const cv::Range wholeRange(0, h);
+
+    cv::Mat m(h, w, CV_8U);
+    m = 0;
+
+    // Look for a range that starts with an odd value and ends with an even value
+    cv::Range bugRange;
+    for (int s = stripeRange.start; s < stripeRange.end; s++) {
+        cv::Range sr(s, s + 1);
+        cv::Range r;
+        r.start = (int) (wholeRange.start +
+                         ((uint64) sr.start * (wholeRange.end - wholeRange.start) + nstripes / 2) / nstripes);
+        r.end = sr.end >= nstripes ?
+                wholeRange.end :
+                (int) (wholeRange.start +
+                       ((uint64) sr.end * (wholeRange.end - wholeRange.start) + nstripes / 2) / nstripes);
+
+        if (r.start > 0 && r.start % 2 == 1 && r.end % 2 == 0 && r.end >= r.start + 2) {
+            bugRange = r;
+            break;
+        }
+    }
+
+    if (bugRange.empty()) { // Could not create a buggy range
+        return m;
+    }
+
+    // Fill in bug Range
+    for (int x = 1; x < w; x++) {
+        m.at<uchar>(bugRange.start - 1, x) = 1;
+    }
+
+    m.at<uchar>(bugRange.start + 0, 0) = 1;
+    m.at<uchar>(bugRange.start + 0, 1) = 1;
+    m.at<uchar>(bugRange.start + 0, 3) = 1;
+    m.at<uchar>(bugRange.start + 1, 1) = 1;
+    m.at<uchar>(bugRange.start + 2, 1) = 1;
+    m.at<uchar>(bugRange.start + 2, 3) = 1;
+    m.at<uchar>(bugRange.start + 3, 0) = 1;
+    m.at<uchar>(bugRange.start + 3, 1) = 1;
+
+    return m;
+}
+
+TEST(Imgproc_ConnectedComponents, parallel_wu_labels)
+{
+    cv::Mat mat = createCrashMat(cv::getNumThreads());
+    if(mat.empty()) {
+        return;
+    }
+
+    const int nbPixels = cv::countNonZero(mat);
+
+    cv::Mat labels;
+    cv::Mat stats;
+    cv::Mat centroids;
+    int nb = 0;
+    EXPECT_NO_THROW( nb = cv::connectedComponentsWithStats(mat, labels, stats, centroids, 8, CV_32S, cv::CCL_WU) );
+
+    int area = 0;
+    for(int i = 1; i < nb; i++)
+    {
+        area += stats.at<int>(i, cv::CC_STAT_AREA);
+    }
+
+    EXPECT_EQ(nbPixels, area);
+}
+
+
 }} // namespace
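For reference, a minimal use of the API exercised by the new test above — a sketch,
where the input is any CV_8UC1 image:

    #include <opencv2/imgproc.hpp>
    #include <iostream>

    void printComponentAreas(const cv::Mat& input)  // input: any CV_8UC1 image
    {
        cv::Mat labels, stats, centroids;
        int n = cv::connectedComponentsWithStats(input, labels, stats, centroids,
                                                 8, CV_32S, cv::CCL_WU);
        for (int i = 1; i < n; i++)  // label 0 is the background component
            std::cout << "component " << i << " area: "
                      << stats.at<int>(i, cv::CC_STAT_AREA) << std::endl;
    }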
From 86e5e8d765101b44a51924f791f32b47bbebd4b7 Mon Sep 17 00:00:00 2001
From: midjji
Date: Fri, 31 Jan 2020 15:50:21 +0100
Subject: [PATCH 6/8] Merge pull request #15993 from midjji:master

This is a correction of the previously misleading documentation and a warning
related to a common calibration failure described in issue 15992.

* corrected incorrect description of failed calibration state, see issue 15992

* calib3d: apply suggestions from code review by catree
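For illustration, the kind of post-calibration sanity check this warning suggests —
a sketch only, assuming the plain three-coefficient radial model; not part of this patch:

    // Numerically verify that r -> r * (1 + k1*r^2 + k2*r^4 + k3*r^6) is
    // strictly increasing up to the largest radius visible in the image.
    static bool radialDistortionIsMonotonic(double k1, double k2, double k3,
                                            double rMax, int samples = 1000)
    {
        double prev = 0.0;
        for (int i = 1; i <= samples; i++)
        {
            double r = rMax * i / samples;
            double rd = r * (1.0 + r*r*(k1 + r*r*(k2 + r*r*k3)));
            if (rd <= prev)
                return false;  // non-monotonic: treat the calibration as failed
            prev = rd;
        }
        return true;
    }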
---
 modules/calib3d/include/opencv2/calib3d.hpp | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index a2ee47f730..6275ef0f47 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -118,7 +118,18 @@ v = f_y \times y'' + c_y
 tangential distortion coefficients. \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$, are the thin prism distortion
 coefficients. Higher-order coefficients are not considered in OpenCV.
 
-The next figures show two common types of radial distortion: barrel distortion (typically \f$ k_1 < 0 \f$) and pincushion distortion (typically \f$ k_1 > 0 \f$).
+The next figures show two common types of radial distortion: barrel distortion
+(\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically decreasing)
+and pincushion distortion (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically increasing).
+Radial distortion is always monotonic for real lenses,
+and if the estimator produces a non-monotonic result,
+this should be considered a calibration failure.
+More generally, radial distortion must be monotonic and the distortion function must be bijective.
+A failed estimation result may look deceptively good near the image center
+but will work poorly in e.g. AR/SFM applications.
+The optimization method used in OpenCV camera calibration does not include these constraints, as
+the framework does not support the required integer programming and polynomial inequalities.
+See [issue #15992](https://github.com/opencv/opencv/issues/15992) for additional information.
 
 ![](pics/distortion_examples.png)
 ![](pics/distortion_examples2.png)

From ac9f8c1f416c233e5f45925e2e2c680fdd88820f Mon Sep 17 00:00:00 2001
From: gapry
Date: Fri, 31 Jan 2020 23:33:32 +0800
Subject: [PATCH 7/8] Fixed Compilation warnings | Issue #16336

---
 modules/core/include/opencv2/core/types.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/core/include/opencv2/core/types.hpp b/modules/core/include/opencv2/core/types.hpp
index ef9ab59383..4d04bef90a 100644
--- a/modules/core/include/opencv2/core/types.hpp
+++ b/modules/core/include/opencv2/core/types.hpp
@@ -1192,7 +1192,7 @@ _Tp Point_<_Tp>::dot(const Point_& pt) const
 template<typename _Tp> inline
 double Point_<_Tp>::ddot(const Point_& pt) const
 {
-    return (double)x*pt.x + (double)y*pt.y;
+    return (double)x*(double)(pt.x) + (double)y*(double)(pt.y);
 }
 
 template<typename _Tp> inline
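For reference, what the touched method computes — the added casts only make the existing
double promotion explicit, so results are unchanged (a tiny illustrative sketch):

    cv::Point_<int> a(3, 4), b(5, 6);
    double d = a.ddot(b);  // 3.0*5.0 + 4.0*6.0 == 39.0, accumulated in double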
From 126b0d855f3514f295caa673b963015333cdf5a3 Mon Sep 17 00:00:00 2001
From: Gourav Roy <34737471+themechanicalcoder@users.noreply.github.com>
Date: Sat, 1 Feb 2020 22:50:00 +0530
Subject: [PATCH 8/8] Merge pull request #16366 from themechanicalcoder:features2D-tutorial-python

* Add python version of panorama_stitching_rotating_camera and perspective_correction

* Updated code

* added in the docs

* added python code in the docs

* docs change

* Add java tutorial as well

* Add toggle in documentation

* Added the link for Java code

* format code

* Refactored code
---
 .../features2d/homography/homography.markdown |  84 ++++++++++++++-
 .../PanoramaStitchingRotatingCamera.java      |  89 +++++++++++++++++++
 .../Homography/PerspectiveCorrection.java     |  89 +++++++++++++++++++
 .../panorama_stitching_rotating_camera.py     |  71 +++++++++++++++
 .../Homography/perspective_correction.py      |  74 +++++++++++++++
 5 files changed, 406 insertions(+), 1 deletion(-)
 create mode 100644 samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java
 create mode 100644 samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java
 create mode 100644 samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py
 create mode 100644 samples/python/tutorial_code/features2D/Homography/perspective_correction.py

diff --git a/doc/tutorials/features2d/homography/homography.markdown b/doc/tutorials/features2d/homography/homography.markdown
index 1fc8a9e3c4..80c1984bde 100644
--- a/doc/tutorials/features2d/homography/homography.markdown
+++ b/doc/tutorials/features2d/homography/homography.markdown
@@ -12,7 +12,9 @@ For detailed explanations about the theory, please refer to a computer vision co
 *   An Invitation to 3-D Vision: From Images to Geometric Models, @cite Ma:2003:IVI
 *   Computer Vision: Algorithms and Applications, @cite RS10
 
-The tutorial code can be found [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/Homography).
+The tutorial code can be found here [C++](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/Homography),
+[Python](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/features2D/Homography),
+[Java](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/features2D/Homography).
 The images used in this tutorial can be found [here](https://github.com/opencv/opencv/tree/3.4/samples/data) (`left*.jpg`).
 
 Basic theory {#tutorial_homography_Basic_theory}
@@ -171,15 +173,45 @@ The following image shows the source image (left) and the chessboard view that w
 
 The first step consists to detect the chessboard corners in the source and desired images:
 
+@add_toggle_cpp
 @snippet perspective_correction.cpp find-corners
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py find-corners
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java find-corners
+@end_toggle
 
 The homography is estimated easily with:
 
+@add_toggle_cpp
 @snippet perspective_correction.cpp estimate-homography
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py estimate-homography
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java estimate-homography
+@end_toggle
 
 To warp the source chessboard view into the desired chessboard view, we use @ref cv::warpPerspective
 
+@add_toggle_cpp
 @snippet perspective_correction.cpp warp-chessboard
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py warp-chessboard
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java warp-chessboard
+@end_toggle
 
 The result image is:
 
@@ -187,7 +219,17 @@ The result image is:
 
 To compute the coordinates of the source corners transformed by the homography:
 
+@add_toggle_cpp
 @snippet perspective_correction.cpp compute-transformed-corners
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py compute-transformed-corners
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java compute-transformed-corners
+@end_toggle
 
 To check the correctness of the calculation, the matching lines are displayed:
@@ -499,17 +541,57 @@ The figure below shows the two generated views of the Suzanne model, with only a
 
 With the known associated camera poses and the intrinsic parameters, the relative rotation between the two views can be computed:
 
+@add_toggle_cpp
 @snippet panorama_stitching_rotating_camera.cpp extract-rotation
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py extract-rotation
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java extract-rotation
+@end_toggle
+
+@add_toggle_cpp
 @snippet panorama_stitching_rotating_camera.cpp compute-rotation-displacement
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py compute-rotation-displacement
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java compute-rotation-displacement
+@end_toggle
 
 Here, the second image will be stitched with respect to the first image. The homography can be calculated using the formula above:
 
+@add_toggle_cpp
 @snippet panorama_stitching_rotating_camera.cpp compute-homography
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py compute-homography
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java compute-homography
+@end_toggle
 
 The stitching is made simply with:
 
+@add_toggle_cpp
 @snippet panorama_stitching_rotating_camera.cpp stitch
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py stitch
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java stitch
+@end_toggle
 
 The resulting image is:

diff --git a/samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java b/samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java
new file mode 100644
index 0000000000..ba86895d38
--- /dev/null
+++ b/samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java
@@ -0,0 +1,89 @@
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opencv.core.*;
+import org.opencv.core.Range;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+class PanoramaStitchingRotatingCameraRun {
+    void basicPanoramaStitching (String[] args) {
+        String img1path = args[0], img2path = args[1];
+        Mat img1 = new Mat(), img2 = new Mat();
+        img1 = Imgcodecs.imread(img1path);
+        img2 = Imgcodecs.imread(img2path);
+
+        //! [camera-pose-from-Blender-at-location-1]
+        Mat c1Mo = new Mat( 4, 4, CvType.CV_64FC1 );
+        c1Mo.put(0, 0, 0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112,
+                 0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443,
+                 -0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654,
+                 0, 0, 0, 1 );
+        //! [camera-pose-from-Blender-at-location-1]
+
+        //! [camera-pose-from-Blender-at-location-2]
+        Mat c2Mo = new Mat( 4, 4, CvType.CV_64FC1 );
+        c2Mo.put(0, 0, 0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112,
+                 -0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443,
+                 0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654,
+                 0, 0, 0, 1);
+        //! [camera-pose-from-Blender-at-location-2]
+
+        //! [camera-intrinsics-from-Blender]
+        Mat cameraMatrix = new Mat(3, 3, CvType.CV_64FC1);
+        cameraMatrix.put(0, 0, 700.0, 0.0, 320.0, 0.0, 700.0, 240.0, 0, 0, 1 );
+        //! [camera-intrinsics-from-Blender]
+
+        //! [extract-rotation]
+        Range rowRange = new Range(0,3);
+        Range colRange = new Range(0,3);
+        //! [extract-rotation]
+
+        //! [compute-rotation-displacement]
+        //c1Mo * oMc2
+        Mat R1 = new Mat(c1Mo, rowRange, colRange);
+        Mat R2 = new Mat(c2Mo, rowRange, colRange);
+        Mat R_2to1 = new Mat();
+        Core.gemm(R1, R2.t(), 1, new Mat(), 0, R_2to1 );
+        //! [compute-rotation-displacement]
+
+        //! [compute-homography]
+        Mat tmp = new Mat(), H = new Mat();
+        Core.gemm(cameraMatrix, R_2to1, 1, new Mat(), 0, tmp);
+        Core.gemm(tmp, cameraMatrix.inv(), 1, new Mat(), 0, H);
+        Scalar s = new Scalar(H.get(2, 2)[0]);
+        Core.divide(H, s, H);
+        System.out.println(H.dump());
+        //! [compute-homography]
+
+        //! [stitch]
+        Mat img_stitch = new Mat();
+        Imgproc.warpPerspective(img2, img_stitch, H, new Size(img2.cols()*2, img2.rows()) );
+        Mat half = new Mat();
+        half = new Mat(img_stitch, new Rect(0, 0, img1.cols(), img1.rows()));
+        img1.copyTo(half);
+        //! [stitch]
+
+        Mat img_compare = new Mat();
+        Mat img_space = Mat.zeros(new Size(50, img1.rows()), CvType.CV_8UC3);
+        List<Mat> list = new ArrayList<>();
+        list.add(img1);
+        list.add(img_space);
+        list.add(img2);
+        Core.hconcat(list, img_compare);
+
+        HighGui.imshow("Compare Images", img_compare);
+        HighGui.imshow("Panorama Stitching", img_stitch);
+        HighGui.waitKey(0);
+        System.exit(0);
+    }
+}
+
+public class PanoramaStitchingRotatingCamera {
+    public static void main(String[] args) {
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+        new PanoramaStitchingRotatingCameraRun().basicPanoramaStitching(args);
+    }
+}

diff --git a/samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java b/samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java
new file mode 100644
index 0000000000..f702c1944a
--- /dev/null
+++ b/samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java
@@ -0,0 +1,89 @@
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.opencv.core.*;
+import org.opencv.calib3d.Calib3d;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+class PerspectiveCorrectionRun {
+    void perspectiveCorrection (String[] args) {
+        String img1Path = args[0], img2Path = args[1];
+        Mat img1 = Imgcodecs.imread(img1Path);
+        Mat img2 = Imgcodecs.imread(img2Path);
+
+        //! [find-corners]
+        MatOfPoint2f corners1 = new MatOfPoint2f(), corners2 = new MatOfPoint2f();
+        boolean found1 = Calib3d.findChessboardCorners(img1, new Size(9, 6), corners1 );
+        boolean found2 = Calib3d.findChessboardCorners(img2, new Size(9, 6), corners2 );
+        //! [find-corners]
+
+        if (!found1 || !found2) {
+            System.out.println("Error, cannot find the chessboard corners in both images.");
+            System.exit(-1);
+        }
+
+        //! [estimate-homography]
+        Mat H = new Mat();
+        H = Calib3d.findHomography(corners1, corners2);
+        System.out.println(H.dump());
+        //! [estimate-homography]
+
+        //! [warp-chessboard]
+        Mat img1_warp = new Mat();
+        Imgproc.warpPerspective(img1, img1_warp, H, img1.size());
+        //! [warp-chessboard]
+
+        Mat img_draw_warp = new Mat();
+        List<Mat> list1 = new ArrayList<>(), list2 = new ArrayList<>();
+        list1.add(img2);
+        list1.add(img1_warp);
+        Core.hconcat(list1, img_draw_warp);
+        HighGui.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp);
+
+        //! [compute-transformed-corners]
+        Mat img_draw_matches = new Mat();
+        list2.add(img1);
+        list2.add(img2);
+        Core.hconcat(list2, img_draw_matches);
+        Point[] corners1Arr = corners1.toArray();
+
+        for (int i = 0; i < corners1Arr.length; i++) {
+            Mat pt1 = new Mat(3, 1, CvType.CV_64FC1), pt2 = new Mat();
+            pt1.put(0, 0, corners1Arr[i].x, corners1Arr[i].y, 1);
+
+            Core.gemm(H, pt1, 1, new Mat(), 0, pt2);
+            double[] data = pt2.get(2, 0);
+            Core.divide(pt2, new Scalar(data[0]), pt2);
+
+            double[] data1 = pt2.get(0, 0);
+            double[] data2 = pt2.get(1, 0);
+            Point end = new Point((int)(img1.cols() + data1[0]), (int)data2[0]);
+            Imgproc.line(img_draw_matches, corners1Arr[i], end, RandomColor(), 2);
+        }
+
+        HighGui.imshow("Draw matches", img_draw_matches);
+        HighGui.waitKey(0);
+        //! [compute-transformed-corners]
+
+        System.exit(0);
+    }
+
+    Scalar RandomColor () {
+        Random rng = new Random();
+        int r = rng.nextInt(256);
+        int g = rng.nextInt(256);
+        int b = rng.nextInt(256);
+        return new Scalar(r, g, b);
+    }
+}
+
+public class PerspectiveCorrection {
+    public static void main (String[] args) {
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+        new PerspectiveCorrectionRun().perspectiveCorrection(args);
+    }
+}

diff --git a/samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py b/samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py
new file mode 100644
index 0000000000..81daed93ac
--- /dev/null
+++ b/samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import numpy as np
+import cv2 as cv
+
+def basicPanoramaStitching(img1Path, img2Path):
+    img1 = cv.imread(cv.samples.findFile(img1Path))
+    img2 = cv.imread(cv.samples.findFile(img2Path))
+
+    # [camera-pose-from-Blender-at-location-1]
+    c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
+                     [0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
+                     [-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
+                     [0, 0, 0, 1]], dtype=np.float64)
+    # [camera-pose-from-Blender-at-location-1]
+
+    # [camera-pose-from-Blender-at-location-2]
+    c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112],
+                     [-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
+                     [0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
+                     [0, 0, 0, 1]], dtype=np.float64)
+    # [camera-pose-from-Blender-at-location-2]
+
+    # [camera-intrinsics-from-Blender]
+    cameraMatrix = np.array([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0, 0, 1]], dtype=np.float32)
+    # [camera-intrinsics-from-Blender]
+
+    # [extract-rotation]
+    R1 = c1Mo[0:3, 0:3]
+    R2 = c2Mo[0:3, 0:3]
+    # [extract-rotation]
+
+    # [compute-rotation-displacement]
+    R2 = R2.transpose()
+    R_2to1 = np.dot(R1, R2)
+    # [compute-rotation-displacement]
+
+    # [compute-homography]
+    H = cameraMatrix.dot(R_2to1).dot(np.linalg.inv(cameraMatrix))
+    H = H / H[2][2]
+    # [compute-homography]
+
+    # [stitch]
+    img_stitch = cv.warpPerspective(img2, H, (img2.shape[1]*2, img2.shape[0]))
+    img_stitch[0:img1.shape[0], 0:img1.shape[1]] = img1
+    # [stitch]
+
+    img_space = np.zeros((img1.shape[0], 50, 3), dtype=np.uint8)
+    img_compare = cv.hconcat([img1, img_space, img2])
+
+    cv.imshow("Final", img_compare)
+    cv.imshow("Panorama", img_stitch)
+    cv.waitKey(0)
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description="Code for homography tutorial. Example 5: basic panorama stitching from a rotating camera.")
+    parser.add_argument("-I1", "--image1", help="path to first image", default="Blender_Suzanne1.jpg")
+    parser.add_argument("-I2", "--image2", help="path to second image", default="Blender_Suzanne2.jpg")
+    args = parser.parse_args()
+    print("Panorama Stitching Started")
+    basicPanoramaStitching(args.image1, args.image2)
+    print("Panorama Stitching Completed Successfully")
+
+
+if __name__ == '__main__':
+    main()

diff --git a/samples/python/tutorial_code/features2D/Homography/perspective_correction.py b/samples/python/tutorial_code/features2D/Homography/perspective_correction.py
new file mode 100644
index 0000000000..184c44efd8
--- /dev/null
+++ b/samples/python/tutorial_code/features2D/Homography/perspective_correction.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import numpy as np
+import cv2 as cv
+import sys
+
+
+def randomColor():
+    color = np.random.randint(0, 255, (1, 3))
+    return color[0].tolist()
+
+def perspectiveCorrection(img1Path, img2Path, patternSize):
+    img1 = cv.imread(cv.samples.findFile(img1Path))
+    img2 = cv.imread(cv.samples.findFile(img2Path))
+
+    # [find-corners]
+    ret1, corners1 = cv.findChessboardCorners(img1, patternSize)
+    ret2, corners2 = cv.findChessboardCorners(img2, patternSize)
+    # [find-corners]
+
+    if not ret1 or not ret2:
+        print("Error, cannot find the chessboard corners in both images.")
+        sys.exit(-1)
+
+    # [estimate-homography]
+    H, _ = cv.findHomography(corners1, corners2)
+    print(H)
+    # [estimate-homography]
+
+    # [warp-chessboard]
+    img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
+    # [warp-chessboard]
+
+    img_draw_warp = cv.hconcat([img2, img1_warp])
+    cv.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp)
+
+    corners1 = corners1.tolist()
+    corners1 = [a[0] for a in corners1]
+
+    # [compute-transformed-corners]
+    img_draw_matches = cv.hconcat([img1, img2])
+    for i in range(len(corners1)):
+        pt1 = np.array([corners1[i][0], corners1[i][1], 1])
+        pt1 = pt1.reshape(3, 1)
+        pt2 = np.dot(H, pt1)
+        pt2 = pt2/pt2[2]
+        end = (int(img1.shape[1] + pt2[0]), int(pt2[1]))
+        cv.line(img_draw_matches, tuple([int(j) for j in corners1[i]]), end, randomColor(), 2)
+
+    cv.imshow("Draw matches", img_draw_matches)
+    cv.waitKey(0)
+    # [compute-transformed-corners]
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-I1', "--image1", help="Path to the first image", default="left02.jpg")
+    parser.add_argument('-I2', "--image2", help="Path to the second image", default="left01.jpg")
+    parser.add_argument('-H', "--height", help="Height of pattern size", default=6)
+    parser.add_argument('-W', "--width", help="Width of pattern size", default=9)
+    args = parser.parse_args()
+
+    img1Path = args.image1
+    img2Path = args.image2
+    h = args.height
+    w = args.width
+    perspectiveCorrection(img1Path, img2Path, (w, h))
+
+if __name__ == "__main__":
+    main()