commit bf2f7b0f8b
Merge remote-tracking branch 'upstream/3.4' into merge-3.4
@@ -16,6 +16,6 @@ int main()
{
    __vector float vf;
    __vector signed int vi;
-   __asm__ __volatile__ ("xvcvsxwsp %x0,%x1" : "=wf" (vf) : "wa" (vi));
+   __asm__ __volatile__ ("xvcvsxwsp %x0,%x1" : "=wa" (vf) : "wa" (vi));
    return 0;
}
@@ -12,7 +12,9 @@ For detailed explanations about the theory, please refer to a computer vision co

*   An Invitation to 3-D Vision: From Images to Geometric Models, @cite Ma:2003:IVI
*   Computer Vision: Algorithms and Applications, @cite RS10

-The tutorial code can be found [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/Homography).
+The tutorial code can be found here [C++](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/Homography),
+[Python](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/Homography),
+[Java](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/Homography).
The images used in this tutorial can be found [here](https://github.com/opencv/opencv/tree/master/samples/data) (`left*.jpg`).

Basic theory {#tutorial_homography_Basic_theory}
@@ -171,15 +173,45 @@ The following image shows the source image (left) and the chessboard view that w

The first step is to detect the chessboard corners in the source and desired images:

+@add_toggle_cpp
@snippet perspective_correction.cpp find-corners
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py find-corners
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java find-corners
+@end_toggle
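
For orientation, this step reduces to one `cv.findChessboardCorners` call per image; a minimal Python sketch, assuming the 9x6 board used by the `left*.jpg` sample images:

```python
import cv2 as cv

img1 = cv.imread(cv.samples.findFile("left02.jpg"))   # source view
img2 = cv.imread(cv.samples.findFile("left01.jpg"))   # desired view

# returns (found, corners); corners is an Nx1x2 float32 array
found1, corners1 = cv.findChessboardCorners(img1, (9, 6))
found2, corners2 = cv.findChessboardCorners(img2, (9, 6))
assert found1 and found2, "chessboard corners not found in both images"
```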

The homography is estimated easily with:

+@add_toggle_cpp
@snippet perspective_correction.cpp estimate-homography
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py estimate-homography
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java estimate-homography
+@end_toggle
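
In Python this is a single call on the two detected corner sets; a sketch continuing from the one above (a robust method such as `cv.RANSAC` could be passed as a third argument, but clean chessboard corners do not need it):

```python
# H maps pixels of img1 into img2; the second output is the inlier mask
H, _ = cv.findHomography(corners1, corners2)
print(H)
```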

To warp the source chessboard view into the desired chessboard view, we use @ref cv::warpPerspective

+@add_toggle_cpp
@snippet perspective_correction.cpp warp-chessboard
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py warp-chessboard
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java warp-chessboard
+@end_toggle
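
Continuing the same sketch, the warp is again one call; note that the output size is given as (width, height):

```python
# warp the source view with H; output size matches the source image
img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
```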

The result image is:

@@ -187,7 +219,17 @@ The result image is:

To compute the coordinates of the source corners transformed by the homography:

+@add_toggle_cpp
@snippet perspective_correction.cpp compute-transformed-corners
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/perspective_correction.py compute-transformed-corners
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PerspectiveCorrection.java compute-transformed-corners
+@end_toggle
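
Each source corner is lifted to homogeneous coordinates, multiplied by H, and divided by the third component; a Python sketch continuing from above:

```python
import numpy as np

for c in corners1.reshape(-1, 2):
    p = H.dot(np.array([c[0], c[1], 1.0]))  # homogeneous transform
    p /= p[2]                               # de-homogenize
    print(c, "->", p[:2])
```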

To check the correctness of the calculation, the matching lines are displayed:

@@ -499,17 +541,57 @@ The figure below shows the two generated views of the Suzanne model, with only a

With the known associated camera poses and the intrinsic parameters, the relative rotation between the two views can be computed:

+@add_toggle_cpp
@snippet panorama_stitching_rotating_camera.cpp extract-rotation
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py extract-rotation
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java extract-rotation
+@end_toggle

+@add_toggle_cpp
@snippet panorama_stitching_rotating_camera.cpp compute-rotation-displacement
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py compute-rotation-displacement
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java compute-rotation-displacement
+@end_toggle
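
With `c1Mo` and `c2Mo` as the 4x4 homogeneous poses defined in the full sample reproduced at the end of this commit, the two snippets above amount to slicing out the 3x3 rotation blocks and composing them; a Python sketch:

```python
import numpy as np

R1 = c1Mo[0:3, 0:3]                  # rotation part of the first camera pose
R2 = c2Mo[0:3, 0:3]                  # rotation part of the second camera pose
R_2to1 = np.dot(R1, R2.transpose())  # relative rotation from view 2 to view 1
```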

Here, the second image will be stitched with respect to the first image. The homography can be calculated using the formula above:

+@add_toggle_cpp
@snippet panorama_stitching_rotating_camera.cpp compute-homography
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py compute-homography
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java compute-homography
+@end_toggle
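
With `cameraMatrix` as the 3x3 intrinsic matrix from the sample, the formula \f$ \mathbf{H} = \mathbf{K} \mathbf{R} \mathbf{K}^{-1} \f$ translates directly, continuing the sketch:

```python
H = cameraMatrix.dot(R_2to1).dot(np.linalg.inv(cameraMatrix))
H = H / H[2, 2]   # normalize so that H[2,2] == 1
```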

The stitching is then done simply with:

+@add_toggle_cpp
@snippet panorama_stitching_rotating_camera.cpp stitch
+@end_toggle
+
+@add_toggle_python
+@snippet samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py stitch
+@end_toggle
+
+@add_toggle_java
+@snippet samples/java/tutorial_code/features2D/Homography/PanoramaStitchingRotatingCamera.java stitch
+@end_toggle
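
The warp places the second view on a double-width canvas in the first view's frame, and the first image is then pasted over its own region; continuing the sketch:

```python
img_stitch = cv.warpPerspective(img2, H, (img2.shape[1] * 2, img2.shape[0]))
img_stitch[0:img1.shape[0], 0:img1.shape[1]] = img1   # copy view 1 back in place
```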

The resulting image is:

@@ -118,7 +118,18 @@ v = f_y \times y'' + c_y
tangential distortion coefficients. \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$ are the thin prism distortion
coefficients. Higher-order coefficients are not considered in OpenCV.

-The next figures show two common types of radial distortion: barrel distortion (typically \f$ k_1 < 0 \f$) and pincushion distortion (typically \f$ k_1 > 0 \f$).
+The next figures show two common types of radial distortion: barrel distortion
+(\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically decreasing)
+and pincushion distortion (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically increasing).
+Radial distortion is always monotonic for real lenses,
+and if the estimator produces a non-monotonic result,
+this should be considered a calibration failure.
+More generally, radial distortion must be monotonic and the distortion function must be bijective.
+A failed estimation result may look deceptively good near the image center
+but will work poorly in e.g. AR/SFM applications.
+The optimization method used in OpenCV camera calibration does not include these constraints, as
+the framework does not support the required integer programming and polynomial inequalities.
+See [issue #15992](https://github.com/opencv/opencv/issues/15992) for additional information.
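
This constraint is easy to test numerically: the distorted radius \f$ r \cdot (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \f$ should be strictly increasing over the radius range covered by the image, which makes the radial mapping bijective there. A small sanity-check sketch with illustrative coefficient values (not taken from any real calibration):

```python
import numpy as np

def radial_is_monotonic(k1, k2, k3, r_max, n=10000):
    """True if r * (1 + k1*r^2 + k2*r^4 + k3*r^6) is strictly increasing on [0, r_max]."""
    r = np.linspace(0.0, r_max, n)
    distorted = r * (1.0 + k1 * r**2 + k2 * r**4 + k3 * r**6)
    return bool(np.all(np.diff(distorted) > 0.0))

print(radial_is_monotonic(-0.2, 0.05, 0.0, 1.0))  # mild barrel distortion -> True
print(radial_is_monotonic(-1.5, 0.0, 0.0, 1.0))   # extreme estimate folds back -> False
```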

![](pics/distortion_examples.png)
![](pics/distortion_examples2.png)

@@ -167,7 +167,12 @@ namespace cv {
#undef abs
#undef Complex

+#if defined __cplusplus
+#include <limits>
+#else
+#include <limits.h>
+#endif

#include "opencv2/core/hal/interface.h"

#if defined __ICL
@@ -1338,7 +1338,7 @@ inline v_float32x4 v_load_expand(const float16_t* ptr)
    return v_float32x4(vec_extract_fp_from_shorth(vf16));
#elif CV_VSX3 && !defined(CV_COMPILER_VSX_BROKEN_ASM)
    vec_float4 vf32;
-   __asm__ __volatile__ ("xvcvhpsp %x0,%x1" : "=wf" (vf32) : "wa" (vec_mergeh(vf16, vf16)));
+   __asm__ __volatile__ ("xvcvhpsp %x0,%x1" : "=wa" (vf32) : "wa" (vec_mergeh(vf16, vf16)));
    return v_float32x4(vf32);
#else
    const vec_int4 z = vec_int4_z, delta = vec_int4_sp(0x38000000);
@@ -1363,7 +1363,7 @@ inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
    // fixme: Is there any builtin op or intrinsic that covers "xvcvsphp"?
#if CV_VSX3 && !defined(CV_COMPILER_VSX_BROKEN_ASM)
    vec_ushort8 vf16;
-   __asm__ __volatile__ ("xvcvsphp %x0,%x1" : "=wa" (vf16) : "wf" (v.val));
+   __asm__ __volatile__ ("xvcvsphp %x0,%x1" : "=wa" (vf16) : "wa" (v.val));
    vec_st_l8(vec_mergesqe(vf16, vf16), ptr);
#else
    const vec_int4 signmask = vec_int4_sp(0x80000000);
@@ -1215,7 +1215,7 @@ _Tp Point_<_Tp>::dot(const Point_& pt) const
template<typename _Tp> inline
double Point_<_Tp>::ddot(const Point_& pt) const
{
-    return (double)x*pt.x + (double)y*pt.y;
+    return (double)x*(double)(pt.x) + (double)y*(double)(pt.y);
}

template<typename _Tp> inline
@@ -110,9 +110,9 @@ VSX_FINLINE(rt) fnm(const rg& a, const rg& b) { return fn2(a, b); }
#if defined(__GNUG__) && !defined(__clang__)

// inline asm helper
-#define VSX_IMPL_1RG(rt, rto, rg, rgo, opc, fnm) \
-VSX_FINLINE(rt) fnm(const rg& a) \
-{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "="#rto (rs) : #rgo (a)); return rs; }
+#define VSX_IMPL_1RG(rt, rg, opc, fnm) \
+VSX_FINLINE(rt) fnm(const rg& a) \
+{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "=wa" (rs) : "wa" (a)); return rs; }

#define VSX_IMPL_1VRG(rt, rg, opc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) \

@@ -233,6 +233,10 @@ VSX_FINLINE(rt) fnm(const rg& a, const rg& b) \
#if __GNUG__ < 5
// vec_xxpermdi in gcc4 is missing little-endian support, just like clang
# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ (((c) & 1) << 1 | (c) >> 1)))
+// same as vec_xxpermdi
+# undef vec_vbpermq
+VSX_IMPL_2VRG(vec_udword2, vec_uchar16, vbpermq, vec_vbpermq)
+VSX_IMPL_2VRG(vec_dword2, vec_char16, vbpermq, vec_vbpermq)
#else
# define vec_permi vec_xxpermdi
#endif // __GNUG__ < 5
@@ -257,44 +261,38 @@ VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp)
VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp)

// converts word and doubleword to double-precision
-#ifdef vec_ctd
-#   undef vec_ctd
-#endif
-VSX_IMPL_1RG(vec_double2, wd, vec_int4,    wa, xvcvsxwdp, vec_ctdo)
-VSX_IMPL_1RG(vec_double2, wd, vec_uint4,   wa, xvcvuxwdp, vec_ctdo)
-VSX_IMPL_1RG(vec_double2, wd, vec_dword2,  wi, xvcvsxddp, vec_ctd)
-VSX_IMPL_1RG(vec_double2, wd, vec_udword2, wi, xvcvuxddp, vec_ctd)
+#undef vec_ctd
+VSX_IMPL_1RG(vec_double2, vec_int4,    xvcvsxwdp, vec_ctdo)
+VSX_IMPL_1RG(vec_double2, vec_uint4,   xvcvuxwdp, vec_ctdo)
+VSX_IMPL_1RG(vec_double2, vec_dword2,  xvcvsxddp, vec_ctd)
+VSX_IMPL_1RG(vec_double2, vec_udword2, xvcvuxddp, vec_ctd)

// converts word and doubleword to single-precision
#undef vec_ctf
-VSX_IMPL_1RG(vec_float4, wf, vec_int4,    wa, xvcvsxwsp, vec_ctf)
-VSX_IMPL_1RG(vec_float4, wf, vec_uint4,   wa, xvcvuxwsp, vec_ctf)
-VSX_IMPL_1RG(vec_float4, wf, vec_dword2,  wi, xvcvsxdsp, vec_ctfo)
-VSX_IMPL_1RG(vec_float4, wf, vec_udword2, wi, xvcvuxdsp, vec_ctfo)
+VSX_IMPL_1RG(vec_float4, vec_int4,    xvcvsxwsp, vec_ctf)
+VSX_IMPL_1RG(vec_float4, vec_uint4,   xvcvuxwsp, vec_ctf)
+VSX_IMPL_1RG(vec_float4, vec_dword2,  xvcvsxdsp, vec_ctfo)
+VSX_IMPL_1RG(vec_float4, vec_udword2, xvcvuxdsp, vec_ctfo)

// converts single and double precision to signed word
#undef vec_cts
-VSX_IMPL_1RG(vec_int4, wa, vec_double2, wd, xvcvdpsxws, vec_ctso)
-VSX_IMPL_1RG(vec_int4, wa, vec_float4,  wf, xvcvspsxws, vec_cts)
+VSX_IMPL_1RG(vec_int4, vec_double2, xvcvdpsxws, vec_ctso)
+VSX_IMPL_1RG(vec_int4, vec_float4,  xvcvspsxws, vec_cts)

// converts single and double precision to unsigned word
#undef vec_ctu
-VSX_IMPL_1RG(vec_uint4, wa, vec_double2, wd, xvcvdpuxws, vec_ctuo)
-VSX_IMPL_1RG(vec_uint4, wa, vec_float4,  wf, xvcvspuxws, vec_ctu)
+VSX_IMPL_1RG(vec_uint4, vec_double2, xvcvdpuxws, vec_ctuo)
+VSX_IMPL_1RG(vec_uint4, vec_float4,  xvcvspuxws, vec_ctu)

// converts single and double precision to signed doubleword
-#ifdef vec_ctsl
-#   undef vec_ctsl
-#endif
-VSX_IMPL_1RG(vec_dword2, wi, vec_double2, wd, xvcvdpsxds, vec_ctsl)
-VSX_IMPL_1RG(vec_dword2, wi, vec_float4,  wf, xvcvspsxds, vec_ctslo)
+#undef vec_ctsl
+VSX_IMPL_1RG(vec_dword2, vec_double2, xvcvdpsxds, vec_ctsl)
+VSX_IMPL_1RG(vec_dword2, vec_float4,  xvcvspsxds, vec_ctslo)

// converts single and double precision to unsigned doubleword
-#ifdef vec_ctul
-#   undef vec_ctul
-#endif
-VSX_IMPL_1RG(vec_udword2, wi, vec_double2, wd, xvcvdpuxds, vec_ctul)
-VSX_IMPL_1RG(vec_udword2, wi, vec_float4,  wf, xvcvspuxds, vec_ctulo)
+#undef vec_ctul
+VSX_IMPL_1RG(vec_udword2, vec_double2, xvcvdpuxds, vec_ctul)
+VSX_IMPL_1RG(vec_udword2, vec_float4,  xvcvspuxds, vec_ctulo)

// just in case if GCC doesn't define it
#ifndef vec_xl
@@ -265,6 +265,21 @@ namespace cv{
        }
    }

+   template <typename LT> static inline
+   LT stripeFirstLabel4Connectivity(int y, int w)
+   {
+       CV_DbgAssert((y & 1) == 0);
+       return (LT(y) * LT(w) /*+ 1*/) / 2 + 1;
+   }
+
+   template <typename LT> static inline
+   LT stripeFirstLabel8Connectivity(int y, int w)
+   {
+       CV_DbgAssert((y & 1) == 0);
+       return LT((y /*+ 1*/) / 2) * LT((w + 1) / 2) + 1;
+   }
+

    //Based on "Two Strategies to Speed up Connected Components Algorithms", the SAUF (Scan array union find) variant
    //using decision trees
    //Kesheng Wu, et al

@@ -283,12 +298,14 @@ namespace cv{

        FirstScan8Connectivity& operator=(const FirstScan8Connectivity& ) { return *this; }

-       void operator()(const cv::Range& range) const CV_OVERRIDE
+       void operator()(const cv::Range& range2) const CV_OVERRIDE
        {
+           const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
            int r = range.start;

            chunksSizeAndLabels_[r] = range.end;

-           LabelT label = LabelT((r + 1) / 2) * LabelT((imgLabels_.cols + 1) / 2) + 1;
+           LabelT label = stripeFirstLabel8Connectivity<LabelT>(r, imgLabels_.cols);

            const LabelT firstLabel = label;
            const int w = img_.cols;
@@ -385,12 +402,14 @@ namespace cv{

        FirstScan4Connectivity& operator=(const FirstScan4Connectivity& ) { return *this; }

-       void operator()(const cv::Range& range) const CV_OVERRIDE
+       void operator()(const cv::Range& range2) const CV_OVERRIDE
        {
+           const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
            int r = range.start;

            chunksSizeAndLabels_[r] = range.end;

-           LabelT label = LabelT((r * imgLabels_.cols + 1) / 2 + 1);
+           LabelT label = stripeFirstLabel4Connectivity<LabelT>(r, imgLabels_.cols);

            const LabelT firstLabel = label;
            const int w = img_.cols;
@@ -462,8 +481,9 @@ namespace cv{

        SecondScan& operator=(const SecondScan& ) { return *this; }

-       void operator()(const cv::Range& range) const CV_OVERRIDE
+       void operator()(const cv::Range& range2) const CV_OVERRIDE
        {
+           const Range range(range2.start * 2, std::min(range2.end * 2, imgLabels_.rows));
            int r = range.start;
            const int rowBegin = r;
            const int rowEnd = range.end;
@@ -595,53 +615,51 @@ namespace cv{

        //Array used to store info and labeled pixel by each thread.
        //Different threads affect different memory location of chunksSizeAndLabels
-       int *chunksSizeAndLabels = (int *)cv::fastMalloc(h * sizeof(int));
+       std::vector<int> chunksSizeAndLabels(roundUp(h, 2));

        //Tree of labels
-       LabelT *P = (LabelT *)cv::fastMalloc(Plength * sizeof(LabelT));
+       std::vector<LabelT> P_(Plength, 0);
+       LabelT *P = P_.data();
        //First label is for background
-       P[0] = 0;
+       //P[0] = 0;

-       cv::Range range(0, h);
+       cv::Range range2(0, divUp(h, 2));
        const double nParallelStripes = std::max(1, std::min(h / 2, getNumThreads()*4));

        LabelT nLabels = 1;

        if (connectivity == 8){
            //First scan
-           cv::parallel_for_(range, FirstScan8Connectivity(img, imgLabels, P, chunksSizeAndLabels), nParallelStripes);
+           cv::parallel_for_(range2, FirstScan8Connectivity(img, imgLabels, P, chunksSizeAndLabels.data()), nParallelStripes);

            //merge labels of different chunks
-           mergeLabels8Connectivity(imgLabels, P, chunksSizeAndLabels);
+           mergeLabels8Connectivity(imgLabels, P, chunksSizeAndLabels.data());

            for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
-               flattenL(P, int((i + 1) / 2) * int((w + 1) / 2) + 1, chunksSizeAndLabels[i + 1], nLabels);
+               flattenL(P, stripeFirstLabel8Connectivity<int>(i, w), chunksSizeAndLabels[i + 1], nLabels);
            }
        }
        else{
            //First scan
-           cv::parallel_for_(range, FirstScan4Connectivity(img, imgLabels, P, chunksSizeAndLabels), nParallelStripes);
+           cv::parallel_for_(range2, FirstScan4Connectivity(img, imgLabels, P, chunksSizeAndLabels.data()), nParallelStripes);

            //merge labels of different chunks
-           mergeLabels4Connectivity(imgLabels, P, chunksSizeAndLabels);
+           mergeLabels4Connectivity(imgLabels, P, chunksSizeAndLabels.data());

            for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
-               flattenL(P, int(i * w + 1) / 2 + 1, chunksSizeAndLabels[i + 1], nLabels);
+               flattenL(P, stripeFirstLabel4Connectivity<int>(i, w), chunksSizeAndLabels[i + 1], nLabels);
            }
        }

        //Array for statistics data of threads
-       StatsOp *sopArray = new StatsOp[h];
+       std::vector<StatsOp> sopArray(h);

        sop.init(nLabels);
        //Second scan
-       cv::parallel_for_(range, SecondScan(imgLabels, P, sop, sopArray, nLabels), nParallelStripes);
-       StatsOp::mergeStats(imgLabels, sopArray, sop, nLabels);
+       cv::parallel_for_(range2, SecondScan(imgLabels, P, sop, sopArray.data(), nLabels), nParallelStripes);
+       StatsOp::mergeStats(imgLabels, sopArray.data(), sop, nLabels);
        sop.finish();

-       delete[] sopArray;
-       cv::fastFree(chunksSizeAndLabels);
-       cv::fastFree(P);
        return nLabels;
    }
};//End struct LabelingWuParallel
@@ -671,9 +689,10 @@ namespace cv{
        //Obviously, 4-way connectivity upper bound is also good for 8-way connectivity labeling
        const size_t Plength = (size_t(h) * size_t(w) + 1) / 2 + 1;
        //array P for equivalences resolution
-       LabelT *P = (LabelT *)fastMalloc(sizeof(LabelT) *Plength);
+       std::vector<LabelT> P_(Plength, 0);
+       LabelT *P = P_.data();
        //first label is for background pixels
-       P[0] = 0;
+       //P[0] = 0;
        LabelT lunique = 1;

        if (connectivity == 8){
@@ -811,7 +830,6 @@ namespace cv{
        }

        sop.finish();
-       fastFree(P);

        return nLabels;
    }//End function LabelingWu operator()
@@ -836,14 +854,14 @@ namespace cv{

        FirstScan& operator=(const FirstScan&) { return *this; }

-       void operator()(const cv::Range& range) const CV_OVERRIDE
+       void operator()(const cv::Range& range2) const CV_OVERRIDE
        {
+           const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
            int r = range.start;
-           r += (r % 2);

-           chunksSizeAndLabels_[r] = range.end + (range.end % 2);
+           chunksSizeAndLabels_[r] = range.end;

-           LabelT label = LabelT((r + 1) / 2) * LabelT((imgLabels_.cols + 1) / 2) + 1;
+           LabelT label = stripeFirstLabel8Connectivity<LabelT>(r, imgLabels_.cols);

            const LabelT firstLabel = label;
            const int h = img_.rows, w = img_.cols;
@@ -1902,14 +1920,13 @@ namespace cv{
        SecondScan(const cv::Mat& img, cv::Mat& imgLabels, LabelT *P, StatsOp& sop, StatsOp *sopArray, LabelT& nLabels)
            : img_(img), imgLabels_(imgLabels), P_(P), sop_(sop), sopArray_(sopArray), nLabels_(nLabels){}

        SecondScan& operator=(const SecondScan& ) { return *this; }

-       void operator()(const cv::Range& range) const CV_OVERRIDE
+       void operator()(const cv::Range& range2) const CV_OVERRIDE
        {
+           const Range range(range2.start * 2, std::min(range2.end * 2, img_.rows));
            int r = range.start;
-           r += (r % 2);

            const int rowBegin = r;
-           const int rowEnd = range.end + range.end % 2;
+           const int rowEnd = range.end;

            if (rowBegin > 0){
                sopArray_[rowBegin].initElement(nLabels_);
@@ -2542,36 +2559,35 @@ namespace cv{

        //Array used to store info and labeled pixel by each thread.
        //Different threads affect different memory location of chunksSizeAndLabels
-       const int chunksSizeAndLabelsSize = h + 1;
-       cv::AutoBuffer<int, 0> chunksSizeAndLabels(chunksSizeAndLabelsSize);
+       const int chunksSizeAndLabelsSize = roundUp(h, 2);
+       std::vector<int> chunksSizeAndLabels(chunksSizeAndLabelsSize);

        //Tree of labels
-       cv::AutoBuffer<LabelT, 0> P(Plength);
+       std::vector<LabelT> P(Plength, 0);
        //First label is for background
-       P[0] = 0;
+       //P[0] = 0;

-       cv::Range range(0, h);
+       cv::Range range2(0, divUp(h, 2));
        const double nParallelStripes = std::max(1, std::min(h / 2, getNumThreads()*4));

-       //First scan, each thread works with chunk of img.rows/nThreads rows
-       //e.g. 300 rows, 4 threads -> each chunks is composed of 75 rows
-       cv::parallel_for_(range, FirstScan(img, imgLabels, P.data(), chunksSizeAndLabels.data()), nParallelStripes);
+       //First scan
+       cv::parallel_for_(range2, FirstScan(img, imgLabels, P.data(), chunksSizeAndLabels.data()), nParallelStripes);

        //merge labels of different chunks
        mergeLabels(img, imgLabels, P.data(), chunksSizeAndLabels.data());

        LabelT nLabels = 1;
        for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
-           CV_Assert(i + 1 < chunksSizeAndLabelsSize);
-           flattenL(P.data(), LabelT((i + 1) / 2) * LabelT((w + 1) / 2) + 1, chunksSizeAndLabels[i + 1], nLabels);
+           CV_DbgAssert(i + 1 < chunksSizeAndLabelsSize);
+           flattenL(P.data(), stripeFirstLabel8Connectivity<LabelT>(i, w), chunksSizeAndLabels[i + 1], nLabels);
        }

        //Array for statistics data
-       cv::AutoBuffer<StatsOp, 0> sopArray(h);
+       std::vector<StatsOp> sopArray(h);
        sop.init(nLabels);

        //Second scan
-       cv::parallel_for_(range, SecondScan(img, imgLabels, P.data(), sop, sopArray.data(), nLabels), nParallelStripes);
+       cv::parallel_for_(range2, SecondScan(img, imgLabels, P.data(), sop, sopArray.data(), nLabels), nParallelStripes);

        StatsOp::mergeStats(imgLabels, sopArray.data(), sop, nLabels);
        sop.finish();
@@ -2602,8 +2618,9 @@ namespace cv{
        //............
        const size_t Plength = size_t(((h + 1) / 2) * size_t((w + 1) / 2)) + 1;

-       LabelT *P = (LabelT *)fastMalloc(sizeof(LabelT) *Plength);
-       P[0] = 0;
+       std::vector<LabelT> P_(Plength, 0);
+       LabelT *P = P_.data();
+       //P[0] = 0;
        LabelT lunique = 1;

        // First scan

@@ -3911,7 +3928,6 @@ namespace cv{
        }

        sop.finish();
-       fastFree(P);

        return nLabels;
@@ -150,4 +150,80 @@ TEST(Imgproc_ConnectedComponents, grana_buffer_overflow)
    EXPECT_EQ(1, nbComponents);
}

static cv::Mat createCrashMat(int numThreads) {
    const int h = numThreads * 4 * 2 + 8;
    const double nParallelStripes = std::max(1, std::min(h / 2, numThreads * 4));
    const int w = 4;

    const int nstripes = cvRound(nParallelStripes <= 0 ? h : MIN(MAX(nParallelStripes, 1.), h));
    const cv::Range stripeRange(0, nstripes);
    const cv::Range wholeRange(0, h);

    cv::Mat m(h, w, CV_8U);
    m = 0;

    // Look for a range that starts with an odd value and ends with an even value
    cv::Range bugRange;
    for (int s = stripeRange.start; s < stripeRange.end; s++) {
        cv::Range sr(s, s + 1);
        cv::Range r;
        r.start = (int)(wholeRange.start +
                        ((uint64)sr.start * (wholeRange.end - wholeRange.start) + nstripes / 2) / nstripes);
        r.end = sr.end >= nstripes ?
                wholeRange.end :
                (int)(wholeRange.start +
                      ((uint64)sr.end * (wholeRange.end - wholeRange.start) + nstripes / 2) / nstripes);

        if (r.start > 0 && r.start % 2 == 1 && r.end % 2 == 0 && r.end >= r.start + 2) {
            bugRange = r;
            break;
        }
    }

    if (bugRange.empty()) { // Could not create a buggy range
        return m;
    }

    // Fill in the bug range
    for (int x = 1; x < w; x++) {
        m.at<char>(bugRange.start - 1, x) = 1;
    }

    m.at<char>(bugRange.start + 0, 0) = 1;
    m.at<char>(bugRange.start + 0, 1) = 1;
    m.at<char>(bugRange.start + 0, 3) = 1;
    m.at<char>(bugRange.start + 1, 1) = 1;
    m.at<char>(bugRange.start + 2, 1) = 1;
    m.at<char>(bugRange.start + 2, 3) = 1;
    m.at<char>(bugRange.start + 3, 0) = 1;
    m.at<char>(bugRange.start + 3, 1) = 1;

    return m;
}

TEST(Imgproc_ConnectedComponents, parallel_wu_labels)
{
    cv::Mat mat = createCrashMat(cv::getNumThreads());
    if (mat.empty()) {
        return;
    }

    const int nbPixels = cv::countNonZero(mat);

    cv::Mat labels;
    cv::Mat stats;
    cv::Mat centroids;
    int nb = 0;
    EXPECT_NO_THROW( nb = cv::connectedComponentsWithStats(mat, labels, stats, centroids, 8, CV_32S, cv::CCL_WU) );

    int area = 0;
    for (int i = 1; i < nb; ++i) {
        area += stats.at<int32_t>(i, cv::CC_STAT_AREA);
    }

    EXPECT_EQ(nbPixels, area);
}

}} // namespace
@@ -0,0 +1,89 @@
import java.util.ArrayList;
import java.util.List;

import org.opencv.core.*;
import org.opencv.core.Range;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

class PanoramaStitchingRotatingCameraRun {
    void basicPanoramaStitching (String[] args) {
        String img1path = args[0], img2path = args[1];
        Mat img1 = Imgcodecs.imread(img1path);
        Mat img2 = Imgcodecs.imread(img2path);

        //! [camera-pose-from-Blender-at-location-1]
        Mat c1Mo = new Mat( 4, 4, CvType.CV_64FC1 );
        c1Mo.put(0, 0, 0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112,
                 0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443,
                 -0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654,
                 0, 0, 0, 1 );
        //! [camera-pose-from-Blender-at-location-1]

        //! [camera-pose-from-Blender-at-location-2]
        Mat c2Mo = new Mat( 4, 4, CvType.CV_64FC1 );
        c2Mo.put(0, 0, 0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112,
                 -0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443,
                 0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654,
                 0, 0, 0, 1);
        //! [camera-pose-from-Blender-at-location-2]

        //! [camera-intrinsics-from-Blender]
        Mat cameraMatrix = new Mat(3, 3, CvType.CV_64FC1);
        cameraMatrix.put(0, 0, 700.0, 0.0, 320.0, 0.0, 700.0, 240.0, 0, 0, 1 );
        //! [camera-intrinsics-from-Blender]

        //! [extract-rotation]
        Range rowRange = new Range(0, 3);
        Range colRange = new Range(0, 3);
        //! [extract-rotation]

        //! [compute-rotation-displacement]
        //c1Mo * oMc2
        Mat R1 = new Mat(c1Mo, rowRange, colRange);
        Mat R2 = new Mat(c2Mo, rowRange, colRange);
        Mat R_2to1 = new Mat();
        Core.gemm(R1, R2.t(), 1, new Mat(), 0, R_2to1);
        //! [compute-rotation-displacement]

        //! [compute-homography]
        Mat tmp = new Mat(), H = new Mat();
        Core.gemm(cameraMatrix, R_2to1, 1, new Mat(), 0, tmp);
        Core.gemm(tmp, cameraMatrix.inv(), 1, new Mat(), 0, H);
        Scalar s = new Scalar(H.get(2, 2)[0]);
        Core.divide(H, s, H);
        System.out.println(H.dump());
        //! [compute-homography]

        //! [stitch]
        Mat img_stitch = new Mat();
        Imgproc.warpPerspective(img2, img_stitch, H, new Size(img2.cols()*2, img2.rows()));
        Mat half = new Mat(img_stitch, new Rect(0, 0, img1.cols(), img1.rows()));
        img1.copyTo(half);
        //! [stitch]

        Mat img_compare = new Mat();
        Mat img_space = Mat.zeros(new Size(50, img1.rows()), CvType.CV_8UC3);
        List<Mat> list = new ArrayList<>();
        list.add(img1);
        list.add(img_space);
        list.add(img2);
        Core.hconcat(list, img_compare);

        HighGui.imshow("Compare Images", img_compare);
        HighGui.imshow("Panorama Stitching", img_stitch);
        HighGui.waitKey(0);
        System.exit(0);
    }
}

public class PanoramaStitchingRotatingCamera {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        new PanoramaStitchingRotatingCameraRun().basicPanoramaStitching(args);
    }
}
@@ -0,0 +1,89 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import org.opencv.core.*;
import org.opencv.calib3d.Calib3d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

class PerspectiveCorrectionRun {
    void perspectiveCorrection (String[] args) {
        String img1Path = args[0], img2Path = args[1];
        Mat img1 = Imgcodecs.imread(img1Path);
        Mat img2 = Imgcodecs.imread(img2Path);

        //! [find-corners]
        MatOfPoint2f corners1 = new MatOfPoint2f(), corners2 = new MatOfPoint2f();
        boolean found1 = Calib3d.findChessboardCorners(img1, new Size(9, 6), corners1);
        boolean found2 = Calib3d.findChessboardCorners(img2, new Size(9, 6), corners2);
        //! [find-corners]

        if (!found1 || !found2) {
            System.out.println("Error, cannot find the chessboard corners in both images.");
            System.exit(-1);
        }

        //! [estimate-homography]
        Mat H = Calib3d.findHomography(corners1, corners2);
        System.out.println(H.dump());
        //! [estimate-homography]

        //! [warp-chessboard]
        Mat img1_warp = new Mat();
        Imgproc.warpPerspective(img1, img1_warp, H, img1.size());
        //! [warp-chessboard]

        Mat img_draw_warp = new Mat();
        List<Mat> list1 = new ArrayList<>(), list2 = new ArrayList<>();
        list1.add(img2);
        list1.add(img1_warp);
        Core.hconcat(list1, img_draw_warp);
        HighGui.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp);

        //! [compute-transformed-corners]
        Mat img_draw_matches = new Mat();
        list2.add(img1);
        list2.add(img2);
        Core.hconcat(list2, img_draw_matches);
        Point[] corners1Arr = corners1.toArray();

        for (int i = 0; i < corners1Arr.length; i++) {
            Mat pt1 = new Mat(3, 1, CvType.CV_64FC1), pt2 = new Mat();
            pt1.put(0, 0, corners1Arr[i].x, corners1Arr[i].y, 1);

            Core.gemm(H, pt1, 1, new Mat(), 0, pt2);
            double[] data = pt2.get(2, 0);
            Core.divide(pt2, new Scalar(data[0]), pt2);

            double[] data1 = pt2.get(0, 0);
            double[] data2 = pt2.get(1, 0);
            Point end = new Point((int)(img1.cols() + data1[0]), (int)data2[0]);
            Imgproc.line(img_draw_matches, corners1Arr[i], end, randomColor(), 2);
        }

        HighGui.imshow("Draw matches", img_draw_matches);
        HighGui.waitKey(0);
        //! [compute-transformed-corners]

        System.exit(0);
    }

    Scalar randomColor () {
        Random rng = new Random();
        int r = rng.nextInt(256);
        int g = rng.nextInt(256);
        int b = rng.nextInt(256);
        return new Scalar(r, g, b);
    }
}

public class PerspectiveCorrection {
    public static void main (String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        new PerspectiveCorrectionRun().perspectiveCorrection(args);
    }
}
@@ -0,0 +1,71 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

def basicPanoramaStitching(img1Path, img2Path):
    img1 = cv.imread(cv.samples.findFile(img1Path))
    img2 = cv.imread(cv.samples.findFile(img2Path))

    # [camera-pose-from-Blender-at-location-1]
    c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
                     [0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
                     [-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
                     [0, 0, 0, 1]], dtype=np.float64)
    # [camera-pose-from-Blender-at-location-1]

    # [camera-pose-from-Blender-at-location-2]
    c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112],
                     [-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
                     [0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
                     [0, 0, 0, 1]], dtype=np.float64)
    # [camera-pose-from-Blender-at-location-2]

    # [camera-intrinsics-from-Blender]
    cameraMatrix = np.array([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0, 0, 1]], dtype=np.float32)
    # [camera-intrinsics-from-Blender]

    # [extract-rotation]
    R1 = c1Mo[0:3, 0:3]
    R2 = c2Mo[0:3, 0:3]
    # [extract-rotation]

    # [compute-rotation-displacement]
    R2 = R2.transpose()
    R_2to1 = np.dot(R1, R2)
    # [compute-rotation-displacement]

    # [compute-homography]
    H = cameraMatrix.dot(R_2to1).dot(np.linalg.inv(cameraMatrix))
    H = H / H[2][2]
    # [compute-homography]

    # [stitch]
    img_stitch = cv.warpPerspective(img2, H, (img2.shape[1]*2, img2.shape[0]))
    img_stitch[0:img1.shape[0], 0:img1.shape[1]] = img1
    # [stitch]

    img_space = np.zeros((img1.shape[0], 50, 3), dtype=np.uint8)
    img_compare = cv.hconcat([img1, img_space, img2])

    cv.imshow("Final", img_compare)
    cv.imshow("Panorama", img_stitch)
    cv.waitKey(0)

def main():
    import argparse
    parser = argparse.ArgumentParser(description="Code for homography tutorial. Example 5: basic panorama stitching from a rotating camera.")
    parser.add_argument("-I1", "--image1", help="path to first image", default="Blender_Suzanne1.jpg")
    parser.add_argument("-I2", "--image2", help="path to second image", default="Blender_Suzanne2.jpg")
    args = parser.parse_args()
    print("Panorama Stitching Started")
    basicPanoramaStitching(args.image1, args.image2)
    print("Panorama Stitching Completed Successfully")


if __name__ == '__main__':
    main()
@@ -0,0 +1,74 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import sys


def randomColor():
    color = np.random.randint(0, 255, (1, 3))
    return color[0].tolist()

def perspectiveCorrection(img1Path, img2Path, patternSize):
    img1 = cv.imread(cv.samples.findFile(img1Path))
    img2 = cv.imread(cv.samples.findFile(img2Path))

    # [find-corners]
    ret1, corners1 = cv.findChessboardCorners(img1, patternSize)
    ret2, corners2 = cv.findChessboardCorners(img2, patternSize)
    # [find-corners]

    if not ret1 or not ret2:
        print("Error, cannot find the chessboard corners in both images.")
        sys.exit(-1)

    # [estimate-homography]
    H, _ = cv.findHomography(corners1, corners2)
    print(H)
    # [estimate-homography]

    # [warp-chessboard]
    img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
    # [warp-chessboard]

    img_draw_warp = cv.hconcat([img2, img1_warp])
    cv.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp)

    corners1 = corners1.tolist()
    corners1 = [a[0] for a in corners1]

    # [compute-transformed-corners]
    img_draw_matches = cv.hconcat([img1, img2])
    for i in range(len(corners1)):
        pt1 = np.array([corners1[i][0], corners1[i][1], 1])
        pt1 = pt1.reshape(3, 1)
        pt2 = np.dot(H, pt1)
        pt2 = pt2 / pt2[2]
        end = (int(img1.shape[1] + pt2[0]), int(pt2[1]))
        cv.line(img_draw_matches, tuple([int(j) for j in corners1[i]]), end, randomColor(), 2)

    cv.imshow("Draw matches", img_draw_matches)
    cv.waitKey(0)
    # [compute-transformed-corners]

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-I1', "--image1", help="Path to the first image", default="left02.jpg")
    parser.add_argument('-I2', "--image2", help="Path to the second image", default="left01.jpg")
    parser.add_argument('-H', "--height", help="Height of pattern size", default=6)
    parser.add_argument('-W', "--width", help="Width of pattern size", default=9)
    args = parser.parse_args()

    img1Path = args.image1
    img2Path = args.image2
    h = args.height
    w = args.width
    perspectiveCorrection(img1Path, img2Path, (w, h))

if __name__ == "__main__":
    main()