From 9ba4bb73555b9418e7d0d739f555219259be4298 Mon Sep 17 00:00:00 2001
From: Rostislav Vasilikhin
Date: Tue, 13 Sep 2022 15:45:20 +0200
Subject: [PATCH] initIntrinsicParams2D() refactored

variables refactoring
levmarq fix
initIntrinsicParams2D() refactoring
undo LM fix
cameraCalcJErr: made a lambda; warnings fixed; vars rearranged & renamed, jacobian buffers, perViewErrors and allErrors fix, etc.
stereoCalibrate: internal vars in callback
stereoCalibrate: capture only useful variables
stereoCalibrate: perViewError fix + minors
rvecs and tvecs are not pointers anymore
no extra lambda
newObjPoints: no pointers
stdDevs: no pointers
_Jo removed: not used
Range::all() -> rowRange, colRange
param and mask are std::vectors now
indices shortened
tabs
less func-scoped vars; TODOs
less formatting & renaming changes
trailing whitespaces
less diff
less changes
less changes
Range::all() back
perViewErr ptr fix
NINTRINSIC captured
try to fix warning
trying to fix a warning
fix warnings, another attempt
---
 modules/3d/src/levmarq.cpp        |   1 +
 modules/calib/src/calibration.cpp | 629 +++++++++++++++---------------
 2 files changed, 308 insertions(+), 322 deletions(-)

diff --git a/modules/3d/src/levmarq.cpp b/modules/3d/src/levmarq.cpp
index 72b2992297..b4df3b89cf 100644
--- a/modules/3d/src/levmarq.cpp
+++ b/modules/3d/src/levmarq.cpp
@@ -300,6 +300,7 @@ LevMarq::Report detail::LevMarqBase::optimize()
             lambdaLevMarq *= 1.0 / settings.initialLmDownFactor;
             lmUpFactor = settings.initialLmUpFactor;
 
+            // Once set, these flags stay active until the next successful LM iteration - this is not a bug
             smallGradient = (gradientMax < settings.minGradientTolerance);
             smallStep = (xNorm < settings.stepNormTolerance);
             smallEnergyDelta = (costChange / energy < settings.relEnergyDeltaTolerance);
diff --git a/modules/calib/src/calibration.cpp b/modules/calib/src/calibration.cpp
index a82b53dcd3..dc24f057eb 100644
--- a/modules/calib/src/calibration.cpp
+++ b/modules/calib/src/calibration.cpp
@@ -61,13 +61,6 @@ static void initIntrinsicParams2D( const Mat& objectPoints,
                                    Size imageSize, OutputArray cameraMatrix, double aspectRatio )
 {
-    int i, j, pos;
-    double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
-    double H[9] = {0}, f[2] = {0};
-    Mat _a( 3, 3, CV_64F, a );
-    Mat matH( 3, 3, CV_64F, H );
-    Mat _f( 2, 1, CV_64F, f );
-
     CV_Assert(npoints.type() == CV_32SC1 &&
               (npoints.rows == 1 || npoints.cols == 1) &&
               npoints.isContinuous());
     int nimages = npoints.rows + npoints.cols - 1;
@@ -79,34 +72,34 @@ static void initIntrinsicParams2D( const Mat& objectPoints,
     if( objectPoints.rows != 1 || imagePoints.rows != 1 )
         CV_Error( CV_StsBadSize, "object points and image points must be a single-row matrices" );
 
-    Mat matA( 2*nimages, 2, CV_64F );
-    Mat _b( 2*nimages, 1, CV_64F );
-    a[2] = (!imageSize.width) ? 0.5 : (imageSize.width - 1)*0.5;
-    a[5] = (!imageSize.height) ? 0.5 : (imageSize.height - 1)*0.5;
-    Mat _allH( nimages, 9, CV_64F );
+    Mat_<double> matA(2*nimages, 2);
+    Mat_<double> matb(2*nimages, 1, CV_64F );
+    double fx, fy, cx, cy;
+    cx = (!imageSize.width ) ? 0.5 : (imageSize.width - 1)*0.5;
+    cy = (!imageSize.height) ?
0.5 : (imageSize.height - 1)*0.5; // extract vanishing points in order to obtain initial value for the focal length - for( i = 0, pos = 0; i < nimages; i++ ) + int pos = 0; + for(int i = 0; i < nimages; i++ ) { - double* Ap = (double*)matA.data + i*4; - double* bp = (double*)_b.data + i*2; int ni = npoints.at(i); - double h[3], v[3], d1[3], d2[3]; - double n[4] = {0,0,0,0}; Mat matM = objectPoints.colRange(pos, pos + ni); Mat _m = imagePoints.colRange(pos, pos + ni); pos += ni; + Matx33d H; Mat matH0 = findHomography(matM, _m); CV_Assert(matH0.size() == Size(3, 3)); - matH0.convertTo(matH, CV_64F); + matH0.convertTo(H, CV_64F); - H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2]; - H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5]; + H(0, 0) -= H(2, 0)*cx; H(0, 1) -= H(2, 1)*cx; H(0, 2) -= H(2, 2)*cx; + H(1, 0) -= H(2, 0)*cy; H(1, 1) -= H(2, 1)*cy; H(1, 2) -= H(2, 2)*cy; - for( j = 0; j < 3; j++ ) + Vec3d h, v, d1, d2; + Vec4d n; + for(int j = 0; j < 3; j++ ) { - double t0 = H[j*3], t1 = H[j*3+1]; + double t0 = H(j, 0), t1 = H(j, 1); h[j] = t0; v[j] = t1; d1[j] = (t0 + t1)*0.5; d2[j] = (t0 - t1)*0.5; @@ -114,31 +107,34 @@ static void initIntrinsicParams2D( const Mat& objectPoints, n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j]; } - for( j = 0; j < 4; j++ ) + for(int j = 0; j < 4; j++ ) n[j] = 1./std::sqrt(n[j]); - for( j = 0; j < 3; j++ ) + for(int j = 0; j < 3; j++ ) { h[j] *= n[0]; v[j] *= n[1]; d1[j] *= n[2]; d2[j] *= n[3]; } - Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1]; - Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1]; - bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2]; + matA(i*2+0, 0) = h[0]*v[0]; matA(i*2+0, 1) = h[1]*v[1]; + matA(i*2+1, 0) = d1[0]*d2[0]; matA(i*2+1, 1) = d1[1]*d2[1]; + matb(i*2+0) = -h[2]*v[2]; + matb(i*2+1) = -d1[2]*d2[2]; } - solve(matA, _b, _f, DECOMP_NORMAL + DECOMP_SVD); - CV_Assert((double*)_f.data == f); - a[0] = std::sqrt(fabs(1./f[0])); - a[4] = std::sqrt(fabs(1./f[1])); + Vec2d f; + solve(matA, matb, f, DECOMP_NORMAL + DECOMP_SVD); + fx = std::sqrt(fabs(1./f[0])); + fy = std::sqrt(fabs(1./f[1])); if( aspectRatio != 0 ) { - double tf = (a[0] + a[4])/(aspectRatio + 1.); - a[0] = aspectRatio*tf; - a[4] = tf; + double tf = (fx + fy)/(aspectRatio + 1.); + fx = aspectRatio*tf; + fy = tf; } - _a.copyTo(cameraMatrix); + Matx33d(fx, 0, cx, + 0, fy, cy, + 0, 0, 1).copyTo(cameraMatrix); } static void subMatrix(const Mat& src, Mat& dst, @@ -165,152 +161,14 @@ static void subMatrix(const Mat& src, Mat& dst, } } -static void cameraCalcJErr(const Mat& objectPoints, const Mat& imagePoints, - const Mat& npoints, Mat& allErrors, - Mat& _param, bool calcJ, Mat& JtErr, Mat& JtJ, double& errnorm, - double aspectRatio, Mat& perViewErrors, - int flags, bool optimizeObjPoints) -{ - const int NINTRINSIC = CALIB_NINTRINSIC; - int ni = 0, nimages = (int)npoints.total(); - double k[14] = {0}; - Mat _k(14, 1, CV_64F, k); - double* param = _param.ptr(); - int nparams = (int)_param.total(); - int ni0 = npoints.at(0); - Mat _Je(ni0*2, 6, CV_64F), _Ji(ni0*2, NINTRINSIC, CV_64F), _Jo, _err(ni*2, 1, CV_64F); - - if( flags & CALIB_FIX_ASPECT_RATIO ) - { - param[0] = param[1]*aspectRatio; - //pparam[0] = pparam[1]*aspectRatio; - } - - Matx33d A(param[0], 0, param[2], - 0, param[1], param[3], - 0, 0, 1); - std::copy(param + 4, param + 4 + 14, k); - - JtJ.setZero(); - JtErr.setZero(); - - if(optimizeObjPoints) - _Jo.create(ni0*2, ni0*3, CV_64F); - - double reprojErr = 0; - int maxPoints = 0; - for( int i = 0; i < nimages; i++ ) - maxPoints = max(maxPoints, npoints.at(i)); - - for( int i = 0, pos = 0; i < 
nimages; i++, pos += ni ) - { - ni = npoints.at(i); - Mat _ri = _param.rowRange(NINTRINSIC + i*6, NINTRINSIC + i*6 + 3); - Mat _ti = _param.rowRange(NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6); - - Mat _Mi = objectPoints.colRange(pos, pos + ni); - if( optimizeObjPoints ) - { - _Mi = _param.rowRange(NINTRINSIC + nimages * 6, - NINTRINSIC + nimages * 6 + ni * 3); - _Mi = _Mi.reshape(3, 1); - } - Mat _mi = imagePoints.colRange(pos, pos + ni); - Mat _me = allErrors.colRange(pos, pos + ni); - - _Je.resize(ni*2); - _Ji.resize(ni*2); - _err.resize(ni*2); - if (optimizeObjPoints) - _Jo.resize(ni*2); - Mat _mp = _err.reshape(2, 1); - - if( calcJ ) - { - Mat _dpdr = _Je.colRange(0, 3); - Mat _dpdt = _Je.colRange(3, 6); - Mat _dpdf = _Ji.colRange(0, 2); - Mat _dpdc = _Ji.colRange(2, 4); - Mat _dpdk = _Ji.colRange(4, NINTRINSIC); - Mat _dpdo = _Jo.empty() ? Mat() : _Jo.colRange(0, ni * 3); - double* dpdr_p = _dpdr.ptr(); - double* dpdt_p = _dpdt.ptr(); - double* dpdf_p = _dpdf.ptr(); - double* dpdc_p = _dpdc.ptr(); - double* dpdk_p = _dpdk.ptr(); - double* dpdo_p = _dpdo.ptr(); - - projectPoints(_Mi, _ri, _ti, A, _k, _mp, _dpdr, _dpdt, - (flags & CALIB_FIX_FOCAL_LENGTH) ? _OutputArray() : _OutputArray(_dpdf), - (flags & CALIB_FIX_PRINCIPAL_POINT) ? _OutputArray() : _OutputArray(_dpdc), - _dpdk, _Jo.empty() ? _OutputArray() : _OutputArray(_dpdo), - (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0.); - CV_Assert(_mp.ptr() == _err.ptr() && - dpdr_p == _dpdr.ptr() && dpdt_p == _dpdt.ptr() && - dpdf_p == _dpdf.ptr() && dpdc_p == _dpdc.ptr() && - dpdk_p == _dpdk.ptr() && dpdo_p == _dpdo.ptr()); - } - else - projectPoints( _Mi, _ri, _ti, A, _k, _mp, - noArray(), noArray(), noArray(), - noArray(), noArray(), noArray(), 0.); - - subtract( _mp, _mi, _mp ); - _mp.copyTo(_me); - - if( calcJ ) - { - // see HZ: (A6.14) for details on the structure of the Jacobian - JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += _Ji.t() * _Ji; - JtJ(Rect(NINTRINSIC + i * 6, NINTRINSIC + i * 6, 6, 6)) = _Je.t() * _Je; - JtJ(Rect(NINTRINSIC + i * 6, 0, 6, NINTRINSIC)) = _Ji.t() * _Je; - if( optimizeObjPoints ) - { - JtJ(Rect(NINTRINSIC + nimages * 6, 0, maxPoints * 3, NINTRINSIC)) += _Ji.t() * _Jo; - JtJ(Rect(NINTRINSIC + nimages * 6, NINTRINSIC + i * 6, maxPoints * 3, 6)) - += _Je.t() * _Jo; - JtJ(Rect(NINTRINSIC + nimages * 6, NINTRINSIC + nimages * 6, maxPoints * 3, maxPoints * 3)) - += _Jo.t() * _Jo; - } - - JtErr.rowRange(0, NINTRINSIC) += _Ji.t() * _err; - JtErr.rowRange(NINTRINSIC + i * 6, NINTRINSIC + (i + 1) * 6) = _Je.t() * _err; - if( optimizeObjPoints ) - { - JtErr.rowRange(NINTRINSIC + nimages * 6, nparams) += _Jo.t() * _err; - } - } - - double viewErr = norm(_err, NORM_L2SQR); - /*if (i == 0 || i == nimages-1) { - printf("image %d.", i); - for(int j = 0; j < 10; j++) { - printf(" %.2g", _err.at(j)); - } - printf("\n"); - }*/ - - if( !perViewErrors.empty() ) - perViewErrors.at(i) = std::sqrt(viewErr / ni); - - reprojErr += viewErr; - } - - errnorm = reprojErr; -} - static double calibrateCameraInternal( const Mat& objectPoints, - const Mat& imagePoints, const Mat& npoints, - Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, - Mat* rvecs, Mat* tvecs, Mat* newObjPoints, Mat* stdDevs, - Mat* perViewErrors, int flags, const TermCriteria& termCrit ) + const Mat& imagePoints, const Mat& npoints, + Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, + Mat rvecs, Mat tvecs, Mat newObjPoints, Mat stdDevs, + Mat perViewErr, int flags, const TermCriteria& termCrit ) { - const int NINTRINSIC = 
CALIB_NINTRINSIC; + int NINTRINSIC = CALIB_NINTRINSIC; - Matx33d A; - double k[14] = {0}; - Mat matA(3, 3, CV_64F, A.val); - int i, maxPoints = 0, ni = 0, pos, total = 0, nparams, cn; double aspectRatio = 0.; int nimages = npoints.checkVector(1, CV_32S); CV_Assert(nimages >= 1); @@ -335,22 +193,22 @@ static double calibrateCameraInternal( const Mat& objectPoints, CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" ); } - if( rvecs ) + if( !rvecs.empty() ) { - cn = rvecs->channels(); - CV_Assert(rvecs->depth() == CV_32F || rvecs->depth() == CV_64F); - CV_Assert(rvecs->rows == nimages); - CV_Assert((rvecs->rows == nimages && (rvecs->cols*cn == 3 || rvecs->cols*cn == 3)) || - (rvecs->rows == 1 && rvecs->cols == nimages && cn == 3)); + int cn = rvecs.channels(); + CV_Assert(rvecs.depth() == CV_32F || rvecs.depth() == CV_64F); + CV_Assert(rvecs.rows == nimages); + CV_Assert((rvecs.rows == nimages && (rvecs.cols*cn == 3 || rvecs.cols*cn == 3)) || + (rvecs.rows == 1 && rvecs.cols == nimages && cn == 3)); } - if( tvecs ) + if( !tvecs.empty() ) { - cn = tvecs->channels(); - CV_Assert(tvecs->depth() == CV_32F || tvecs->depth() == CV_64F); - CV_Assert(tvecs->rows == nimages); - CV_Assert((tvecs->rows == nimages && tvecs->cols*cn == 3) || - (tvecs->rows == 1 && tvecs->cols == nimages && cn == 3)); + int cn = tvecs.channels(); + CV_Assert(tvecs.depth() == CV_32F || tvecs.depth() == CV_64F); + CV_Assert(tvecs.rows == nimages); + CV_Assert((tvecs.rows == nimages && tvecs.cols*cn == 3) || + (tvecs.rows == 1 && tvecs.cols == nimages && cn == 3)); } CV_Assert(cameraMatrix.type() == CV_32F || cameraMatrix.type() == CV_64F); @@ -361,9 +219,10 @@ static double calibrateCameraInternal( const Mat& objectPoints, CV_Assert(ndistCoeffs == 4 || ndistCoeffs == 5 || ndistCoeffs == 8 || ndistCoeffs == 12 || ndistCoeffs == 14); - for( i = 0; i < nimages; i++ ) + int total = 0, maxPoints = 0; + for(int i = 0; i < nimages; i++ ) { - ni = npoints.at(i); + int ni = npoints.at(i); if( ni < 4 ) { CV_Error_( CV_StsOutOfRange, ("The number of points in the view #%d is < 4", i)); @@ -372,23 +231,23 @@ static double calibrateCameraInternal( const Mat& objectPoints, total += ni; } - if( newObjPoints ) + if( !newObjPoints.empty() ) { - cn = newObjPoints->channels(); - CV_Assert(newObjPoints->depth() == CV_32F || newObjPoints->depth() == CV_64F); - CV_Assert(rvecs->rows == nimages); - CV_Assert((newObjPoints->rows == maxPoints && newObjPoints->cols*cn == 3) || - (newObjPoints->rows == 1 && newObjPoints->cols == maxPoints && cn == 3)); + int cn = newObjPoints.channels(); + CV_Assert(newObjPoints.depth() == CV_32F || newObjPoints.depth() == CV_64F); + CV_Assert(rvecs.rows == nimages); + CV_Assert((newObjPoints.rows == maxPoints && newObjPoints.cols*cn == 3) || + (newObjPoints.rows == 1 && newObjPoints.cols == maxPoints && cn == 3)); } - if( stdDevs ) + if( !stdDevs.empty() ) { - cn = stdDevs->channels(); - CV_Assert(stdDevs->depth() == CV_32F || stdDevs->depth() == CV_64F); + int cn = stdDevs.channels(); + CV_Assert(stdDevs.depth() == CV_32F || stdDevs.depth() == CV_64F); int nstddev = nimages*6 + NINTRINSIC + (releaseObject ? 
maxPoints*3 : 0); - CV_Assert((stdDevs->rows == nstddev && stdDevs->cols*cn == 1) || - (stdDevs->rows == 1 && stdDevs->cols == nstddev && cn == 1)); + CV_Assert((stdDevs.rows == nstddev && stdDevs.cols*cn == 1) || + (stdDevs.rows == 1 && stdDevs.cols == nstddev && cn == 1)); } Mat matM( 1, total, CV_64FC3 ); @@ -405,11 +264,12 @@ static double calibrateCameraInternal( const Mat& objectPoints, else convertPointsFromHomogeneous(imagePoints, _m, CV_64F); - nparams = NINTRINSIC + nimages*6; + int nparams = NINTRINSIC + nimages*6; if( releaseObject ) nparams += maxPoints * 3; - Mat _k( distCoeffs.rows, distCoeffs.cols, CV_64F, k); + std::vector k(14, 0.0); + Mat _k( distCoeffs.rows, distCoeffs.cols, CV_64F, k.data()); if( distCoeffs.total() < 8 ) { if( distCoeffs.total() < 5 ) @@ -419,7 +279,8 @@ static double calibrateCameraInternal( const Mat& objectPoints, const double minValidAspectRatio = 0.01; const double maxValidAspectRatio = 100.0; - cameraMatrix.convertTo(matA, CV_64F); + Matx33d A; + cameraMatrix.convertTo(A, CV_64F); // 1. initialize intrinsic parameters & LM solver if( flags & CALIB_USE_INTRINSIC_GUESS ) @@ -455,7 +316,7 @@ static double calibrateCameraInternal( const Mat& objectPoints, if( fabs(mean[2]) > 1e-5 || fabs(sdv[2]) > 1e-5 ) CV_Error( CV_StsBadArg, "For non-planar calibration rigs the initial intrinsic matrix must be specified" ); - for( i = 0; i < total; i++ ) + for(int i = 0; i < total; i++ ) matM.at(i).z = 0.; if( flags & CALIB_FIX_ASPECT_RATIO ) @@ -472,14 +333,10 @@ static double calibrateCameraInternal( const Mat& objectPoints, //std::cout << "A0: " << A << std::endl; //std::cout << "dist0:" << _k << std::endl; - Mat _Ji( maxPoints*2, NINTRINSIC, CV_64FC1, Scalar(0)); - Mat _Je( maxPoints*2, 6, CV_64FC1 ); - Mat _err( maxPoints*2, 1, CV_64FC1 ); - Mat param0( nparams, 1, CV_64FC1 ); - Mat mask0 = Mat::ones( nparams, 1, CV_8UC1 ); + std::vector param(nparams, 0.0); + Mat paramM(param, false); + std::vector mask(nparams, (uchar)1); - const bool allocJo = stdDevs || releaseObject; - Mat _Jo = allocJo ? Mat( maxPoints*2, maxPoints*3, CV_64FC1, Scalar(0) ) : Mat(); int solveMethod = DECOMP_EIG; if(flags & CALIB_USE_LU) { @@ -489,11 +346,8 @@ static double calibrateCameraInternal( const Mat& objectPoints, solveMethod = DECOMP_QR; } - double* param = param0.ptr(); - uchar* mask = mask0.ptr(); - param[0] = A(0, 0); param[1] = A(1, 1); param[2] = A(0, 2); param[3] = A(1, 2); - std::copy(k, k + 14, param + 4); + std::copy(k.begin(), k.end(), param.begin() + 4); if(flags & CALIB_FIX_ASPECT_RATIO) mask[0] = 0; @@ -535,30 +389,36 @@ static double calibrateCameraInternal( const Mat& objectPoints, if(releaseObject) { // copy object points + int s = NINTRINSIC + nimages * 6; + std::copy( matM.ptr(), matM.ptr( 0, maxPoints - 1 ) + 3, - param + NINTRINSIC + nimages * 6 ); + param.begin() + NINTRINSIC + nimages * 6 ); + // fix points - mask[NINTRINSIC + nimages * 6] = 0; - mask[NINTRINSIC + nimages * 6 + 1] = 0; - mask[NINTRINSIC + nimages * 6 + 2] = 0; - mask[NINTRINSIC + nimages * 6 + iFixedPoint * 3] = 0; - mask[NINTRINSIC + nimages * 6 + iFixedPoint * 3 + 1] = 0; - mask[NINTRINSIC + nimages * 6 + iFixedPoint * 3 + 2] = 0; + mask[s + 0] = 0; + mask[s + 1] = 0; + mask[s + 2] = 0; + mask[s + iFixedPoint * 3 + 0] = 0; + mask[s + iFixedPoint * 3 + 1] = 0; + mask[s + iFixedPoint * 3 + 2] = 0; mask[nparams - 1] = 0; } // 2. 
initialize extrinsic parameters - for( i = 0, pos = 0; i < nimages; i++, pos += ni ) + for(int i = 0, pos = 0; i < nimages; i++ ) { - ni = npoints.at(i); + int ni = npoints.at(i); - Mat _ri = param0.rowRange(NINTRINSIC + i*6, NINTRINSIC + i*6 + 3); - Mat _ti = param0.rowRange(NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6); + int s = NINTRINSIC + i*6; + Mat _ri = paramM.rowRange(s, s + 3); + Mat _ti = paramM.rowRange(s + 3, s + 6); Mat _Mi = matM.colRange(pos, pos + ni); Mat _mi = _m.colRange(pos, pos + ni); - solvePnP(_Mi, _mi, matA, _k, _ri, _ti, false); + solvePnP(_Mi, _mi, A, _k, _ri, _ti, false); + + pos += ni; } //std::cout << "single camera calib. param before LM: " << param0.t() << "\n"; @@ -566,43 +426,159 @@ static double calibrateCameraInternal( const Mat& objectPoints, // 3. run the optimization - Mat perViewErr = perViewErrors ? *perViewErrors : Mat(); - auto calibCameraLMCallback = [matM, _m, npoints, &allErrors, aspectRatio, &perViewErr, flags, releaseObject] - (InputOutputArray _param, OutputArray JtErr, OutputArray JtJ, double& errnorm) -> bool + Mat errBuf( maxPoints*2, 1, CV_64FC1 ); + Mat JiBuf ( maxPoints*2, NINTRINSIC, CV_64FC1, Scalar(0)); + Mat JeBuf ( maxPoints*2, 6, CV_64FC1 ); + Mat JoBuf; + if (releaseObject) + JoBuf = Mat( maxPoints*2, maxPoints*3, CV_64FC1); + + auto cameraCalcJErr = [matM, _m, npoints, nimages, flags, releaseObject, nparams, maxPoints, aspectRatio, NINTRINSIC, + &JeBuf, &JiBuf, &JoBuf, &errBuf, &allErrors, &perViewErr] + (InputOutputArray _param, OutputArray _JtErr, OutputArray _JtJ, double& errnorm) -> bool { - Mat jterr = JtErr.getMat(), jtj = JtJ.getMat(); - Mat mparam = _param.getMat(); - cameraCalcJErr(matM, _m, npoints, allErrors, mparam, /* calcJ */ JtErr.needed() && JtJ.needed(), - jterr, jtj, errnorm, - aspectRatio, perViewErr, flags, releaseObject); + bool optimizeObjPoints = releaseObject; + + Mat_ param_m = _param.getMat(); + param_m = param_m.reshape(1, max(param_m.rows, param_m.cols)); + + if( flags & CALIB_FIX_ASPECT_RATIO ) + { + param_m(0) = param_m(1)*aspectRatio; + } + + double fx = param_m(0), fy = param_m(1), cx = param_m(2), cy = param_m(3); + Matx33d intrin(fx, 0, cx, + 0, fy, cy, + 0, 0, 1); + Mat dist = param_m.rowRange(4, 4+14); + + bool calcJ = !_JtErr.empty() && !_JtJ.empty(); + Mat JtErr, JtJ; + if (calcJ) + { + JtErr = _JtErr.getMat(); + JtJ = _JtJ.getMat(); + JtJ.setZero(); + JtErr.setZero(); + } + + double reprojErr = 0; + + int so = NINTRINSIC + nimages * 6; + int pos = 0; + for( int i = 0; i < nimages; i++ ) + { + int si = NINTRINSIC + i * 6; + + int ni = npoints.at(i); + Mat _ri = param_m.rowRange(si, si + 3); + Mat _ti = param_m.rowRange(si + 3, si + 6); + + Mat _Mi = matM.colRange(pos, pos + ni); + if( optimizeObjPoints ) + { + _Mi = param_m.rowRange(so, so + ni * 3); + _Mi = _Mi.reshape(3, 1); + } + Mat _mi = _m.colRange(pos, pos + ni); + Mat _me = allErrors.colRange(pos, pos + ni); + + Mat Jo; + if (optimizeObjPoints) + Jo = JoBuf(Range(0, ni*2), Range(0, ni*3)); + + Mat Je = JeBuf.rowRange(0, ni*2); + Mat Ji = JiBuf.rowRange(0, ni*2); + Mat err = errBuf.rowRange(0, ni*2); + Mat _mp = err.reshape(2, 1); + + if( calcJ ) + { + Mat _dpdr = Je.colRange(0, 3); + Mat _dpdt = Je.colRange(3, 6); + Mat _dpdf = (flags & CALIB_FIX_FOCAL_LENGTH) ? Mat() : Ji.colRange(0, 2); + Mat _dpdc = (flags & CALIB_FIX_PRINCIPAL_POINT) ? Mat() : Ji.colRange(2, 4); + Mat _dpdk = Ji.colRange(4, NINTRINSIC); + Mat _dpdo = Jo.empty() ? 
Mat() : Jo.colRange(0, ni * 3); + + projectPoints(_Mi, _ri, _ti, intrin, dist, _mp, _dpdr, _dpdt, + (_dpdf.empty() ? noArray() : _dpdf), + (_dpdc.empty() ? noArray() : _dpdc), + _dpdk, (_dpdo.empty() ? noArray() : _dpdo), + (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0.); + } + else + projectPoints( _Mi, _ri, _ti, intrin, dist, _mp); + + subtract( _mp, _mi, _mp ); + _mp.copyTo(_me); + + if( calcJ ) + { + // see HZ: (A6.14) for details on the structure of the Jacobian + JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += Ji.t() * Ji; + JtJ(Rect(si, si, 6, 6)) = Je.t() * Je; + JtJ(Rect(si, 0, 6, NINTRINSIC)) = Ji.t() * Je; + if( optimizeObjPoints ) + { + JtJ(Rect(so, 0, maxPoints * 3, NINTRINSIC)) += Ji.t() * Jo; + JtJ(Rect(so, si, maxPoints * 3, 6)) += Je.t() * Jo; + JtJ(Rect(so, so, maxPoints * 3, maxPoints * 3)) += Jo.t() * Jo; + } + + JtErr.rowRange(0, NINTRINSIC) += Ji.t() * err; + JtErr.rowRange(si, si + 6) = Je.t() * err; + if( optimizeObjPoints ) + { + JtErr.rowRange(so, nparams) += Jo.t() * err; + } + } + + double viewErr = norm(err, NORM_L2SQR); + if( !perViewErr.empty() ) + perViewErr.at(i) = std::sqrt(viewErr / ni); + + reprojErr += viewErr; + pos += ni; + } + + errnorm = reprojErr; return true; }; - LevMarq solver(param0, calibCameraLMCallback, + LevMarq solver(param, cameraCalcJErr, LevMarq::Settings() .setMaxIterations((unsigned int)termCrit.maxCount) .setStepNormTolerance(termCrit.epsilon) .setSmallEnergyTolerance(termCrit.epsilon * termCrit.epsilon), - mask0, MatrixType::AUTO, VariableType::LINEAR, /* LtoR */ false, solveMethod); + mask, MatrixType::AUTO, VariableType::LINEAR, /* LtoR */ false, solveMethod); // geodesic is not supported for normal callbacks solver.optimize(); //std::cout << "single camera calib. param after LM: " << param0.t() << "\n"; // If solver failed, then the last calculated perViewErr can be wrong & should be recalculated - Mat JtErr(nparams, 1, CV_64F), JtJ(nparams, nparams, CV_64F), JtJinv, JtJN; + Mat JtErr, JtJ, JtJinv, JtJN; double reprojErr = 0; - JtErr.setZero(); JtJ.setZero(); - cameraCalcJErr(matM, _m, npoints, allErrors, param0, (stdDevs != nullptr), - JtErr, JtJ, reprojErr, - aspectRatio, perViewErr, flags, releaseObject); - if (stdDevs) + if (!stdDevs.empty()) { - int nparams_nz = countNonZero(mask0); + JtErr = Mat(nparams, 1, CV_64F); + JtJ = Mat(nparams, nparams, CV_64F); + JtErr.setZero(); + JtJ.setZero(); + } + + cameraCalcJErr(param, JtErr, JtJ, reprojErr); + + if (!stdDevs.empty()) + { + int nparams_nz = countNonZero(mask); JtJN.create(nparams_nz, nparams_nz, CV_64F); - subMatrix(JtJ, JtJN, mask0, mask0); + subMatrix(JtJ, JtJN, mask, mask); completeSymm(JtJN, false); + //TODO: try DECOMP_CHOLESKY maybe? cv::invert(JtJN, JtJinv, DECOMP_EIG); // sigma2 is deviation of the noise // see any papers about variance of the least squares estimator for @@ -610,50 +586,53 @@ static double calibrateCameraInternal( const Mat& objectPoints, double sigma2 = norm(allErrors, NORM_L2SQR) / (total - nparams_nz); int j = 0; for ( int s = 0; s < nparams; s++ ) - if( mask0.at(s) ) + if( mask[s] ) { - stdDevs->at(s) = std::sqrt(JtJinv.at(j,j) * sigma2); + stdDevs.at(s) = std::sqrt(JtJinv.at(j,j) * sigma2); j++; } else - stdDevs->at(s) = 0.; + stdDevs.at(s) = 0.; } // 4. 
store the results A = Matx33d(param[0], 0, param[2], 0, param[1], param[3], 0, 0, 1); A.convertTo(cameraMatrix, cameraMatrix.type()); - _k = Mat(distCoeffs.size(), CV_64F, param + 4); + _k = Mat(distCoeffs.size(), CV_64F, param.data() + 4); _k.convertTo(distCoeffs, distCoeffs.type()); - if( newObjPoints && releaseObject ) + + if( !newObjPoints.empty() && releaseObject ) { - Mat _Mi = param0.rowRange(NINTRINSIC + nimages * 6, - NINTRINSIC + nimages * 6 + maxPoints * 3); - _Mi.reshape(3, 1).convertTo(*newObjPoints, newObjPoints->type()); + int s = NINTRINSIC + nimages * 6; + Mat _Mi = paramM.rowRange(s, s + maxPoints * 3); + _Mi.reshape(3, 1).convertTo(newObjPoints, newObjPoints.type()); } - for( i = 0, pos = 0; i < nimages; i++ ) + for(int i = 0; i < nimages; i++ ) { - if( rvecs ) + if( !rvecs.empty() ) { - Mat src = Mat(3, 1, CV_64F, param + NINTRINSIC + i*6); - if( rvecs->rows == nimages && rvecs->cols*rvecs->channels() == 9 ) + //TODO: fix it + Mat src = Mat(3, 1, CV_64F, param.data() + NINTRINSIC + i*6); + if( rvecs.rows == nimages && rvecs.cols*rvecs.channels() == 9 ) { - Mat dst(3, 3, rvecs->depth(), rvecs->ptr(i)); + Mat dst(3, 3, rvecs.depth(), rvecs.ptr(i)); Rodrigues(src, A); A.convertTo(dst, dst.type()); } else { - Mat dst(3, 1, rvecs->depth(), rvecs->rows == 1 ? - rvecs->data + i*rvecs->elemSize1() : rvecs->ptr(i)); + Mat dst(3, 1, rvecs.depth(), rvecs.rows == 1 ? + rvecs.data + i*rvecs.elemSize1() : rvecs.ptr(i)); src.convertTo(dst, dst.type()); } } - if( tvecs ) + if( !tvecs.empty() ) { - Mat src(3, 1, CV_64F, param + NINTRINSIC + i*6 + 3); - Mat dst(3, 1, tvecs->depth(), tvecs->rows == 1 ? - tvecs->data + i*tvecs->elemSize1() : tvecs->ptr(i)); + //TODO: fix it + Mat src(3, 1, CV_64F, param.data() + NINTRINSIC + i*6 + 3); + Mat dst(3, 1, tvecs.depth(), tvecs.rows == 1 ? 
+ tvecs.data + i*tvecs.elemSize1() : tvecs.ptr(i)); src.convertTo(dst, dst.type()); } } @@ -673,7 +652,7 @@ static double stereoCalibrateImpl( Mat perViewErr, int flags, TermCriteria termCrit ) { - const int NINTRINSIC = 18; + int NINTRINSIC = CALIB_NINTRINSIC; double dk[2][14]={{0}}; Mat Dist[2]; @@ -735,7 +714,7 @@ static double stereoCalibrateImpl( Mat matA(A[k], false); calibrateCameraInternal(objectPoints, imagePoints[k], _npoints, imageSize, 0, matA, Dist[k], - 0, 0, 0, 0, 0, flags, termCrit); + Mat(), Mat(), Mat(), Mat(), Mat(), flags, termCrit); } } @@ -829,12 +808,12 @@ static double stereoCalibrateImpl( for(int i = 0; i < nimages; i++ ) { int ni = _npoints.at(i); - Mat objpt_i = objectPoints(Range::all(), Range(pos, pos + ni)); + Mat objpt_i = objectPoints.colRange(pos, pos + ni); Matx33d R[2]; Vec3d rv, T[2]; for(int k = 0; k < 2; k++ ) { - Mat imgpt_ik = imagePoints[k](Range::all(), Range(pos, pos + ni)); + Mat imgpt_ik = imagePoints[k].colRange(pos, pos + ni); solvePnP(objpt_i, imgpt_ik, A[k], Dist[k], rv, T[k], false, SOLVEPNP_ITERATIVE ); Rodrigues(rv, R[k]); @@ -916,13 +895,17 @@ static double stereoCalibrateImpl( Mat J_LRBuf( maxPoints*2, 6, CV_64F ); Mat JiBuf( maxPoints*2, NINTRINSIC, CV_64F, Scalar(0) ); - auto lmcallback = [&](InputOutputArray _param, OutputArray JtErr_, OutputArray JtJ_, double& errnorm) + auto lmcallback = [errBuf, JeBuf, J_LRBuf, JiBuf, recomputeIntrinsics, nimages, flags, aspectRatio, &A, &dk, NINTRINSIC, + _npoints, objectPoints, imagePoints, Dist, &perViewErr] + (InputOutputArray _param, OutputArray JtErr_, OutputArray JtJ_, double& errnorm) { Mat_ param_m = _param.getMat(); Vec3d om_LR(param_m(0), param_m(1), param_m(2)); Vec3d T_LR(param_m(3), param_m(4), param_m(5)); Vec3d om[2], T[2]; Matx33d dr3dr1, dr3dr2, dt3dr2, dt3dt1, dt3dt2; + Matx33d intrin[2]; + std::vector< std::vector > distCoeffs(2, std::vector(14, 0.0)); double reprojErr = 0; @@ -944,11 +927,20 @@ static double stereoCalibrateImpl( { double fx = param_m(idx + k*NINTRINSIC+0), fy = param_m(idx + k*NINTRINSIC+1); double cx = param_m(idx + k*NINTRINSIC+2), cy = param_m(idx + k*NINTRINSIC+3); - A[k] = Matx33d(fx, 0, cx, - 0, fy, cy, - 0, 0, 1); + intrin[k] = Matx33d(fx, 0, cx, + 0, fy, cy, + 0, 0, 1); for(int j = 0; j < 14; j++) - dk[k][j] = param_m(idx + k*NINTRINSIC+4+j); + distCoeffs[k][j] = param_m(idx + k*NINTRINSIC+4+j); + } + } + else + { + for (int k = 0; k < 2; k++) + { + intrin[k] = A[k]; + for(int j = 0; j < 14; j++) + distCoeffs[k][j] = dk[k][j]; } } @@ -985,11 +977,11 @@ static double stereoCalibrateImpl( Mat imgpt_ik = imagePoints[k](Range::all(), Range(ptPos, ptPos + ni)); if( JtJ_.needed() || JtErr_.needed() ) - projectPoints(objpt_i, om[k], T[k], A[k], Dist[k], + projectPoints(objpt_i, om[k], T[k], intrin[k], distCoeffs[k], tmpImagePoints, dpdrot, dpdt, dpdf, dpdc, dpdk, noArray(), (flags & CALIB_FIX_ASPECT_RATIO) ? 
aspectRatio[k] : 0.); else - projectPoints(objpt_i, om[k], T[k], A[k], Dist[k], tmpImagePoints); + projectPoints(objpt_i, om[k], T[k], intrin[k], distCoeffs[k], tmpImagePoints); subtract( tmpImagePoints, imgpt_ik, tmpImagePoints ); if( JtJ_.needed() ) @@ -1089,6 +1081,8 @@ static double stereoCalibrateImpl( reprojErr = r.energy; } + // Extract optimized params from the param vector + Vec3d om_LR(param[0], param[1], param[2]); Vec3d T_LR(param[3], param[4], param[5]); Matx33d R_LR; @@ -1137,15 +1131,18 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, int iFixedPoint, - Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2, - Mat& npoints ) + OutputArray objPt, OutputArray imgPt1, OutputArray imgPt2, + OutputArray npoints ) { int nimages = (int)objectPoints.total(); int total = 0; CV_Assert(nimages > 0); CV_CheckEQ(nimages, (int)imagePoints1.total(), ""); - if (imgPtMat2) + if (!imagePoints2.empty()) + { CV_CheckEQ(nimages, (int)imagePoints2.total(), ""); + CV_Assert(imgPt2.needed()); + } for (int i = 0; i < nimages; i++) { @@ -1168,25 +1165,28 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints, } npoints.create(1, (int)nimages, CV_32S); - objPtMat.create(1, (int)total, CV_32FC3); - imgPtMat1.create(1, (int)total, CV_32FC2); + objPt.create(1, (int)total, CV_32FC3); + imgPt1.create(1, (int)total, CV_32FC2); Point2f* imgPtData2 = 0; - if (imgPtMat2) + Mat imgPt1Mat = imgPt1.getMat(); + if (!imagePoints2.empty()) { - imgPtMat2->create(1, (int)total, CV_32FC2); - imgPtData2 = imgPtMat2->ptr(); + imgPt2.create(1, (int)total, CV_32FC2); + imgPtData2 = imgPt2.getMat().ptr(); } + Mat nPointsMat = npoints.getMat(); + Mat objPtMat = objPt.getMat(); Point3f* objPtData = objPtMat.ptr(); - Point2f* imgPtData1 = imgPtMat1.ptr(); + Point2f* imgPtData1 = imgPt1.getMat().ptr(); for (int i = 0, j = 0; i < nimages; i++) { Mat objpt = objectPoints.getMat(i); Mat imgpt1 = imagePoints1.getMat(i); int numberOfObjectPoints = objpt.checkVector(3, CV_32F); - npoints.at(i) = numberOfObjectPoints; + nPointsMat.at(i) = numberOfObjectPoints; for (int n = 0; n < numberOfObjectPoints; ++n) { objPtData[j + n] = objpt.ptr()[n]; @@ -1207,14 +1207,14 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints, j += numberOfObjectPoints; } - int ni = npoints.at(0); + int ni = nPointsMat.at(0); bool releaseObject = iFixedPoint > 0 && iFixedPoint < ni - 1; // check object points. If not qualified, report errors. if( releaseObject ) { for (int i = 1; i < nimages; i++) { - if( npoints.at(i) != ni ) + if( nPointsMat.at(i) != ni ) { CV_Error( CV_StsBadArg, "All objectPoints[i].size() should be equal when " "object-releasing method is requested." 
); @@ -1233,10 +1233,10 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints, static void collectCalibrationData( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, - Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2, - Mat& npoints ) + OutputArray objPt, OutputArray imgPtMat1, OutputArray imgPtMat2, + OutputArray npoints ) { - collectCalibrationData( objectPoints, imagePoints1, imagePoints2, -1, objPtMat, imgPtMat1, + collectCalibrationData( objectPoints, imagePoints1, imagePoints2, -1, objPt, imgPtMat1, imgPtMat2, npoints ); } @@ -1258,7 +1258,7 @@ Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, Mat objPt, imgPt, npoints, cameraMatrix; collectCalibrationData( objectPoints, imagePoints, noArray(), - objPt, imgPt, 0, npoints ); + objPt, imgPt, noArray(), npoints ); initIntrinsicParams2D( objPt, imgPt, npoints, imageSize, cameraMatrix, aspectRatio ); return cameraMatrix; } @@ -1390,7 +1390,7 @@ double calibrateCameraRO(InputArrayOfArrays _objectPoints, } collectCalibrationData( _objectPoints, _imagePoints, noArray(), iFixedPoint, - objPt, imgPt, 0, npoints ); + objPt, imgPt, noArray(), npoints ); bool releaseObject = iFixedPoint > 0 && iFixedPoint < npoints.at(0) - 1; newobj_needed = newobj_needed && releaseObject; @@ -1405,10 +1405,8 @@ double calibrateCameraRO(InputArrayOfArrays _objectPoints, bool stddev_any_needed = stddev_needed || stddev_ext_needed || stddev_obj_needed; if( stddev_any_needed ) { - if( releaseObject ) - stdDeviationsM.create(nimages*6 + CALIB_NINTRINSIC + np * 3, 1, CV_64F); - else - stdDeviationsM.create(nimages*6 + CALIB_NINTRINSIC, 1, CV_64F); + int sz = nimages*6 + CALIB_NINTRINSIC + (releaseObject ? np * 3 : 0); + stdDeviationsM.create(sz, 1, CV_64F); } if( errors_needed ) @@ -1420,39 +1418,26 @@ double calibrateCameraRO(InputArrayOfArrays _objectPoints, double reprojErr = calibrateCameraInternal( objPt, imgPt, npoints, imageSize, iFixedPoint, cameraMatrix, distCoeffs, - rvecs_needed ? &rvecM : NULL, - tvecs_needed ? &tvecM : NULL, - newobj_needed ? &newObjPt : NULL, - stddev_any_needed ? &stdDeviationsM : NULL, - errors_needed ? 
&errorsM : NULL, flags, cvTermCriteria(criteria)); - - if( newobj_needed ) - newObjPt.copyTo(newObjPoints); + rvecM, tvecM, + newObjPt, + stdDeviationsM, + errorsM, flags, cvTermCriteria(criteria)); if( stddev_needed ) { - stdDeviationsIntrinsics.create(CALIB_NINTRINSIC, 1, CV_64F); - Mat stdDeviationsIntrinsicsMat = stdDeviationsIntrinsics.getMat(); - std::memcpy(stdDeviationsIntrinsicsMat.ptr(), stdDeviationsM.ptr(), - CALIB_NINTRINSIC*sizeof(double)); + stdDeviationsM.rowRange(0, CALIB_NINTRINSIC).copyTo(stdDeviationsIntrinsics); } if ( stddev_ext_needed ) { - stdDeviationsExtrinsics.create(nimages*6, 1, CV_64F); - Mat stdDeviationsExtrinsicsMat = stdDeviationsExtrinsics.getMat(); - std::memcpy(stdDeviationsExtrinsicsMat.ptr(), - stdDeviationsM.ptr() + CALIB_NINTRINSIC*sizeof(double), - nimages*6*sizeof(double)); + int s = CALIB_NINTRINSIC; + stdDeviationsM.rowRange(s, s + nimages*6).copyTo(stdDeviationsExtrinsics); } if( stddev_obj_needed ) { - stdDeviationsObjPoints.create( np * 3, 1, CV_64F ); - Mat stdDeviationsObjPointsMat = stdDeviationsObjPoints.getMat(); - std::memcpy( stdDeviationsObjPointsMat.ptr(), stdDeviationsM.ptr() - + ( CALIB_NINTRINSIC + nimages * 6 ) * sizeof( double ), - np * 3 * sizeof( double ) ); + int s = CALIB_NINTRINSIC + nimages*6; + stdDeviationsM.rowRange(s, s + np*3).copyTo(stdDeviationsObjPoints); } // overly complicated and inefficient rvec/ tvec handling to support vector @@ -1577,7 +1562,7 @@ double stereoCalibrate( InputArrayOfArrays _objectPoints, Mat objPt, imgPt, imgPt2, npoints; collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2, - objPt, imgPt, &imgPt2, npoints ); + objPt, imgPt, imgPt2, npoints ); Mat matR = _Rmat.getMat(), matT = _Tmat.getMat(); bool E_needed = _Emat.needed(), F_needed = _Fmat.needed(), errors_needed = _perViewErrors.needed();