From 6873bdee70528ba6101bd5f4091b5f5302ee70c9 Mon Sep 17 00:00:00 2001 From: Vincent Rabaud Date: Fri, 8 Nov 2024 09:56:49 +0100 Subject: [PATCH] backport C++ 3d/calibration_base.cpp:5.x to calib3d/calibration_base.cpp:4.x (#26414) * Add vanilla calibration_base from 5.x This is from 55105719dd698bf3eeb5b58f6dbb3baaf0eeb5b9 * Have the C implementation use the new C++ one. --- modules/calib3d/src/calib3d_c_api.h | 65 - modules/calib3d/src/calibration.cpp | 1922 +--------------------- modules/calib3d/src/calibration_base.cpp | 1644 ++++++++++++++++++ modules/calib3d/src/checkchessboard.cpp | 1 - modules/calib3d/src/precomp.hpp | 13 + modules/calib3d/src/solvepnp.cpp | 8 +- 6 files changed, 1697 insertions(+), 1956 deletions(-) create mode 100644 modules/calib3d/src/calibration_base.cpp diff --git a/modules/calib3d/src/calib3d_c_api.h b/modules/calib3d/src/calib3d_c_api.h index cdbf021667..46cdbc824a 100644 --- a/modules/calib3d/src/calib3d_c_api.h +++ b/modules/calib3d/src/calib3d_c_api.h @@ -111,23 +111,6 @@ void cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, void cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2, CvMat* new_points1, CvMat* new_points2); - -/* Computes the optimal new camera matrix according to the free scaling parameter alpha: - alpha=0 - only valid pixels will be retained in the undistorted image - alpha=1 - all the source image pixels will be retained in the undistorted image -*/ -void cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, - const CvMat* dist_coeffs, - CvSize image_size, double alpha, - CvMat* new_camera_matrix, - CvSize new_imag_size CV_DEFAULT(cvSize(0,0)), - CvRect* valid_pixel_ROI CV_DEFAULT(0), - int center_principal_point CV_DEFAULT(0)); - -/* Converts rotation vector to rotation matrix or vice versa */ -int cvRodrigues2( const CvMat* src, CvMat* dst, - CvMat* jacobian CV_DEFAULT(0) ); - /* Finds perspective transformation between the object plane and image (view) plane */ int cvFindHomography( const CvMat* src_points, const CvMat* dst_points, @@ -138,54 +121,6 @@ int cvFindHomography( const CvMat* src_points, int maxIters CV_DEFAULT(2000), double confidence CV_DEFAULT(0.995)); -/* Computes RQ decomposition for 3x3 matrices */ -void cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ, - CvMat *matrixQx CV_DEFAULT(NULL), - CvMat *matrixQy CV_DEFAULT(NULL), - CvMat *matrixQz CV_DEFAULT(NULL), - CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); - -/* Computes projection matrix decomposition */ -void cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr, - CvMat *rotMatr, CvMat *posVect, - CvMat *rotMatrX CV_DEFAULT(NULL), - CvMat *rotMatrY CV_DEFAULT(NULL), - CvMat *rotMatrZ CV_DEFAULT(NULL), - CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); - -/* Computes d(AB)/dA and d(AB)/dB */ -void cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB ); - -/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)), - t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */ -void cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1, - const CvMat* _rvec2, const CvMat* _tvec2, - CvMat* _rvec3, CvMat* _tvec3, - CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0), - CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0), - CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0), - CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) ); - -/* Projects object points to the view plane using - the specified extrinsic and intrinsic camera parameters */ -void cvProjectPoints2( const CvMat* 
object_points, const CvMat* rotation_vector, - const CvMat* translation_vector, const CvMat* camera_matrix, - const CvMat* distortion_coeffs, CvMat* image_points, - CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL), - CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL), - CvMat* dpddist CV_DEFAULT(NULL), - double aspect_ratio CV_DEFAULT(0)); - -/* Finds extrinsic camera parameters from - a few known corresponding point pairs and intrinsic parameters */ -void cvFindExtrinsicCameraParams2( const CvMat* object_points, - const CvMat* image_points, - const CvMat* camera_matrix, - const CvMat* distortion_coeffs, - CvMat* rotation_vector, - CvMat* translation_vector, - int use_extrinsic_guess CV_DEFAULT(0) ); - /* Computes initial estimate of the intrinsic camera parameters in case of planar calibration target (e.g. chessboard) */ void cvInitIntrinsicParams2D( const CvMat* object_points, diff --git a/modules/calib3d/src/calibration.cpp b/modules/calib3d/src/calibration.cpp index 9729cb7bea..c791d64f00 100644 --- a/modules/calib3d/src/calibration.cpp +++ b/modules/calib3d/src/calibration.cpp @@ -42,7 +42,6 @@ #include "precomp.hpp" #include "hal_replacement.hpp" -#include "opencv2/imgproc/imgproc_c.h" #include "distortion_model.hpp" #include "calib3d_c_api.h" #include @@ -58,1317 +57,8 @@ using namespace cv; -// reimplementation of dAB.m -CV_IMPL void cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB ) -{ - int i, j, M, N, L; - int bstep; - - CV_Assert( CV_IS_MAT(A) && CV_IS_MAT(B) ); - CV_Assert( CV_ARE_TYPES_EQ(A, B) && - (CV_MAT_TYPE(A->type) == CV_32F || CV_MAT_TYPE(A->type) == CV_64F) ); - CV_Assert( A->cols == B->rows ); - - M = A->rows; - L = A->cols; - N = B->cols; - bstep = B->step/CV_ELEM_SIZE(B->type); - - if( dABdA ) - { - CV_Assert( CV_ARE_TYPES_EQ(A, dABdA) && - dABdA->rows == A->rows*B->cols && dABdA->cols == A->rows*A->cols ); - } - - if( dABdB ) - { - CV_Assert( CV_ARE_TYPES_EQ(A, dABdB) && - dABdB->rows == A->rows*B->cols && dABdB->cols == B->rows*B->cols ); - } - - if( CV_MAT_TYPE(A->type) == CV_32F ) - { - for( i = 0; i < M*N; i++ ) - { - int i1 = i / N, i2 = i % N; - - if( dABdA ) - { - float* dcda = (float*)(dABdA->data.ptr + dABdA->step*i); - const float* b = (const float*)B->data.ptr + i2; - - for( j = 0; j < M*L; j++ ) - dcda[j] = 0; - for( j = 0; j < L; j++ ) - dcda[i1*L + j] = b[j*bstep]; - } - - if( dABdB ) - { - float* dcdb = (float*)(dABdB->data.ptr + dABdB->step*i); - const float* a = (const float*)(A->data.ptr + A->step*i1); - - for( j = 0; j < L*N; j++ ) - dcdb[j] = 0; - for( j = 0; j < L; j++ ) - dcdb[j*N + i2] = a[j]; - } - } - } - else - { - for( i = 0; i < M*N; i++ ) - { - int i1 = i / N, i2 = i % N; - - if( dABdA ) - { - double* dcda = (double*)(dABdA->data.ptr + dABdA->step*i); - const double* b = (const double*)B->data.ptr + i2; - - for( j = 0; j < M*L; j++ ) - dcda[j] = 0; - for( j = 0; j < L; j++ ) - dcda[i1*L + j] = b[j*bstep]; - } - - if( dABdB ) - { - double* dcdb = (double*)(dABdB->data.ptr + dABdB->step*i); - const double* a = (const double*)(A->data.ptr + A->step*i1); - - for( j = 0; j < L*N; j++ ) - dcdb[j] = 0; - for( j = 0; j < L; j++ ) - dcdb[j*N + i2] = a[j]; - } - } - } -} - -// reimplementation of compose_motion.m -CV_IMPL void cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1, - const CvMat* _rvec2, const CvMat* _tvec2, - CvMat* _rvec3, CvMat* _tvec3, - CvMat* dr3dr1, CvMat* dr3dt1, - CvMat* dr3dr2, CvMat* dr3dt2, - CvMat* dt3dr1, CvMat* dt3dt1, - CvMat* dt3dr2, CvMat* dt3dt2 ) -{ - double 
_r1[3], _r2[3]; - double _R1[9], _d1[9*3], _R2[9], _d2[9*3]; - CvMat r1 = cvMat(3,1,CV_64F,_r1), r2 = cvMat(3,1,CV_64F,_r2); - CvMat R1 = cvMat(3,3,CV_64F,_R1), R2 = cvMat(3,3,CV_64F,_R2); - CvMat dR1dr1 = cvMat(9,3,CV_64F,_d1), dR2dr2 = cvMat(9,3,CV_64F,_d2); - - CV_Assert( CV_IS_MAT(_rvec1) && CV_IS_MAT(_rvec2) ); - - CV_Assert( CV_MAT_TYPE(_rvec1->type) == CV_32F || - CV_MAT_TYPE(_rvec1->type) == CV_64F ); - - CV_Assert( _rvec1->rows == 3 && _rvec1->cols == 1 && CV_ARE_SIZES_EQ(_rvec1, _rvec2) ); - - cvConvert( _rvec1, &r1 ); - cvConvert( _rvec2, &r2 ); - - cvRodrigues2( &r1, &R1, &dR1dr1 ); - cvRodrigues2( &r2, &R2, &dR2dr2 ); - - if( _rvec3 || dr3dr1 || dr3dr2 ) - { - double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3]; - double _W1[9*3], _W2[3*3]; - CvMat r3 = cvMat(3,1,CV_64F,_r3), R3 = cvMat(3,3,CV_64F,_R3); - CvMat dR3dR1 = cvMat(9,9,CV_64F,_dR3dR1), dR3dR2 = cvMat(9,9,CV_64F,_dR3dR2); - CvMat dr3dR3 = cvMat(3,9,CV_64F,_dr3dR3); - CvMat W1 = cvMat(3,9,CV_64F,_W1), W2 = cvMat(3,3,CV_64F,_W2); - - cvMatMul( &R2, &R1, &R3 ); - cvCalcMatMulDeriv( &R2, &R1, &dR3dR2, &dR3dR1 ); - - cvRodrigues2( &R3, &r3, &dr3dR3 ); - - if( _rvec3 ) - cvConvert( &r3, _rvec3 ); - - if( dr3dr1 ) - { - cvMatMul( &dr3dR3, &dR3dR1, &W1 ); - cvMatMul( &W1, &dR1dr1, &W2 ); - cvConvert( &W2, dr3dr1 ); - } - - if( dr3dr2 ) - { - cvMatMul( &dr3dR3, &dR3dR2, &W1 ); - cvMatMul( &W1, &dR2dr2, &W2 ); - cvConvert( &W2, dr3dr2 ); - } - } - - if( dr3dt1 ) - cvZero( dr3dt1 ); - if( dr3dt2 ) - cvZero( dr3dt2 ); - - if( _tvec3 || dt3dr2 || dt3dt1 ) - { - double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3]; - CvMat t1 = cvMat(3,1,CV_64F,_t1), t2 = cvMat(3,1,CV_64F,_t2); - CvMat t3 = cvMat(3,1,CV_64F,_t3); - CvMat dxdR2 = cvMat(3, 9, CV_64F, _dxdR2); - CvMat dxdt1 = cvMat(3, 3, CV_64F, _dxdt1); - CvMat W3 = cvMat(3, 3, CV_64F, _W3); - - CV_Assert( CV_IS_MAT(_tvec1) && CV_IS_MAT(_tvec2) ); - CV_Assert( CV_ARE_SIZES_EQ(_tvec1, _tvec2) && CV_ARE_SIZES_EQ(_tvec1, _rvec1) ); - - cvConvert( _tvec1, &t1 ); - cvConvert( _tvec2, &t2 ); - cvMatMulAdd( &R2, &t1, &t2, &t3 ); - - if( _tvec3 ) - cvConvert( &t3, _tvec3 ); - - if( dt3dr2 || dt3dt1 ) - { - cvCalcMatMulDeriv( &R2, &t1, &dxdR2, &dxdt1 ); - if( dt3dr2 ) - { - cvMatMul( &dxdR2, &dR2dr2, &W3 ); - cvConvert( &W3, dt3dr2 ); - } - if( dt3dt1 ) - cvConvert( &dxdt1, dt3dt1 ); - } - } - - if( dt3dt2 ) - cvSetIdentity( dt3dt2 ); - if( dt3dr1 ) - cvZero( dt3dr1 ); -} - -CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian ) -{ - double J[27] = {0}; - CvMat matJ = cvMat( 3, 9, CV_64F, J ); - - if( !CV_IS_MAT(src) ) - CV_Error( !src ? cv::Error::StsNullPtr : cv::Error::StsBadArg, "Input argument is not a valid matrix" ); - - if( !CV_IS_MAT(dst) ) - CV_Error( !dst ? 
cv::Error::StsNullPtr : cv::Error::StsBadArg, - "The first output argument is not a valid matrix" ); - - int depth = CV_MAT_DEPTH(src->type); - int elem_size = CV_ELEM_SIZE(depth); - - if( depth != CV_32F && depth != CV_64F ) - CV_Error( cv::Error::StsUnsupportedFormat, "The matrices must have 32f or 64f data type" ); - - if( !CV_ARE_DEPTHS_EQ(src, dst) ) - CV_Error( cv::Error::StsUnmatchedFormats, "All the matrices must have the same data type" ); - - if( jacobian ) - { - if( !CV_IS_MAT(jacobian) ) - CV_Error( cv::Error::StsBadArg, "Jacobian is not a valid matrix" ); - - if( !CV_ARE_DEPTHS_EQ(src, jacobian) || CV_MAT_CN(jacobian->type) != 1 ) - CV_Error( cv::Error::StsUnmatchedFormats, "Jacobian must have 32fC1 or 64fC1 datatype" ); - - if( (jacobian->rows != 9 || jacobian->cols != 3) && - (jacobian->rows != 3 || jacobian->cols != 9)) - CV_Error( cv::Error::StsBadSize, "Jacobian must be 3x9 or 9x3" ); - } - - if( src->cols == 1 || src->rows == 1 ) - { - int step = src->rows > 1 ? src->step / elem_size : 1; - - if( src->rows + src->cols*CV_MAT_CN(src->type) - 1 != 3 ) - CV_Error( cv::Error::StsBadSize, "Input matrix must be 1x3, 3x1 or 3x3" ); - - if( dst->rows != 3 || dst->cols != 3 || CV_MAT_CN(dst->type) != 1 ) - CV_Error( cv::Error::StsBadSize, "Output matrix must be 3x3, single-channel floating point matrix" ); - - Point3d r; - if( depth == CV_32F ) - { - r.x = src->data.fl[0]; - r.y = src->data.fl[step]; - r.z = src->data.fl[step*2]; - } - else - { - r.x = src->data.db[0]; - r.y = src->data.db[step]; - r.z = src->data.db[step*2]; - } - - double theta = norm(r); - - if( theta < DBL_EPSILON ) - { - cvSetIdentity( dst ); - - if( jacobian ) - { - memset( J, 0, sizeof(J) ); - J[5] = J[15] = J[19] = -1; - J[7] = J[11] = J[21] = 1; - } - } - else - { - double c = cos(theta); - double s = sin(theta); - double c1 = 1. - c; - double itheta = theta ? 1./theta : 0.; - - r *= itheta; - - Matx33d rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z ); - Matx33d r_x( 0, -r.z, r.y, - r.z, 0, -r.x, - -r.y, r.x, 0 ); - - // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] - Matx33d R = c*Matx33d::eye() + c1*rrt + s*r_x; - - Mat(R).convertTo(cvarrToMat(dst), dst->type); - - if( jacobian ) - { - const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }; - double drrt[] = { r.x+r.x, r.y, r.z, r.y, 0, 0, r.z, 0, 0, - 0, r.x, 0, r.x, r.y+r.y, r.z, 0, r.z, 0, - 0, 0, r.x, 0, 0, r.y, r.x, r.y, r.z+r.z }; - double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0, - 0, 0, 1, 0, 0, 0, -1, 0, 0, - 0, -1, 0, 1, 0, 0, 0, 0, 0 }; - for( int i = 0; i < 3; i++ ) - { - double ri = i == 0 ? r.x : i == 1 ? r.y : r.z; - double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta; - double a3 = (c - s*itheta)*ri, a4 = s*itheta; - for( int k = 0; k < 9; k++ ) - J[i*9+k] = a0*I[k] + a1*rrt.val[k] + a2*drrt[i*9+k] + - a3*r_x.val[k] + a4*d_r_x_[i*9+k]; - } - } - } - } - else if( src->cols == 3 && src->rows == 3 ) - { - Matx33d U, Vt; - Vec3d W; - double theta, s, c; - int step = dst->rows > 1 ? 
dst->step / elem_size : 1; - - if( (dst->rows != 1 || dst->cols*CV_MAT_CN(dst->type) != 3) && - (dst->rows != 3 || dst->cols != 1 || CV_MAT_CN(dst->type) != 1)) - CV_Error( cv::Error::StsBadSize, "Output matrix must be 1x3 or 3x1" ); - - Matx33d R = cvarrToMat(src); - - if( !checkRange(R, true, NULL, -100, 100) ) - { - cvZero(dst); - if( jacobian ) - cvZero(jacobian); - return 0; - } - - SVD::compute(R, W, U, Vt); - R = U*Vt; - - Point3d r(R(2, 1) - R(1, 2), R(0, 2) - R(2, 0), R(1, 0) - R(0, 1)); - - s = std::sqrt((r.x*r.x + r.y*r.y + r.z*r.z)*0.25); - c = (R(0, 0) + R(1, 1) + R(2, 2) - 1)*0.5; - c = c > 1. ? 1. : c < -1. ? -1. : c; - theta = acos(c); - - if( s < 1e-5 ) - { - double t; - - if( c > 0 ) - r = Point3d(0, 0, 0); - else - { - t = (R(0, 0) + 1)*0.5; - r.x = std::sqrt(MAX(t,0.)); - t = (R(1, 1) + 1)*0.5; - r.y = std::sqrt(MAX(t,0.))*(R(0, 1) < 0 ? -1. : 1.); - t = (R(2, 2) + 1)*0.5; - r.z = std::sqrt(MAX(t,0.))*(R(0, 2) < 0 ? -1. : 1.); - if( fabs(r.x) < fabs(r.y) && fabs(r.x) < fabs(r.z) && (R(1, 2) > 0) != (r.y*r.z > 0) ) - r.z = -r.z; - theta /= norm(r); - r *= theta; - } - - if( jacobian ) - { - memset( J, 0, sizeof(J) ); - if( c > 0 ) - { - J[5] = J[15] = J[19] = -0.5; - J[7] = J[11] = J[21] = 0.5; - } - } - } - else - { - double vth = 1/(2*s); - - if( jacobian ) - { - double t, dtheta_dtr = -1./s; - // var1 = [vth;theta] - // var = [om1;var1] = [om1;vth;theta] - double dvth_dtheta = -vth*c/s; - double d1 = 0.5*dvth_dtheta*dtheta_dtr; - double d2 = 0.5*dtheta_dtr; - // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR - double dvardR[5*9] = - { - 0, 0, 0, 0, 0, 1, 0, -1, 0, - 0, 0, -1, 0, 0, 0, 1, 0, 0, - 0, 1, 0, -1, 0, 0, 0, 0, 0, - d1, 0, 0, 0, d1, 0, 0, 0, d1, - d2, 0, 0, 0, d2, 0, 0, 0, d2 - }; - // var2 = [om;theta] - double dvar2dvar[] = - { - vth, 0, 0, r.x, 0, - 0, vth, 0, r.y, 0, - 0, 0, vth, r.z, 0, - 0, 0, 0, 0, 1 - }; - double domegadvar2[] = - { - theta, 0, 0, r.x*vth, - 0, theta, 0, r.y*vth, - 0, 0, theta, r.z*vth - }; - - CvMat _dvardR = cvMat( 5, 9, CV_64FC1, dvardR ); - CvMat _dvar2dvar = cvMat( 4, 5, CV_64FC1, dvar2dvar ); - CvMat _domegadvar2 = cvMat( 3, 4, CV_64FC1, domegadvar2 ); - double t0[3*5]; - CvMat _t0 = cvMat( 3, 5, CV_64FC1, t0 ); - - cvMatMul( &_domegadvar2, &_dvar2dvar, &_t0 ); - cvMatMul( &_t0, &_dvardR, &matJ ); - - // transpose every row of matJ (treat the rows as 3x3 matrices) - CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t); - CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t); - CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t); - } - - vth *= theta; - r *= vth; - } - - if( depth == CV_32F ) - { - dst->data.fl[0] = (float)r.x; - dst->data.fl[step] = (float)r.y; - dst->data.fl[step*2] = (float)r.z; - } - else - { - dst->data.db[0] = r.x; - dst->data.db[step] = r.y; - dst->data.db[step*2] = r.z; - } - } - else - { - CV_Error(cv::Error::StsBadSize, "Input matrix must be 1x3 or 3x1 for a rotation vector, or 3x3 for a rotation matrix"); - } - - if( jacobian ) - { - if( depth == CV_32F ) - { - if( jacobian->rows == matJ.rows ) - cvConvert( &matJ, jacobian ); - else - { - float Jf[3*9]; - CvMat _Jf = cvMat( matJ.rows, matJ.cols, CV_32FC1, Jf ); - cvConvert( &matJ, &_Jf ); - cvTranspose( &_Jf, jacobian ); - } - } - else if( jacobian->rows == matJ.rows ) - cvCopy( &matJ, jacobian ); - else - cvTranspose( &matJ, jacobian ); - } - - return 1; -} - static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 
1x12, 12x1, 1x14 or 14x1 floating-point vector"; -static void cvProjectPoints2Internal( const CvMat* objectPoints, - const CvMat* r_vec, - const CvMat* t_vec, - const CvMat* A, - const CvMat* distCoeffs, - CvMat* imagePoints, CvMat* dpdr CV_DEFAULT(NULL), - CvMat* dpdt CV_DEFAULT(NULL), CvMat* dpdf CV_DEFAULT(NULL), - CvMat* dpdc CV_DEFAULT(NULL), CvMat* dpdk CV_DEFAULT(NULL), - CvMat* dpdo CV_DEFAULT(NULL), - double aspectRatio CV_DEFAULT(0) ) -{ - Ptr matM, _m; - Ptr _dpdr, _dpdt, _dpdc, _dpdf, _dpdk; - Ptr _dpdo; - - int i, j, count; - int calc_derivatives; - const CvPoint3D64f* M; - CvPoint2D64f* m; - double r[3], R[9], R_vec[9], dRdr[27], t[3], a[9], k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, cx, cy; - Matx33d matTilt = Matx33d::eye(); - Matx33d dMatTiltdTauX(0,0,0,0,0,0,0,-1,0); - Matx33d dMatTiltdTauY(0,0,0,0,0,0,1,0,0); - CvMat _r, _r_vec, _t, _a = cvMat( 3, 3, CV_64F, a ), _k; - CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr ); - double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0; - double* dpdo_p = 0; - int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0; - int dpdo_step = 0; - bool fixedAspectRatio = aspectRatio > FLT_EPSILON; - - if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) || - !CV_IS_MAT(t_vec) || !CV_IS_MAT(A) || - /*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints) ) - CV_Error( cv::Error::StsBadArg, "One of required arguments is not a valid matrix" ); - - int odepth = CV_MAT_DEPTH(objectPoints->type); - int ochans = CV_MAT_CN(objectPoints->type); - int orows = objectPoints->rows, ocols = objectPoints->cols; - int total = orows * ocols * ochans; - if(total % 3 != 0) - { - //we have stopped support of homogeneous coordinates because it cause ambiguity in interpretation of the input data - CV_Error( cv::Error::StsBadArg, "Homogeneous coordinates are not supported" ); - } - count = total / 3; - - CV_Assert(CV_IS_CONT_MAT(objectPoints->type)); - CV_Assert(odepth == CV_32F || odepth == CV_64F); - // Homogeneous coordinates are not supported - CV_Assert((orows == 1 && ochans == 3) || - (orows == count && ochans*ocols == 3) || - (orows == 3 && ochans == 1 && ocols == count)); - - int idepth = CV_MAT_DEPTH(imagePoints->type); - int ichans = CV_MAT_CN(imagePoints->type); - int irows = imagePoints->rows, icols = imagePoints->cols; - CV_Assert(CV_IS_CONT_MAT(imagePoints->type)); - CV_Assert(idepth == CV_32F || idepth == CV_64F); - // Homogeneous coordinates are not supported - CV_Assert((irows == 1 && ichans == 2) || - (irows == count && ichans*icols == 2) || - (irows == 2 && ichans == 1 && icols == count)); - - if( (CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F) || - (((r_vec->rows != 1 && r_vec->cols != 1) || - r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3) && - ((r_vec->rows != 3 && r_vec->cols != 3) || CV_MAT_CN(r_vec->type) != 1))) - CV_Error( cv::Error::StsBadArg, "Rotation must be represented by 1x3 or 3x1 " - "floating-point rotation vector, or 3x3 rotation matrix" ); - - if( r_vec->rows == 3 && r_vec->cols == 3 ) - { - _r = cvMat( 3, 1, CV_64FC1, r ); - _r_vec = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), R_vec ); - cvConvert( r_vec, &_r_vec ); - cvRodrigues2( &_r_vec, &_r ); - cvRodrigues2( &_r, &matR, &_dRdr ); - cvCopy( &_r_vec, &matR ); - } - else - { - _r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r ); - cvConvert( r_vec, &_r ); - cvRodrigues2( &_r, &matR, &_dRdr ); - } - - if( 
(CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F) || - (t_vec->rows != 1 && t_vec->cols != 1) || - t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 ) - CV_Error( cv::Error::StsBadArg, - "Translation vector must be 1x3 or 3x1 floating-point vector" ); - - _t = cvMat( t_vec->rows, t_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), t ); - cvConvert( t_vec, &_t ); - - if( (CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) || - A->rows != 3 || A->cols != 3 ) - CV_Error( cv::Error::StsBadArg, "Intrinsic parameters must be 3x3 floating-point matrix" ); - - cvConvert( A, &_a ); - fx = a[0]; fy = a[4]; - cx = a[2]; cy = a[5]; - - if( fixedAspectRatio ) - fx = fy*aspectRatio; - - int delems = 0; - if( distCoeffs ) - { - CV_Assert(CV_IS_MAT(distCoeffs)); - - int ddepth = CV_MAT_DEPTH(distCoeffs->type); - int dchans = CV_MAT_CN(distCoeffs->type); - int drows = distCoeffs->rows, dcols = distCoeffs->cols; - delems = drows * dcols * dchans; - CV_Assert((ddepth == CV_32F || ddepth == CV_64F) && - (drows == 1 || dcols == 1) && - (delems == 4 || delems == 5 || delems == 8 || delems == 12 || delems == 14)); - - _k = cvMat( drows, dcols, CV_MAKETYPE(CV_64F, dchans), k ); - cvConvert( distCoeffs, &_k ); - if(k[12] != 0 || k[13] != 0) - { - detail::computeTiltProjectionMatrix(k[12], k[13], &matTilt, &dMatTiltdTauX, &dMatTiltdTauY); - } - } - - if (idepth == CV_32F && odepth == CV_32F) - { - float rtMatrix[12] = { (float)R[0], (float)R[1], (float)R[2], (float)t[0], - (float)R[3], (float)R[4], (float)R[5], (float)t[1], - (float)R[6], (float)R[7], (float)R[8], (float)t[2] }; - - cv_camera_intrinsics_pinhole_32f intr; - intr.fx = (float)fx; intr.fy = (float)fy; - intr.cx = (float)cx; intr.cy = (float)cy; - intr.amt_k = 0; intr.amt_p = 0; intr.amt_s = 0; intr.use_tau = false; - - switch (delems) - { - case 0: break; - case 4: // [k_1, k_2, p_1, p_2] - intr.amt_k = 2; intr.amt_p = 2; - break; - case 5: // [k_1, k_2, p_1, p_2, k_3] - intr.amt_k = 3; intr.amt_p = 2; - break; - case 8: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6] - intr.amt_k = 6; intr.amt_p = 2; - break; - case 12: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4] - intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; - break; - case 14: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4, tau_x, tau_y] - intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; intr.use_tau = true; - break; - default: - CV_Error(cv::Error::StsInternal, "Wrong number of distortion coefficients"); - } - - intr.k[0] = (float)k[0]; - intr.k[1] = (float)k[1]; - intr.k[2] = (float)k[4]; - intr.k[3] = (float)k[5]; - intr.k[4] = (float)k[6]; - intr.k[5] = (float)k[7]; - - intr.p[0] = (float)k[2]; - intr.p[1] = (float)k[3]; - - for (int ctr = 0; ctr < 4; ctr++) - { - intr.s[ctr] = (float)k[8+ctr]; - } - - intr.tau_x = (float)k[12]; - intr.tau_y = (float)k[13]; - - CALL_HAL(projectPoints, cv_hal_project_points_pinhole32f, - objectPoints->data.fl, objectPoints->step, count, - imagePoints->data.fl, imagePoints->step, - rtMatrix, &intr); - } - - _m.reset(cvCreateMat( imagePoints->rows, imagePoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(imagePoints->type)) )); - cvConvert(imagePoints, _m); - - matM.reset(cvCreateMat( objectPoints->rows, objectPoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(objectPoints->type)) )); - cvConvert(objectPoints, matM); - - M = (CvPoint3D64f*)matM->data.db; - m = (CvPoint2D64f*)_m->data.db; - - if (idepth == CV_64F && odepth == CV_64F) - { - double rtMatrix[12] = { R[0], R[1], R[2], t[0], - R[3], 
R[4], R[5], t[1], - R[6], R[7], R[8], t[2] }; - - cv_camera_intrinsics_pinhole_64f intr; - intr.fx = fx; intr.fy = fy; - intr.cx = cx; intr.cy = cy; - intr.amt_k = 0; intr.amt_p = 0; intr.amt_s = 0; intr.use_tau = false; - - switch (delems) - { - case 0: break; - case 4: // [k_1, k_2, p_1, p_2] - intr.amt_k = 2; intr.amt_p = 2; - break; - case 5: // [k_1, k_2, p_1, p_2, k_3] - intr.amt_k = 3; intr.amt_p = 2; - break; - case 8: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6] - intr.amt_k = 6; intr.amt_p = 2; - break; - case 12: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4] - intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; - break; - case 14: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4, tau_x, tau_y] - intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; intr.use_tau = true; - break; - default: - CV_Error(cv::Error::StsInternal, "Wrong number of distortion coefficients"); - } - - intr.k[0] = k[0]; - intr.k[1] = k[1]; - intr.k[2] = k[4]; - intr.k[3] = k[5]; - intr.k[4] = k[6]; - intr.k[5] = k[7]; - - intr.p[0] = k[2]; - intr.p[1] = k[3]; - - for (int ctr = 0; ctr < 4; ctr++) - { - intr.s[ctr] = k[8+ctr]; - } - - intr.tau_x = k[12]; - intr.tau_y = k[13]; - - CALL_HAL(projectPoints, cv_hal_project_points_pinhole64f, - objectPoints->data.db, objectPoints->step, count, - imagePoints->data.db, imagePoints->step, - rtMatrix, &intr); - } - - if( dpdr ) - { - if( !CV_IS_MAT(dpdr) || - (CV_MAT_TYPE(dpdr->type) != CV_32FC1 && - CV_MAT_TYPE(dpdr->type) != CV_64FC1) || - dpdr->rows != count*2 || dpdr->cols != 3 ) - CV_Error( cv::Error::StsBadArg, "dp/drot must be 2Nx3 floating-point matrix" ); - - if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 ) - { - _dpdr.reset(cvCloneMat(dpdr)); - } - else - _dpdr.reset(cvCreateMat( 2*count, 3, CV_64FC1 )); - dpdr_p = _dpdr->data.db; - dpdr_step = _dpdr->step/sizeof(dpdr_p[0]); - } - - if( dpdt ) - { - if( !CV_IS_MAT(dpdt) || - (CV_MAT_TYPE(dpdt->type) != CV_32FC1 && - CV_MAT_TYPE(dpdt->type) != CV_64FC1) || - dpdt->rows != count*2 || dpdt->cols != 3 ) - CV_Error( cv::Error::StsBadArg, "dp/dT must be 2Nx3 floating-point matrix" ); - - if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 ) - { - _dpdt.reset(cvCloneMat(dpdt)); - } - else - _dpdt.reset(cvCreateMat( 2*count, 3, CV_64FC1 )); - dpdt_p = _dpdt->data.db; - dpdt_step = _dpdt->step/sizeof(dpdt_p[0]); - } - - if( dpdf ) - { - if( !CV_IS_MAT(dpdf) || - (CV_MAT_TYPE(dpdf->type) != CV_32FC1 && CV_MAT_TYPE(dpdf->type) != CV_64FC1) || - dpdf->rows != count*2 || dpdf->cols != 2 ) - CV_Error( cv::Error::StsBadArg, "dp/df must be 2Nx2 floating-point matrix" ); - - if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 ) - { - _dpdf.reset(cvCloneMat(dpdf)); - } - else - _dpdf.reset(cvCreateMat( 2*count, 2, CV_64FC1 )); - dpdf_p = _dpdf->data.db; - dpdf_step = _dpdf->step/sizeof(dpdf_p[0]); - } - - if( dpdc ) - { - if( !CV_IS_MAT(dpdc) || - (CV_MAT_TYPE(dpdc->type) != CV_32FC1 && CV_MAT_TYPE(dpdc->type) != CV_64FC1) || - dpdc->rows != count*2 || dpdc->cols != 2 ) - CV_Error( cv::Error::StsBadArg, "dp/dc must be 2Nx2 floating-point matrix" ); - - if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 ) - { - _dpdc.reset(cvCloneMat(dpdc)); - } - else - _dpdc.reset(cvCreateMat( 2*count, 2, CV_64FC1 )); - dpdc_p = _dpdc->data.db; - dpdc_step = _dpdc->step/sizeof(dpdc_p[0]); - } - - if( dpdk ) - { - if( !CV_IS_MAT(dpdk) || - (CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) || - dpdk->rows != count*2 || (dpdk->cols != 14 && dpdk->cols != 12 && dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) ) - 
CV_Error( cv::Error::StsBadArg, "dp/df must be 2Nx14, 2Nx12, 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" ); - - if( !distCoeffs ) - CV_Error( cv::Error::StsNullPtr, "distCoeffs is NULL while dpdk is not" ); - - if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 ) - { - _dpdk.reset(cvCloneMat(dpdk)); - } - else - _dpdk.reset(cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 )); - dpdk_p = _dpdk->data.db; - dpdk_step = _dpdk->step/sizeof(dpdk_p[0]); - } - - if( dpdo ) - { - if( !CV_IS_MAT( dpdo ) || ( CV_MAT_TYPE( dpdo->type ) != CV_32FC1 - && CV_MAT_TYPE( dpdo->type ) != CV_64FC1 ) - || dpdo->rows != count * 2 || dpdo->cols != count * 3 ) - CV_Error( cv::Error::StsBadArg, "dp/do must be 2Nx3N floating-point matrix" ); - - if( CV_MAT_TYPE( dpdo->type ) == CV_64FC1 ) - { - _dpdo.reset( cvCloneMat( dpdo ) ); - } - else - _dpdo.reset( cvCreateMat( 2 * count, 3 * count, CV_64FC1 ) ); - cvZero(_dpdo); - dpdo_p = _dpdo->data.db; - dpdo_step = _dpdo->step / sizeof( dpdo_p[0] ); - } - - calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk || dpdo; - - for( i = 0; i < count; i++ ) - { - double X = M[i].x, Y = M[i].y, Z = M[i].z; - double x = R[0]*X + R[1]*Y + R[2]*Z + t[0]; - double y = R[3]*X + R[4]*Y + R[5]*Z + t[1]; - double z = R[6]*X + R[7]*Y + R[8]*Z + t[2]; - double r2, r4, r6, a1, a2, a3, cdist, icdist2; - double xd, yd, xd0, yd0, invProj; - Vec3d vecTilt; - Vec3d dVecTilt; - Matx22d dMatTilt; - Vec2d dXdYd; - - double z0 = z; - z = z ? 1./z : 1; - x *= z; y *= z; - - r2 = x*x + y*y; - r4 = r2*r2; - r6 = r4*r2; - a1 = 2*x*y; - a2 = r2 + 2*x*x; - a3 = r2 + 2*y*y; - cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6; - icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6); - xd0 = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4; - yd0 = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4; - - // additional distortion by projecting onto a tilt plane - vecTilt = matTilt*Vec3d(xd0, yd0, 1); - invProj = vecTilt(2) ? 
1./vecTilt(2) : 1; - xd = invProj * vecTilt(0); - yd = invProj * vecTilt(1); - - m[i].x = xd*fx + cx; - m[i].y = yd*fy + cy; - - if( calc_derivatives ) - { - if( dpdc_p ) - { - dpdc_p[0] = 1; dpdc_p[1] = 0; // dp_xdc_x; dp_xdc_y - dpdc_p[dpdc_step] = 0; - dpdc_p[dpdc_step+1] = 1; - dpdc_p += dpdc_step*2; - } - - if( dpdf_p ) - { - if( fixedAspectRatio ) - { - dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio; // dp_xdf_x; dp_xdf_y - dpdf_p[dpdf_step] = 0; - dpdf_p[dpdf_step+1] = yd; - } - else - { - dpdf_p[0] = xd; dpdf_p[1] = 0; - dpdf_p[dpdf_step] = 0; - dpdf_p[dpdf_step+1] = yd; - } - dpdf_p += dpdf_step*2; - } - for (int row = 0; row < 2; ++row) - for (int col = 0; col < 2; ++col) - dMatTilt(row,col) = matTilt(row,col)*vecTilt(2) - - matTilt(2,col)*vecTilt(row); - double invProjSquare = (invProj*invProj); - dMatTilt *= invProjSquare; - if( dpdk_p ) - { - dXdYd = dMatTilt*Vec2d(x*icdist2*r2, y*icdist2*r2); - dpdk_p[0] = fx*dXdYd(0); - dpdk_p[dpdk_step] = fy*dXdYd(1); - dXdYd = dMatTilt*Vec2d(x*icdist2*r4, y*icdist2*r4); - dpdk_p[1] = fx*dXdYd(0); - dpdk_p[dpdk_step+1] = fy*dXdYd(1); - if( _dpdk->cols > 2 ) - { - dXdYd = dMatTilt*Vec2d(a1, a3); - dpdk_p[2] = fx*dXdYd(0); - dpdk_p[dpdk_step+2] = fy*dXdYd(1); - dXdYd = dMatTilt*Vec2d(a2, a1); - dpdk_p[3] = fx*dXdYd(0); - dpdk_p[dpdk_step+3] = fy*dXdYd(1); - if( _dpdk->cols > 4 ) - { - dXdYd = dMatTilt*Vec2d(x*icdist2*r6, y*icdist2*r6); - dpdk_p[4] = fx*dXdYd(0); - dpdk_p[dpdk_step+4] = fy*dXdYd(1); - - if( _dpdk->cols > 5 ) - { - dXdYd = dMatTilt*Vec2d( - x*cdist*(-icdist2)*icdist2*r2, y*cdist*(-icdist2)*icdist2*r2); - dpdk_p[5] = fx*dXdYd(0); - dpdk_p[dpdk_step+5] = fy*dXdYd(1); - dXdYd = dMatTilt*Vec2d( - x*cdist*(-icdist2)*icdist2*r4, y*cdist*(-icdist2)*icdist2*r4); - dpdk_p[6] = fx*dXdYd(0); - dpdk_p[dpdk_step+6] = fy*dXdYd(1); - dXdYd = dMatTilt*Vec2d( - x*cdist*(-icdist2)*icdist2*r6, y*cdist*(-icdist2)*icdist2*r6); - dpdk_p[7] = fx*dXdYd(0); - dpdk_p[dpdk_step+7] = fy*dXdYd(1); - if( _dpdk->cols > 8 ) - { - dXdYd = dMatTilt*Vec2d(r2, 0); - dpdk_p[8] = fx*dXdYd(0); //s1 - dpdk_p[dpdk_step+8] = fy*dXdYd(1); //s1 - dXdYd = dMatTilt*Vec2d(r4, 0); - dpdk_p[9] = fx*dXdYd(0); //s2 - dpdk_p[dpdk_step+9] = fy*dXdYd(1); //s2 - dXdYd = dMatTilt*Vec2d(0, r2); - dpdk_p[10] = fx*dXdYd(0);//s3 - dpdk_p[dpdk_step+10] = fy*dXdYd(1); //s3 - dXdYd = dMatTilt*Vec2d(0, r4); - dpdk_p[11] = fx*dXdYd(0);//s4 - dpdk_p[dpdk_step+11] = fy*dXdYd(1); //s4 - if( _dpdk->cols > 12 ) - { - dVecTilt = dMatTiltdTauX * Vec3d(xd0, yd0, 1); - dpdk_p[12] = fx * invProjSquare * ( - dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0)); - dpdk_p[dpdk_step+12] = fy*invProjSquare * ( - dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1)); - dVecTilt = dMatTiltdTauY * Vec3d(xd0, yd0, 1); - dpdk_p[13] = fx * invProjSquare * ( - dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0)); - dpdk_p[dpdk_step+13] = fy * invProjSquare * ( - dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1)); - } - } - } - } - } - dpdk_p += dpdk_step*2; - } - - if( dpdt_p ) - { - double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z }; - for( j = 0; j < 3; j++ ) - { - double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j]; - double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt; - double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt); - double da1dt = 2*(x*dydt[j] + y*dxdt[j]); - double dmxdt = (dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt + - k[2]*da1dt + k[3]*(dr2dt + 4*x*dxdt[j]) + k[8]*dr2dt + 2*r2*k[9]*dr2dt); - double dmydt = (dydt[j]*cdist*icdist2 + 
y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt + - k[2]*(dr2dt + 4*y*dydt[j]) + k[3]*da1dt + k[10]*dr2dt + 2*r2*k[11]*dr2dt); - dXdYd = dMatTilt*Vec2d(dmxdt, dmydt); - dpdt_p[j] = fx*dXdYd(0); - dpdt_p[dpdt_step+j] = fy*dXdYd(1); - } - dpdt_p += dpdt_step*2; - } - - if( dpdr_p ) - { - double dx0dr[] = - { - X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2], - X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11], - X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20] - }; - double dy0dr[] = - { - X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5], - X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14], - X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23] - }; - double dz0dr[] = - { - X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8], - X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17], - X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26] - }; - for( j = 0; j < 3; j++ ) - { - double dxdr = z*(dx0dr[j] - x*dz0dr[j]); - double dydr = z*(dy0dr[j] - y*dz0dr[j]); - double dr2dr = 2*x*dxdr + 2*y*dydr; - double dcdist_dr = (k[0] + 2*k[1]*r2 + 3*k[4]*r4)*dr2dr; - double dicdist2_dr = -icdist2*icdist2*(k[5] + 2*k[6]*r2 + 3*k[7]*r4)*dr2dr; - double da1dr = 2*(x*dydr + y*dxdr); - double dmxdr = (dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr + - k[2]*da1dr + k[3]*(dr2dr + 4*x*dxdr) + (k[8] + 2*r2*k[9])*dr2dr); - double dmydr = (dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr + - k[2]*(dr2dr + 4*y*dydr) + k[3]*da1dr + (k[10] + 2*r2*k[11])*dr2dr); - dXdYd = dMatTilt*Vec2d(dmxdr, dmydr); - dpdr_p[j] = fx*dXdYd(0); - dpdr_p[dpdr_step+j] = fy*dXdYd(1); - } - dpdr_p += dpdr_step*2; - } - - if( dpdo_p ) - { - double dxdo[] = { z * ( R[0] - x * z * z0 * R[6] ), - z * ( R[1] - x * z * z0 * R[7] ), - z * ( R[2] - x * z * z0 * R[8] ) }; - double dydo[] = { z * ( R[3] - y * z * z0 * R[6] ), - z * ( R[4] - y * z * z0 * R[7] ), - z * ( R[5] - y * z * z0 * R[8] ) }; - for( j = 0; j < 3; j++ ) - { - double dr2do = 2 * x * dxdo[j] + 2 * y * dydo[j]; - double dr4do = 2 * r2 * dr2do; - double dr6do = 3 * r4 * dr2do; - double da1do = 2 * y * dxdo[j] + 2 * x * dydo[j]; - double da2do = dr2do + 4 * x * dxdo[j]; - double da3do = dr2do + 4 * y * dydo[j]; - double dcdist_do - = k[0] * dr2do + k[1] * dr4do + k[4] * dr6do; - double dicdist2_do = -icdist2 * icdist2 - * ( k[5] * dr2do + k[6] * dr4do + k[7] * dr6do ); - double dxd0_do = cdist * icdist2 * dxdo[j] - + x * icdist2 * dcdist_do + x * cdist * dicdist2_do - + k[2] * da1do + k[3] * da2do + k[8] * dr2do - + k[9] * dr4do; - double dyd0_do = cdist * icdist2 * dydo[j] - + y * icdist2 * dcdist_do + y * cdist * dicdist2_do - + k[2] * da3do + k[3] * da1do + k[10] * dr2do - + k[11] * dr4do; - dXdYd = dMatTilt * Vec2d( dxd0_do, dyd0_do ); - dpdo_p[i * 3 + j] = fx * dXdYd( 0 ); - dpdo_p[dpdo_step + i * 3 + j] = fy * dXdYd( 1 ); - } - dpdo_p += dpdo_step * 2; - } - } - } - - if( _m != imagePoints ) - cvConvert( _m, imagePoints ); - - if( _dpdr != dpdr ) - cvConvert( _dpdr, dpdr ); - - if( _dpdt != dpdt ) - cvConvert( _dpdt, dpdt ); - - if( _dpdf != dpdf ) - cvConvert( _dpdf, dpdf ); - - if( _dpdc != dpdc ) - cvConvert( _dpdc, dpdc ); - - if( _dpdk != dpdk ) - cvConvert( _dpdk, dpdk ); - - if( _dpdo != dpdo ) - cvConvert( _dpdo, dpdo ); -} - -CV_IMPL void cvProjectPoints2( const CvMat* objectPoints, - const CvMat* r_vec, - const CvMat* t_vec, - const CvMat* A, - const CvMat* distCoeffs, - CvMat* imagePoints, CvMat* dpdr, - CvMat* dpdt, CvMat* dpdf, - CvMat* dpdc, CvMat* dpdk, - double aspectRatio ) -{ - cvProjectPoints2Internal( objectPoints, r_vec, t_vec, A, distCoeffs, imagePoints, dpdr, dpdt, - dpdf, dpdc, dpdk, NULL, aspectRatio ); -} - -CV_IMPL void cvFindExtrinsicCameraParams2( const 
CvMat* objectPoints, - const CvMat* imagePoints, const CvMat* A, - const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec, - int useExtrinsicGuess ) -{ - const int max_iter = 20; - Ptr matM, _Mxy, _m, _mn, matL; - - int i, count; - double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9]; - double MM[9] = { 0 }, U[9] = { 0 }, V[9] = { 0 }, W[3] = { 0 }; - cv::Scalar Mc; - double param[6] = { 0 }; - CvMat matA = cvMat( 3, 3, CV_64F, a ); - CvMat _Ar = cvMat( 3, 3, CV_64F, ar ); - CvMat matR = cvMat( 3, 3, CV_64F, R ); - CvMat _r = cvMat( 3, 1, CV_64F, param ); - CvMat _t = cvMat( 3, 1, CV_64F, param + 3 ); - CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val ); - CvMat _MM = cvMat( 3, 3, CV_64F, MM ); - CvMat matU = cvMat( 3, 3, CV_64F, U ); - CvMat matV = cvMat( 3, 3, CV_64F, V ); - CvMat matW = cvMat( 3, 1, CV_64F, W ); - CvMat _param = cvMat( 6, 1, CV_64F, param ); - CvMat _dpdr, _dpdt; - - CV_Assert( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) && - CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) ); - - count = MAX(objectPoints->cols, objectPoints->rows); - matM.reset(cvCreateMat( 1, count, CV_64FC3 )); - _m.reset(cvCreateMat( 1, count, CV_64FC2 )); - - cvConvertPointsHomogeneous( objectPoints, matM ); - cvConvertPointsHomogeneous( imagePoints, _m ); - cvConvert( A, &matA ); - - CV_Assert( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) && - (rvec->rows == 1 || rvec->cols == 1) && rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 ); - - CV_Assert( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) && - (tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 ); - - CV_Assert((count >= 4) || (count == 3 && useExtrinsicGuess)); // it is unsafe to call LM optimisation without an extrinsic guess in the case of 3 points. This is because there is no guarantee that it will converge on the correct solution. 
- - _mn.reset(cvCreateMat( 1, count, CV_64FC2 )); - _Mxy.reset(cvCreateMat( 1, count, CV_64FC2 )); - - // normalize image points - // (unapply the intrinsic matrix transformation and distortion) - cvUndistortPoints( _m, _mn, &matA, distCoeffs, 0, &_Ar ); - - if( useExtrinsicGuess ) - { - CvMat _r_temp = cvMat(rvec->rows, rvec->cols, - CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param ); - CvMat _t_temp = cvMat(tvec->rows, tvec->cols, - CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3); - cvConvert( rvec, &_r_temp ); - cvConvert( tvec, &_t_temp ); - } - else - { - Mc = cvAvg(matM); - cvReshape( matM, matM, 1, count ); - cvMulTransposed( matM, &_MM, 1, &_Mc ); - cvSVD( &_MM, &matW, 0, &matV, CV_SVD_MODIFY_A + CV_SVD_V_T ); - - // initialize extrinsic parameters - if( W[2]/W[1] < 1e-3) - { - // a planar structure case (all M's lie in the same plane) - double tt[3], h[9], h1_norm, h2_norm; - CvMat* R_transform = &matV; - CvMat T_transform = cvMat( 3, 1, CV_64F, tt ); - CvMat matH = cvMat( 3, 3, CV_64F, h ); - CvMat _h1, _h2, _h3; - - if( V[2]*V[2] + V[5]*V[5] < 1e-10 ) - cvSetIdentity( R_transform ); - - if( cvDet(R_transform) < 0 ) - cvScale( R_transform, R_transform, -1 ); - - cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T ); - - for( i = 0; i < count; i++ ) - { - const double* Rp = R_transform->data.db; - const double* Tp = T_transform.data.db; - const double* src = matM->data.db + i*3; - double* dst = _Mxy->data.db + i*2; - - dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0]; - dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1]; - } - - cvFindHomography( _Mxy, _mn, &matH ); - - if( cvCheckArr(&matH, CV_CHECK_QUIET) ) - { - cvGetCol( &matH, &_h1, 0 ); - _h2 = _h1; _h2.data.db++; - _h3 = _h2; _h3.data.db++; - h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]); - h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]); - - cvScale( &_h1, &_h1, 1./MAX(h1_norm, DBL_EPSILON) ); - cvScale( &_h2, &_h2, 1./MAX(h2_norm, DBL_EPSILON) ); - cvScale( &_h3, &_t, 2./MAX(h1_norm + h2_norm, DBL_EPSILON)); - cvCrossProduct( &_h1, &_h2, &_h3 ); - - cvRodrigues2( &matH, &_r ); - cvRodrigues2( &_r, &matH ); - cvMatMulAdd( &matH, &T_transform, &_t, &_t ); - cvMatMul( &matH, R_transform, &matR ); - } - else - { - cvSetIdentity( &matR ); - cvZero( &_t ); - } - - cvRodrigues2( &matR, &_r ); - } - else - { - // non-planar structure. 
Use DLT method - CV_CheckGE(count, 6, "DLT algorithm needs at least 6 points for pose estimation from 3D-2D point correspondences."); - double* L; - double LL[12*12], LW[12], LV[12*12], sc; - CvMat _LL = cvMat( 12, 12, CV_64F, LL ); - CvMat _LW = cvMat( 12, 1, CV_64F, LW ); - CvMat _LV = cvMat( 12, 12, CV_64F, LV ); - CvMat _RRt, _RR, _tt; - CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db; - CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db; - - matL.reset(cvCreateMat( 2*count, 12, CV_64F )); - L = matL->data.db; - - for( i = 0; i < count; i++, L += 24 ) - { - double x = -mn[i].x, y = -mn[i].y; - L[0] = L[16] = M[i].x; - L[1] = L[17] = M[i].y; - L[2] = L[18] = M[i].z; - L[3] = L[19] = 1.; - L[4] = L[5] = L[6] = L[7] = 0.; - L[12] = L[13] = L[14] = L[15] = 0.; - L[8] = x*M[i].x; - L[9] = x*M[i].y; - L[10] = x*M[i].z; - L[11] = x; - L[20] = y*M[i].x; - L[21] = y*M[i].y; - L[22] = y*M[i].z; - L[23] = y; - } - - cvMulTransposed( matL, &_LL, 1 ); - cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T ); - _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 ); - cvGetCols( &_RRt, &_RR, 0, 3 ); - cvGetCol( &_RRt, &_tt, 3 ); - if( cvDet(&_RR) < 0 ) - cvScale( &_RRt, &_RRt, -1 ); - sc = cvNorm(&_RR); - CV_Assert(fabs(sc) > DBL_EPSILON); - cvSVD( &_RR, &matW, &matU, &matV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T ); - cvGEMM( &matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T ); - cvScale( &_tt, &_t, cvNorm(&matR)/sc ); - cvRodrigues2( &matR, &_r ); - } - } - - cvReshape( matM, matM, 3, 1 ); - cvReshape( _mn, _mn, 2, 1 ); - - // refine extrinsic parameters using iterative algorithm - CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true); - cvCopy( &_param, solver.param ); - - for(;;) - { - CvMat *matJ = 0, *_err = 0; - const CvMat *__param = 0; - bool proceed = solver.update( __param, matJ, _err ); - cvCopy( __param, &_param ); - if( !proceed || !_err ) - break; - cvReshape( _err, _err, 2, 1 ); - if( matJ ) - { - cvGetCols( matJ, &_dpdr, 0, 3 ); - cvGetCols( matJ, &_dpdt, 3, 6 ); - cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs, - _err, &_dpdr, &_dpdt, 0, 0, 0 ); - } - else - { - cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs, - _err, 0, 0, 0, 0, 0 ); - } - cvSub(_err, _m, _err); - cvReshape( _err, _err, 1, 2*count ); - } - cvCopy( solver.param, &_param ); - - _r = cvMat( rvec->rows, rvec->cols, - CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param ); - _t = cvMat( tvec->rows, tvec->cols, - CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 ); - - cvConvert( &_r, rvec ); - cvConvert( &_t, tvec ); -} - CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* npoints, CvSize imageSize, CvMat* cameraMatrix, @@ -1797,7 +487,9 @@ static double cvCalibrateCamera2Internal( const CvMat* objectPoints, CvMat _Mi = cvMat(matM.colRange(pos, pos + ni)); CvMat _mi = cvMat(_m.colRange(pos, pos + ni)); - cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti ); + Mat r_mat = cvarrToMat(&_ri), t_mat = cvarrToMat(&_ti); + findExtrinsicCameraParams2( cvarrToMat(&_Mi), cvarrToMat(&_mi), cvarrToMat(&matA), + cvarrToMat(&_k), r_mat, t_mat, /*useExtrinsicGuess=*/0 ); } // 3. 
run the optimization @@ -1851,21 +543,16 @@ static double cvCalibrateCamera2Internal( const CvMat* objectPoints, if( calcJ ) { - CvMat _dpdr = cvMat(_Je.colRange(0, 3)); - CvMat _dpdt = cvMat(_Je.colRange(3, 6)); - CvMat _dpdf = cvMat(_Ji.colRange(0, 2)); - CvMat _dpdc = cvMat(_Ji.colRange(2, 4)); - CvMat _dpdk = cvMat(_Ji.colRange(4, NINTRINSIC)); - CvMat _dpdo = _Jo.empty() ? CvMat() : cvMat(_Jo.colRange(0, ni * 3)); - - cvProjectPoints2Internal( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt, - (flags & CALIB_FIX_FOCAL_LENGTH) ? nullptr : &_dpdf, - (flags & CALIB_FIX_PRINCIPAL_POINT) ? nullptr : &_dpdc, &_dpdk, - (_Jo.empty()) ? nullptr: &_dpdo, - (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0); + projectPoints( cvarrToMat(&_Mi), cvarrToMat(&_ri), cvarrToMat(&_ti), cvarrToMat(&matA), + cvarrToMat(&_k), cvarrToMat(&_mp), _Je.colRange(0, 3), _Je.colRange(3, 6), + (flags & CALIB_FIX_FOCAL_LENGTH) ? noArray() : _Ji.colRange(0, 2), + (flags & CALIB_FIX_PRINCIPAL_POINT) ? noArray() : _Ji.colRange(2, 4), + _Ji.colRange(4, 4 + _k.cols * _k.rows), (_Jo.empty()) ? noArray() : _Jo.colRange(0, ni * 3), + (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0); } else - cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp ); + projectPoints( cvarrToMat(&_Mi), cvarrToMat(&_ri), cvarrToMat(&_ti), cvarrToMat(&matA), + cvarrToMat(&_k), cvarrToMat(&_mp) ); cvSub( &_mp, &_mi, &_mp ); if (perViewErrors || stdDevs) @@ -1956,7 +643,7 @@ static double cvCalibrateCamera2Internal( const CvMat* objectPoints, { dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type), rvecs->data.ptr + rvecs->step*i ); - cvRodrigues2( &src, &matA ); + Rodrigues( cvarrToMat(&src), cvarrToMat(&matA) ); cvConvert( &matA, &dst ); } else @@ -2286,8 +973,11 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i R[k] = cvMat(3, 3, CV_64F, r[k]); T[k] = cvMat(3, 1, CV_64F, t[k]); - cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] ); - cvRodrigues2( &om[k], &R[k] ); + Mat r_mat = cvarrToMat(&om[k]), t_mat = cvarrToMat(&T[k]); + findExtrinsicCameraParams2( cvarrToMat(&objpt_i), cvarrToMat(&imgpt_i[k]), + cvarrToMat(&K[k]), cvarrToMat(&Dist[k]), + r_mat, t_mat, /*useExtrinsicGuess=*/0 ); + Rodrigues( cvarrToMat(&om[k]), cvarrToMat(&R[k]) ); if( k == 0 ) { // save initial om_left and T_left @@ -2301,7 +991,7 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i } cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T ); cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] ); - cvRodrigues2( &R[0], &T[0] ); + Rodrigues( cvarrToMat(&R[0]), cvarrToMat(&T[0]) ); RT0->data.db[i] = t[0][0]; RT0->data.db[i + nimages] = t[0][1]; RT0->data.db[i + nimages*2] = t[0][2]; @@ -2378,7 +1068,7 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i break; reprojErr = 0; - cvRodrigues2( &om_LR, &R_LR ); + Rodrigues( cvarrToMat(&om_LR), cvarrToMat(&R_LR) ); om[1] = cvMat(3,1,CV_64F,_omR); T[1] = cvMat(3,1,CV_64F,_tR); @@ -2433,31 +1123,33 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3); if( JtJ || JtErr ) - cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0, - &dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 ); + composeRT( cvarrToMat(&om[0]), cvarrToMat(&T[0]), cvarrToMat(&om_LR), + cvarrToMat(&T_LR), cvarrToMat(&om[1]), cvarrToMat(&T[1]), + cvarrToMat(&dr3dr1), noArray(), cvarrToMat(&dr3dr2), + noArray(), noArray(), cvarrToMat(&dt3dt1), cvarrToMat(&dt3dr2), + 
cvarrToMat(&dt3dt2 ) ); else - cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] ); + composeRT( cvarrToMat(&om[0]), cvarrToMat(&T[0]), cvarrToMat(&om_LR), + cvarrToMat(&T_LR), cvarrToMat(&om[1]), cvarrToMat(&T[1]) ); objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3); err.resize(ni*2); Je.resize(ni*2); J_LR.resize(ni*2); Ji.resize(ni*2); CvMat tmpimagePoints = cvMat(err.reshape(2, 1)); - CvMat dpdf = cvMat(Ji.colRange(0, 2)); - CvMat dpdc = cvMat(Ji.colRange(2, 4)); - CvMat dpdk = cvMat(Ji.colRange(4, NINTRINSIC)); - CvMat dpdrot = cvMat(Je.colRange(0, 3)); - CvMat dpdt = cvMat(Je.colRange(3, 6)); for( k = 0; k < 2; k++ ) { imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2); if( JtJ || JtErr ) - cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], - &tmpimagePoints, &dpdrot, &dpdt, &dpdf, &dpdc, &dpdk, + projectPoints( cvarrToMat(&objpt_i), cvarrToMat(&om[k]), cvarrToMat(&T[k]), + cvarrToMat(&K[k]), cvarrToMat(&Dist[k]), + err.reshape(2, 1), Je.colRange(0, 3), Je.colRange(3, 6), + Ji.colRange(0, 2), Ji.colRange(2, 4), Ji.colRange(4, 4 + Dist[k].cols * Dist[k].rows), noArray(), (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0); else - cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints ); + projectPoints( cvarrToMat(&objpt_i), cvarrToMat(&om[k]), cvarrToMat(&T[k]), + cvarrToMat(&K[k]), cvarrToMat(&Dist[k]), cvarrToMat(&tmpimagePoints) ); cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints ); if( solver.state == CvLevMarq::CALC_J ) @@ -2525,7 +1217,7 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i *_errNorm = reprojErr; } - cvRodrigues2( &om_LR, &R_LR ); + Rodrigues( cvarrToMat(&om_LR), cvarrToMat(&R_LR) ); if( matR->rows == 1 || matR->cols == 1 ) cvConvert( &om_LR, matR ); else @@ -2586,7 +1278,7 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i { dst = cvMat(3, 3, CV_MAT_DEPTH(rvecs->type), rvecs->data.ptr + rvecs->step*i); - cvRodrigues2( &src, &tmp ); + Rodrigues( cvarrToMat(&src), cvarrToMat(&tmp) ); cvConvert( &tmp, &dst ); } else @@ -2691,11 +1383,11 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2, int i, k; if( matR->rows == 3 && matR->cols == 3 ) - cvRodrigues2(matR, &om); // get vector rotation + Rodrigues(cvarrToMat(matR), cvarrToMat(&om)); // get vector rotation else cvConvert(matR, &om); // it's already a rotation vector cvConvertScale(&om, &om, -0.5); // get average rotation - cvRodrigues2(&om, &r_r); // rotate cameras to same orientation by averaging + Rodrigues(cvarrToMat(&om), cvarrToMat(&r_r)); // rotate cameras to same orientation by averaging cvMatMul(&r_r, matT, &t); int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1; @@ -2709,7 +1401,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2, double nw = cvNorm(&ww, 0, CV_L2); if (nw > 0.0) cvConvertScale(&ww, &ww, acos(fabs(c)/nt)/nw); - cvRodrigues2(&ww, &wR); + Rodrigues(cvarrToMat(&ww), cvarrToMat(&wR)); // apply to both views cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T); @@ -2754,7 +1446,8 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2, _a_tmp[1][1]=fc_new; _a_tmp[0][2]=0.0; _a_tmp[1][2]=0.0; - cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, &A_tmp, 0, &pts ); + projectPoints( cvarrToMat(&pts_3), cvarrToMat(k == 0 ? 
_R1 : _R2), cvarrToMat(&Z), + cvarrToMat(&A_tmp), noArray(), cvarrToMat(&pts) ); CvScalar avg = cvAvg(&pts); cc_new[k].x = (nx-1)/2 - avg.val[0]; cc_new[k].y = (ny-1)/2 - avg.val[1]; @@ -2878,87 +1571,6 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2, } -void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCoeffs, - CvSize imgSize, double alpha, - CvMat* newCameraMatrix, CvSize newImgSize, - CvRect* validPixROI, int centerPrincipalPoint ) -{ - cv::Rect_ inner, outer; - newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize; - - double M[3][3]; - CvMat matM = cvMat(3, 3, CV_64F, M); - cvConvert(cameraMatrix, &matM); - - if( centerPrincipalPoint ) - { - double cx0 = M[0][2]; - double cy0 = M[1][2]; - double cx = (newImgSize.width-1)*0.5; - double cy = (newImgSize.height-1)*0.5; - - icvGetRectangles( cameraMatrix, distCoeffs, 0, cameraMatrix, imgSize, inner, outer ); - double s0 = std::max(std::max(std::max((double)cx/(cx0 - inner.x), (double)cy/(cy0 - inner.y)), - (double)cx/(inner.x + inner.width - cx0)), - (double)cy/(inner.y + inner.height - cy0)); - double s1 = std::min(std::min(std::min((double)cx/(cx0 - outer.x), (double)cy/(cy0 - outer.y)), - (double)cx/(outer.x + outer.width - cx0)), - (double)cy/(outer.y + outer.height - cy0)); - double s = s0*(1 - alpha) + s1*alpha; - - M[0][0] *= s; - M[1][1] *= s; - M[0][2] = cx; - M[1][2] = cy; - - if( validPixROI ) - { - inner = cv::Rect_((double)((inner.x - cx0)*s + cx), - (double)((inner.y - cy0)*s + cy), - (double)(inner.width*s), - (double)(inner.height*s)); - cv::Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height)); - r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height); - *validPixROI = cvRect(r); - } - } - else - { - // Get inscribed and circumscribed rectangles in normalized - // (independent of camera matrix) coordinates - icvGetRectangles( cameraMatrix, distCoeffs, 0, 0, imgSize, inner, outer ); - - // Projection mapping inner rectangle to viewport - double fx0 = (newImgSize.width - 1) / inner.width; - double fy0 = (newImgSize.height - 1) / inner.height; - double cx0 = -fx0 * inner.x; - double cy0 = -fy0 * inner.y; - - // Projection mapping outer rectangle to viewport - double fx1 = (newImgSize.width - 1) / outer.width; - double fy1 = (newImgSize.height - 1) / outer.height; - double cx1 = -fx1 * outer.x; - double cy1 = -fy1 * outer.y; - - // Interpolate between the two optimal projections - M[0][0] = fx0*(1 - alpha) + fx1*alpha; - M[1][1] = fy0*(1 - alpha) + fy1*alpha; - M[0][2] = cx0*(1 - alpha) + cx1*alpha; - M[1][2] = cy0*(1 - alpha) + cy1*alpha; - - if( validPixROI ) - { - icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer ); - cv::Rect r = inner; - r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height); - *validPixROI = cvRect(r); - } - } - - cvConvert(&matM, newCameraMatrix); -} - - CV_IMPL int cvStereoRectifyUncalibrated( const CvMat* _points1, const CvMat* _points2, const CvMat* F0, CvSize imgSize, @@ -3273,222 +1885,6 @@ void cvReprojectImageTo3D( const CvArr* disparityImage, } -CV_IMPL void -cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ, - CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz, - CvPoint3D64f *eulerAngles) -{ - double matM[3][3], matR[3][3], matQ[3][3]; - CvMat M = cvMat(3, 3, CV_64F, matM); - CvMat R = cvMat(3, 3, CV_64F, matR); - CvMat Q = cvMat(3, 3, CV_64F, matQ); - double z, c, s; - - /* Validate parameters. 
*/ - CV_Assert( CV_IS_MAT(matrixM) && CV_IS_MAT(matrixR) && CV_IS_MAT(matrixQ) && - matrixM->cols == 3 && matrixM->rows == 3 && - CV_ARE_SIZES_EQ(matrixM, matrixR) && CV_ARE_SIZES_EQ(matrixM, matrixQ)); - - cvConvert(matrixM, &M); - - /* Find Givens rotation Q_x for x axis (left multiplication). */ - /* - ( 1 0 0 ) - Qx = ( 0 c s ), c = m33/sqrt(m32^2 + m33^2), s = m32/sqrt(m32^2 + m33^2) - ( 0 -s c ) - */ - s = std::abs(matM[2][1]) > DBL_EPSILON ? matM[2][1] : 0; - c = std::abs(matM[2][1]) > DBL_EPSILON ? matM[2][2] : 1; - z = 1./std::sqrt(c * c + s * s); - c *= z; - s *= z; - - double _Qx[3][3] = { {1, 0, 0}, {0, c, s}, {0, -s, c} }; - CvMat Qx = cvMat(3, 3, CV_64F, _Qx); - - cvMatMul(&M, &Qx, &R); - CV_DbgAssert(fabs(matR[2][1]) < FLT_EPSILON); - matR[2][1] = 0; - - /* Find Givens rotation for y axis. */ - /* - ( c 0 -s ) - Qy = ( 0 1 0 ), c = m33/sqrt(m31^2 + m33^2), s = -m31/sqrt(m31^2 + m33^2) - ( s 0 c ) - */ - s = std::abs(matR[2][0]) > DBL_EPSILON ? -matR[2][0] : 0; - c = std::abs(matR[2][0]) > DBL_EPSILON ? matR[2][2] : 1; - z = 1./std::sqrt(c * c + s * s); - c *= z; - s *= z; - - double _Qy[3][3] = { {c, 0, -s}, {0, 1, 0}, {s, 0, c} }; - CvMat Qy = cvMat(3, 3, CV_64F, _Qy); - cvMatMul(&R, &Qy, &M); - - CV_DbgAssert(fabs(matM[2][0]) < FLT_EPSILON); - matM[2][0] = 0; - - /* Find Givens rotation for z axis. */ - /* - ( c s 0 ) - Qz = (-s c 0 ), c = m22/sqrt(m21^2 + m22^2), s = m21/sqrt(m21^2 + m22^2) - ( 0 0 1 ) - */ - - s = std::abs(matM[1][0]) > DBL_EPSILON ? matM[1][0] : 0; - c = std::abs(matM[1][0]) > DBL_EPSILON ? matM[1][1] : 1; - z = 1./std::sqrt(c * c + s * s); - c *= z; - s *= z; - - double _Qz[3][3] = { {c, s, 0}, {-s, c, 0}, {0, 0, 1} }; - CvMat Qz = cvMat(3, 3, CV_64F, _Qz); - - cvMatMul(&M, &Qz, &R); - CV_DbgAssert(fabs(matR[1][0]) < FLT_EPSILON); - matR[1][0] = 0; - - // Solve the decomposition ambiguity. - // Diagonal entries of R, except the last one, shall be positive. - // Further rotate R by 180 degree if necessary - if( matR[0][0] < 0 ) - { - if( matR[1][1] < 0 ) - { - // rotate around z for 180 degree, i.e. a rotation matrix of - // [-1, 0, 0], - // [ 0, -1, 0], - // [ 0, 0, 1] - matR[0][0] *= -1; - matR[0][1] *= -1; - matR[1][1] *= -1; - - _Qz[0][0] *= -1; - _Qz[0][1] *= -1; - _Qz[1][0] *= -1; - _Qz[1][1] *= -1; - } - else - { - // rotate around y for 180 degree, i.e. a rotation matrix of - // [-1, 0, 0], - // [ 0, 1, 0], - // [ 0, 0, -1] - matR[0][0] *= -1; - matR[0][2] *= -1; - matR[1][2] *= -1; - matR[2][2] *= -1; - - cvTranspose( &Qz, &Qz ); - - _Qy[0][0] *= -1; - _Qy[0][2] *= -1; - _Qy[2][0] *= -1; - _Qy[2][2] *= -1; - } - } - else if( matR[1][1] < 0 ) - { - // ??? for some reason, we never get here ??? - - // rotate around x for 180 degree, i.e. a rotation matrix of - // [ 1, 0, 0], - // [ 0, -1, 0], - // [ 0, 0, -1] - matR[0][1] *= -1; - matR[0][2] *= -1; - matR[1][1] *= -1; - matR[1][2] *= -1; - matR[2][2] *= -1; - - cvTranspose( &Qz, &Qz ); - cvTranspose( &Qy, &Qy ); - - _Qx[1][1] *= -1; - _Qx[1][2] *= -1; - _Qx[2][1] *= -1; - _Qx[2][2] *= -1; - } - - // calculate the euler angle - if( eulerAngles ) - { - eulerAngles->x = acos(_Qx[1][1]) * (_Qx[1][2] >= 0 ? 1 : -1) * (180.0 / CV_PI); - eulerAngles->y = acos(_Qy[0][0]) * (_Qy[2][0] >= 0 ? 1 : -1) * (180.0 / CV_PI); - eulerAngles->z = acos(_Qz[0][0]) * (_Qz[0][1] >= 0 ? 1 : -1) * (180.0 / CV_PI); - } - - /* Calculate orthogonal matrix. 
*/ - /* - Q = QzT * QyT * QxT - */ - cvGEMM( &Qz, &Qy, 1, 0, 0, &M, CV_GEMM_A_T + CV_GEMM_B_T ); - cvGEMM( &M, &Qx, 1, 0, 0, &Q, CV_GEMM_B_T ); - - /* Save R and Q matrices. */ - cvConvert( &R, matrixR ); - cvConvert( &Q, matrixQ ); - - if( matrixQx ) - cvConvert(&Qx, matrixQx); - if( matrixQy ) - cvConvert(&Qy, matrixQy); - if( matrixQz ) - cvConvert(&Qz, matrixQz); -} - - -CV_IMPL void -cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr, - CvMat *rotMatr, CvMat *posVect, - CvMat *rotMatrX, CvMat *rotMatrY, - CvMat *rotMatrZ, CvPoint3D64f *eulerAngles) -{ - double tmpProjMatrData[16], tmpMatrixDData[16], tmpMatrixVData[16]; - CvMat tmpProjMatr = cvMat(4, 4, CV_64F, tmpProjMatrData); - CvMat tmpMatrixD = cvMat(4, 4, CV_64F, tmpMatrixDData); - CvMat tmpMatrixV = cvMat(4, 4, CV_64F, tmpMatrixVData); - CvMat tmpMatrixM; - - /* Validate parameters. */ - if(projMatr == 0 || calibMatr == 0 || rotMatr == 0 || posVect == 0) - CV_Error(cv::Error::StsNullPtr, "Some of parameters is a NULL pointer!"); - - if(!CV_IS_MAT(projMatr) || !CV_IS_MAT(calibMatr) || !CV_IS_MAT(rotMatr) || !CV_IS_MAT(posVect)) - CV_Error(cv::Error::StsUnsupportedFormat, "Input parameters must be matrices!"); - - if(projMatr->cols != 4 || projMatr->rows != 3) - CV_Error(cv::Error::StsUnmatchedSizes, "Size of projection matrix must be 3x4!"); - - if(calibMatr->cols != 3 || calibMatr->rows != 3 || rotMatr->cols != 3 || rotMatr->rows != 3) - CV_Error(cv::Error::StsUnmatchedSizes, "Size of calibration and rotation matrices must be 3x3!"); - - if(posVect->cols != 1 || posVect->rows != 4) - CV_Error(cv::Error::StsUnmatchedSizes, "Size of position vector must be 4x1!"); - - /* Compute position vector. */ - cvSetZero(&tmpProjMatr); // Add zero row to make matrix square. - int i, k; - for(i = 0; i < 3; i++) - for(k = 0; k < 4; k++) - cvmSet(&tmpProjMatr, i, k, cvmGet(projMatr, i, k)); - - cvSVD(&tmpProjMatr, &tmpMatrixD, NULL, &tmpMatrixV, CV_SVD_MODIFY_A + CV_SVD_V_T); - - /* Save position vector. */ - for(i = 0; i < 4; i++) - cvmSet(posVect, i, 0, cvmGet(&tmpMatrixV, 3, i)); // Solution is last row of V. - - /* Compute calibration and rotation matrices via RQ decomposition. */ - cvGetCols(projMatr, &tmpMatrixM, 0, 3); // M is first square matrix of P. - - CV_Assert(cvDet(&tmpMatrixM) != 0.0); // So far only finite cameras could be decomposed, so M has to be nonsingular [det(M) != 0]. - - cvRQDecomp3x3(&tmpMatrixM, calibMatr, rotMatr, rotMatrX, rotMatrY, rotMatrZ, eulerAngles); -} - - namespace cv { @@ -3633,144 +2029,6 @@ static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype, int outputSize = 14) } // namespace cv - -void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian) -{ - CV_INSTRUMENT_REGION(); - - Mat src = _src.getMat(); - const Size srcSz = src.size(); - CV_Check(srcSz, srcSz == Size(3, 1) || srcSz == Size(1, 3) || - (srcSz == Size(1, 1) && src.channels() == 3) || - srcSz == Size(3, 3), - "Input matrix must be 1x3 or 3x1 for a rotation vector, or 3x3 for a rotation matrix"); - - bool v2m = src.cols == 1 || src.rows == 1; - _dst.create(3, v2m ? 3 : 1, src.depth()); - Mat dst = _dst.getMat(); - CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _cjacobian; - if( _jacobian.needed() ) - { - _jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth()); - _cjacobian = cvMat(_jacobian.getMat()); - } - bool ok = cvRodrigues2(&_csrc, &_cdst, _jacobian.needed() ? 
&_cjacobian : 0) > 0; - if( !ok ) - dst = Scalar(0); -} - -void cv::matMulDeriv( InputArray _Amat, InputArray _Bmat, - OutputArray _dABdA, OutputArray _dABdB ) -{ - CV_INSTRUMENT_REGION(); - - Mat A = _Amat.getMat(), B = _Bmat.getMat(); - _dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type()); - _dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type()); - Mat dABdA = _dABdA.getMat(), dABdB = _dABdB.getMat(); - CvMat matA = cvMat(A), matB = cvMat(B), c_dABdA = cvMat(dABdA), c_dABdB = cvMat(dABdB); - cvCalcMatMulDeriv(&matA, &matB, &c_dABdA, &c_dABdB); -} - - -void cv::composeRT( InputArray _rvec1, InputArray _tvec1, - InputArray _rvec2, InputArray _tvec2, - OutputArray _rvec3, OutputArray _tvec3, - OutputArray _dr3dr1, OutputArray _dr3dt1, - OutputArray _dr3dr2, OutputArray _dr3dt2, - OutputArray _dt3dr1, OutputArray _dt3dt1, - OutputArray _dt3dr2, OutputArray _dt3dt2 ) -{ - Mat rvec1 = _rvec1.getMat(), tvec1 = _tvec1.getMat(); - Mat rvec2 = _rvec2.getMat(), tvec2 = _tvec2.getMat(); - int rtype = rvec1.type(); - _rvec3.create(rvec1.size(), rtype); - _tvec3.create(tvec1.size(), rtype); - Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat(); - - CvMat c_rvec1 = cvMat(rvec1), c_tvec1 = cvMat(tvec1), c_rvec2 = cvMat(rvec2), - c_tvec2 = cvMat(tvec2), c_rvec3 = cvMat(rvec3), c_tvec3 = cvMat(tvec3); - CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2; - CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0; -#define CV_COMPOSE_RT_PARAM(name) \ - Mat name; \ - if (_ ## name.needed())\ - { \ - _ ## name.create(3, 3, rtype); \ - name = _ ## name.getMat(); \ - p_ ## name = &(c_ ## name = cvMat(name)); \ - } - - CV_COMPOSE_RT_PARAM(dr3dr1); CV_COMPOSE_RT_PARAM(dr3dt1); - CV_COMPOSE_RT_PARAM(dr3dr2); CV_COMPOSE_RT_PARAM(dr3dt2); - CV_COMPOSE_RT_PARAM(dt3dr1); CV_COMPOSE_RT_PARAM(dt3dt1); - CV_COMPOSE_RT_PARAM(dt3dr2); CV_COMPOSE_RT_PARAM(dt3dt2); -#undef CV_COMPOSE_RT_PARAM - - cvComposeRT(&c_rvec1, &c_tvec1, &c_rvec2, &c_tvec2, &c_rvec3, &c_tvec3, - p_dr3dr1, p_dr3dt1, p_dr3dr2, p_dr3dt2, - p_dt3dr1, p_dt3dt1, p_dt3dr2, p_dt3dt2); -} - - -void cv::projectPoints( InputArray _opoints, - InputArray _rvec, - InputArray _tvec, - InputArray _cameraMatrix, - InputArray _distCoeffs, - OutputArray _ipoints, - OutputArray _jacobian, - double aspectRatio ) -{ - Mat opoints = _opoints.getMat(); - int npoints = opoints.checkVector(3), depth = opoints.depth(); - if (npoints < 0) - opoints = opoints.t(); - npoints = opoints.checkVector(3); - CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F)); - - if (opoints.cols == 3) - opoints = opoints.reshape(3); - - CvMat dpdrot, dpdt, dpdf, dpdc, dpddist; - CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0; - - CV_Assert( _ipoints.needed() ); - - _ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true); - Mat imagePoints = _ipoints.getMat(); - CvMat c_imagePoints = cvMat(imagePoints); - CvMat c_objectPoints = cvMat(opoints); - Mat cameraMatrix = _cameraMatrix.getMat(); - - Mat rvec = _rvec.getMat(), tvec = _tvec.getMat(); - CvMat c_cameraMatrix = cvMat(cameraMatrix); - CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec); - - double dc0buf[5]={0}; - Mat dc0(5,1,CV_64F,dc0buf); - Mat distCoeffs = _distCoeffs.getMat(); - if( distCoeffs.empty() ) - distCoeffs = dc0; - CvMat c_distCoeffs = cvMat(distCoeffs); - int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1; - - Mat jacobian; - if( _jacobian.needed() ) - { - _jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F); - 
jacobian = _jacobian.getMat(); - pdpdrot = &(dpdrot = cvMat(jacobian.colRange(0, 3))); - pdpdt = &(dpdt = cvMat(jacobian.colRange(3, 6))); - pdpdf = &(dpdf = cvMat(jacobian.colRange(6, 8))); - pdpdc = &(dpdc = cvMat(jacobian.colRange(8, 10))); - pdpddist = &(dpddist = cvMat(jacobian.colRange(10, 10+ndistCoeffs))); - } - - cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs, - &c_imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio ); -} - cv::Mat cv::initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio ) @@ -4244,108 +2502,6 @@ bool cv::stereoRectifyUncalibrated( InputArray _points1, InputArray _points2, return cvStereoRectifyUncalibrated(&c_pt1, &c_pt2, p_F, cvSize(imgSize), &c_H1, &c_H2, threshold) > 0; } -cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix, - InputArray _distCoeffs, - Size imgSize, double alpha, Size newImgSize, - Rect* validPixROI, bool centerPrincipalPoint ) -{ - CV_INSTRUMENT_REGION(); - - Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat(); - CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs); - - Mat newCameraMatrix(3, 3, CV_MAT_TYPE(c_cameraMatrix.type)); - CvMat c_newCameraMatrix = cvMat(newCameraMatrix); - - cvGetOptimalNewCameraMatrix(&c_cameraMatrix, &c_distCoeffs, cvSize(imgSize), - alpha, &c_newCameraMatrix, - cvSize(newImgSize), (CvRect*)validPixROI, (int)centerPrincipalPoint); - return newCameraMatrix; -} - - -cv::Vec3d cv::RQDecomp3x3( InputArray _Mmat, - OutputArray _Rmat, - OutputArray _Qmat, - OutputArray _Qx, - OutputArray _Qy, - OutputArray _Qz ) -{ - CV_INSTRUMENT_REGION(); - - Mat M = _Mmat.getMat(); - _Rmat.create(3, 3, M.type()); - _Qmat.create(3, 3, M.type()); - Mat Rmat = _Rmat.getMat(); - Mat Qmat = _Qmat.getMat(); - Vec3d eulerAngles; - - CvMat matM = cvMat(M), matR = cvMat(Rmat), matQ = cvMat(Qmat); -#define CV_RQDecomp3x3_PARAM(name) \ - Mat name; \ - CvMat c_ ## name, *p ## name = NULL; \ - if( _ ## name.needed() ) \ - { \ - _ ## name.create(3, 3, M.type()); \ - name = _ ## name.getMat(); \ - c_ ## name = cvMat(name); p ## name = &c_ ## name; \ - } - - CV_RQDecomp3x3_PARAM(Qx); - CV_RQDecomp3x3_PARAM(Qy); - CV_RQDecomp3x3_PARAM(Qz); -#undef CV_RQDecomp3x3_PARAM - cvRQDecomp3x3(&matM, &matR, &matQ, pQx, pQy, pQz, (CvPoint3D64f*)&eulerAngles[0]); - return eulerAngles; -} - - -void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraMatrix, - OutputArray _rotMatrix, OutputArray _transVect, - OutputArray _rotMatrixX, OutputArray _rotMatrixY, - OutputArray _rotMatrixZ, OutputArray _eulerAngles ) -{ - CV_INSTRUMENT_REGION(); - - Mat projMatrix = _projMatrix.getMat(); - int type = projMatrix.type(); - _cameraMatrix.create(3, 3, type); - _rotMatrix.create(3, 3, type); - _transVect.create(4, 1, type); - Mat cameraMatrix = _cameraMatrix.getMat(); - Mat rotMatrix = _rotMatrix.getMat(); - Mat transVect = _transVect.getMat(); - CvMat c_projMatrix = cvMat(projMatrix), c_cameraMatrix = cvMat(cameraMatrix); - CvMat c_rotMatrix = cvMat(rotMatrix), c_transVect = cvMat(transVect); - CvPoint3D64f *p_eulerAngles = 0; - -#define CV_decomposeProjectionMatrix_PARAM(name) \ - Mat name; \ - CvMat c_ ## name, *p_ ## name = NULL; \ - if( _ ## name.needed() ) \ - { \ - _ ## name.create(3, 3, type); \ - name = _ ## name.getMat(); \ - c_ ## name = cvMat(name); p_ ## name = &c_ ## name; \ - } - - CV_decomposeProjectionMatrix_PARAM(rotMatrixX); - 
CV_decomposeProjectionMatrix_PARAM(rotMatrixY); - CV_decomposeProjectionMatrix_PARAM(rotMatrixZ); -#undef CV_decomposeProjectionMatrix_PARAM - - if( _eulerAngles.needed() ) - { - _eulerAngles.create(3, 1, CV_64F, -1, true); - p_eulerAngles = _eulerAngles.getMat().ptr(); - } - - cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix, - &c_transVect, p_rotMatrixX, p_rotMatrixY, - p_rotMatrixZ, p_eulerAngles); -} - - namespace cv { diff --git a/modules/calib3d/src/calibration_base.cpp b/modules/calib3d/src/calibration_base.cpp new file mode 100644 index 0000000000..c047a45e8e --- /dev/null +++ b/modules/calib3d/src/calibration_base.cpp @@ -0,0 +1,1644 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include "hal_replacement.hpp" +#include "distortion_model.hpp" +#include "calib3d_c_api.h" +#include +#include + +using namespace cv; + +/* + This is straight-forward port v3 of Matlab calibration engine by Jean-Yves Bouguet + that is (in a large extent) based on the paper: + Z. Zhang. "A flexible new technique for camera calibration". + IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000. + The 1st initial port was done by Valery Mosyagin. 
+*/ + +// reimplementation of dAB.m +void cv::matMulDeriv( InputArray A_, InputArray B_, OutputArray dABdA_, OutputArray dABdB_ ) +{ + CV_INSTRUMENT_REGION(); + + Mat A = A_.getMat(), B = B_.getMat(); + int type = A.type(); + CV_Assert(type == B.type()); + CV_Assert(type == CV_32F || type == CV_64F); + CV_Assert(A.cols == B.rows); + + dABdA_.create(A.rows*B.cols, A.rows*A.cols, type); + dABdB_.create(A.rows*B.cols, B.rows*B.cols, type); + Mat dABdA = dABdA_.getMat(), dABdB = dABdB_.getMat(); + + int M = A.rows, L = A.cols, N = B.cols; + int bstep = (int)(B.step/B.elemSize()); + + if( type == CV_32F ) + { + for( int i = 0; i < M*N; i++ ) + { + int j, i1 = i / N, i2 = i % N; + + const float* a = A.ptr(i1); + const float* b = B.ptr() + i2; + float* dcda = dABdA.ptr(i); + float* dcdb = dABdB.ptr(i); + + memset(dcda, 0, M*L*sizeof(dcda[0])); + memset(dcdb, 0, L*N*sizeof(dcdb[0])); + + for( j = 0; j < L; j++ ) + { + dcda[i1*L + j] = b[j*bstep]; + dcdb[j*N + i2] = a[j]; + } + } + } + else + { + for( int i = 0; i < M*N; i++ ) + { + int j, i1 = i / N, i2 = i % N; + + const double* a = A.ptr(i1); + const double* b = B.ptr() + i2; + double* dcda = dABdA.ptr(i); + double* dcdb = dABdB.ptr(i); + + memset(dcda, 0, M*L*sizeof(dcda[0])); + memset(dcdb, 0, L*N*sizeof(dcdb[0])); + + for( j = 0; j < L; j++ ) + { + dcda[i1*L + j] = b[j*bstep]; + dcdb[j*N + i2] = a[j]; + } + } + } +} + +void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian) +{ + CV_INSTRUMENT_REGION(); + + Mat src = _src.getMat(); + const Size srcSz = src.size(); + int srccn = src.channels(); + int depth = src.depth(); + CV_Check(srcSz, ((srcSz == Size(3, 1) || srcSz == Size(1, 3)) && srccn == 1) || + (srcSz == Size(1, 1) && srccn == 3) || + (srcSz == Size(3, 3) && srccn == 1), + "Input matrix must be 1x3 or 3x1 for a rotation vector, or 3x3 for a rotation matrix"); + + bool v2m = src.cols == 1 || src.rows == 1; + _dst.create(3, v2m ? 3 : 1, depth); + Mat dst = _dst.getMat(), jacobian; + if( _jacobian.needed() ) + { + _jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth()); + jacobian = _jacobian.getMat(); + } + + double J[27] = {0}; + Mat matJ( 3, 9, CV_64F, J); + + dst.setTo(0); + + if( depth != CV_32F && depth != CV_64F ) + CV_Error( cv::Error::StsUnsupportedFormat, "The matrices must have 32f or 64f data type" ); + + if( v2m ) + { + int sstep = src.rows > 1 ? (int)src.step1() : 1; + + Point3d r; + if( depth == CV_32F ) + { + const float* sptr = src.ptr(); + r.x = sptr[0]; + r.y = sptr[sstep]; + r.z = sptr[sstep*2]; + } + else + { + const double* sptr = src.ptr(); + r.x = sptr[0]; + r.y = sptr[sstep]; + r.z = sptr[sstep*2]; + } + + double theta = norm(r); + if( theta < DBL_EPSILON ) + { + dst = Mat::eye(3, 3, depth); + if( jacobian.data ) + { + memset( J, 0, sizeof(J) ); + J[5] = J[15] = J[19] = -1; + J[7] = J[11] = J[21] = 1; + } + } + else + { + double c = std::cos(theta); + double s = std::sin(theta); + double c1 = 1. - c; + double itheta = theta ? 
1./theta : 0.; + + r *= itheta; + + Matx33d rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z ); + Matx33d r_x( 0, -r.z, r.y, + r.z, 0, -r.x, + -r.y, r.x, 0 ); + + // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] + Matx33d R = c*Matx33d::eye() + c1*rrt + s*r_x; + Mat(R).convertTo(dst, depth); + + if( jacobian.data ) + { + const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }; + double drrt[] = { r.x+r.x, r.y, r.z, r.y, 0, 0, r.z, 0, 0, + 0, r.x, 0, r.x, r.y+r.y, r.z, 0, r.z, 0, + 0, 0, r.x, 0, 0, r.y, r.x, r.y, r.z+r.z }; + double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0, + 0, 0, 1, 0, 0, 0, -1, 0, 0, + 0, -1, 0, 1, 0, 0, 0, 0, 0 }; + for( int i = 0; i < 3; i++ ) + { + double ri = i == 0 ? r.x : i == 1 ? r.y : r.z; + double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta; + double a3 = (c - s*itheta)*ri, a4 = s*itheta; + for( int k = 0; k < 9; k++ ) + J[i*9+k] = a0*I[k] + a1*rrt.val[k] + a2*drrt[i*9+k] + + a3*r_x.val[k] + a4*d_r_x_[i*9+k]; + } + } + } + } + else + { + Matx33d U, Vt; + Vec3d W; + double theta, s, c; + int dstep = dst.rows > 1 ? (int)dst.step1() : 1; + + Matx33d R; + src.convertTo(R, CV_64F); + + if( !checkRange(R, true, NULL, -100, 100) ) + { + dst.setTo(0); + if (jacobian.data) + jacobian.setTo(0); + return; + } + + SVD::compute(R, W, U, Vt); + R = U*Vt; + + Point3d r(R(2, 1) - R(1, 2), R(0, 2) - R(2, 0), R(1, 0) - R(0, 1)); + + s = std::sqrt((r.x*r.x + r.y*r.y + r.z*r.z)*0.25); + c = (R(0, 0) + R(1, 1) + R(2, 2) - 1)*0.5; + c = c > 1. ? 1. : c < -1. ? -1. : c; + theta = std::acos(c); + + if( s < 1e-5 ) + { + double t; + + if( c > 0 ) + r = Point3d(0, 0, 0); + else + { + t = (R(0, 0) + 1)*0.5; + r.x = std::sqrt(MAX(t,0.)); + t = (R(1, 1) + 1)*0.5; + r.y = std::sqrt(MAX(t,0.))*(R(0, 1) < 0 ? -1. : 1.); + t = (R(2, 2) + 1)*0.5; + r.z = std::sqrt(MAX(t,0.))*(R(0, 2) < 0 ? -1. 
: 1.); + if( fabs(r.x) < fabs(r.y) && fabs(r.x) < fabs(r.z) && (R(1, 2) > 0) != (r.y*r.z > 0) ) + r.z = -r.z; + theta /= norm(r); + r *= theta; + } + + if( jacobian.data ) + { + memset( J, 0, sizeof(J) ); + if( c > 0 ) + { + J[5] = J[15] = J[19] = -0.5; + J[7] = J[11] = J[21] = 0.5; + } + } + } + else + { + double vth = 1/(2*s); + + if( jacobian.data ) + { + double t, dtheta_dtr = -1./s; + // var1 = [vth;theta] + // var = [om1;var1] = [om1;vth;theta] + double dvth_dtheta = -vth*c/s; + double d1 = 0.5*dvth_dtheta*dtheta_dtr; + double d2 = 0.5*dtheta_dtr; + // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR + double dvardR[5*9] = + { + 0, 0, 0, 0, 0, 1, 0, -1, 0, + 0, 0, -1, 0, 0, 0, 1, 0, 0, + 0, 1, 0, -1, 0, 0, 0, 0, 0, + d1, 0, 0, 0, d1, 0, 0, 0, d1, + d2, 0, 0, 0, d2, 0, 0, 0, d2 + }; + // var2 = [om;theta] + double dvar2dvar[] = + { + vth, 0, 0, r.x, 0, + 0, vth, 0, r.y, 0, + 0, 0, vth, r.z, 0, + 0, 0, 0, 0, 1 + }; + double domegadvar2[] = + { + theta, 0, 0, r.x*vth, + 0, theta, 0, r.y*vth, + 0, 0, theta, r.z*vth + }; + + Mat _dvardR( 5, 9, CV_64FC1, dvardR ); + Mat _dvar2dvar( 4, 5, CV_64FC1, dvar2dvar ); + Mat _domegadvar2( 3, 4, CV_64FC1, domegadvar2 ); + double t0[3*5]; + Mat _t0( 3, 5, CV_64FC1, t0 ); + + gemm(_domegadvar2, _dvar2dvar, 1, noArray(), 0, _t0); + gemm(_t0, _dvardR, 1, noArray(), 0, matJ); + CV_Assert(matJ.ptr() == J); + + // transpose every row of matJ (treat the rows as 3x3 matrices) + CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t); + CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t); + CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t); + } + + vth *= theta; + r *= vth; + } + + if( depth == CV_32F ) + { + float* dptr = dst.ptr(); + dptr[0] = (float)r.x; + dptr[dstep] = (float)r.y; + dptr[dstep*2] = (float)r.z; + } + else + { + double* dptr = dst.ptr(); + dptr[0] = r.x; + dptr[dstep] = r.y; + dptr[dstep*2] = r.z; + } + } + + if( jacobian.data ) + { + if( depth == CV_32F ) + { + if( jacobian.rows == matJ.rows ) + matJ.convertTo(jacobian, CV_32F); + else + { + float Jf[3*9]; + Mat _Jf( matJ.rows, matJ.cols, CV_32FC1, Jf ); + matJ.convertTo(_Jf, CV_32F); + transpose(_Jf, jacobian); + } + } + else if( jacobian.rows == matJ.rows ) + matJ.copyTo(jacobian); + else + transpose(matJ, jacobian); + } +} + +// reimplementation of compose_motion.m +void cv::composeRT( InputArray _rvec1, InputArray _tvec1, + InputArray _rvec2, InputArray _tvec2, + OutputArray _rvec3, OutputArray _tvec3, + OutputArray _dr3dr1, OutputArray _dr3dt1, + OutputArray _dr3dr2, OutputArray _dr3dt2, + OutputArray _dt3dr1, OutputArray _dt3dt1, + OutputArray _dt3dr2, OutputArray _dt3dt2 ) +{ + Mat rvec1 = _rvec1.getMat(), tvec1 = _tvec1.getMat(); + Mat rvec2 = _rvec2.getMat(), tvec2 = _tvec2.getMat(); + int rtype = rvec1.type(); + + CV_Assert(rtype == CV_32F || rtype == CV_64F); + Size rsz = rvec1.size(); + CV_Assert(rsz == Size(3, 1) || rsz == Size(1, 3)); + CV_Assert(rsz == rvec2.size() && rsz == tvec1.size() && rsz == tvec2.size()); + + Mat dr3dr1, dr3dt1, dr3dr2, dr3dt2; + Mat dt3dr1, dt3dt1, dt3dr2, dt3dt2; + if(_dr3dr1.needed()) { + _dr3dr1.create(3, 3, rtype); + dr3dr1 = _dr3dr1.getMat(); + } + if(_dr3dt1.needed()) { + _dr3dt1.create(3, 3, rtype); + dr3dt1 = _dr3dt1.getMat(); + } + if(_dr3dr2.needed()) { + _dr3dr2.create(3, 3, rtype); + dr3dr2 = _dr3dr2.getMat(); + } + if(_dr3dt2.needed()) { + _dr3dt2.create(3, 3, rtype); + dr3dt2 = _dr3dt2.getMat(); + } + if(_dt3dr1.needed()) { + _dt3dr1.create(3, 
3, rtype); + dt3dr1 = _dt3dr1.getMat(); + } + if(_dt3dt1.needed()) { + _dt3dt1.create(3, 3, rtype); + dt3dt1 = _dt3dt1.getMat(); + } + if(_dt3dr2.needed()) { + _dt3dr2.create(3, 3, rtype); + dt3dr2 = _dt3dr2.getMat(); + } + if(_dt3dt2.needed()) { + _dt3dt2.create(3, 3, rtype); + dt3dt2 = _dt3dt2.getMat(); + } + + double _r1[3], _r2[3]; + double _R1[9], _d1[9*3], _R2[9], _d2[9*3]; + Mat r1(rsz,CV_64F,_r1), r2(rsz,CV_64F,_r2); + Mat R1(3,3,CV_64F,_R1), R2(3,3,CV_64F,_R2); + Mat dR1dr1(3,9,CV_64F,_d1), dR2dr2(3,9,CV_64F,_d2); + + rvec1.convertTo(r1, CV_64F); + rvec2.convertTo(r2, CV_64F); + + Rodrigues(r1, R1, dR1dr1); + Rodrigues(r2, R2, dR2dr2); + CV_Assert(dR1dr1.ptr() == _d1); + CV_Assert(dR2dr2.ptr() == _d2); + + double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3]; + double _W1[9*3], _W2[3*3]; + Mat r3(3,1,CV_64F,_r3), R3(3,3,CV_64F,_R3); + Mat dR3dR1(9,9,CV_64F,_dR3dR1), dR3dR2(9,9,CV_64F,_dR3dR2); + Mat dr3dR3(9,3,CV_64F,_dr3dR3); + Mat W1(3,9,CV_64F,_W1), W2(3,3,CV_64F,_W2); + + R3 = R2*R1; + matMulDeriv(R2, R1, dR3dR2, dR3dR1); + Rodrigues(R3, r3, dr3dR3); + CV_Assert(dr3dR3.ptr() == _dr3dR3); + + r3.convertTo(_rvec3, rtype); + + if( dr3dr1.data ) + { + gemm(dr3dR3, dR3dR1, 1, noArray(), 0, W1, GEMM_1_T); + gemm(W1, dR1dr1, 1, noArray(), 0, W2, GEMM_2_T); + W2.convertTo(dr3dr1, rtype); + } + + if( dr3dr2.data ) + { + gemm(dr3dR3, dR3dR2, 1, noArray(), 0, W1, GEMM_1_T); + gemm(W1, dR2dr2, 1, noArray(), 0, W2, GEMM_2_T); + W2.convertTo(dr3dr2, rtype); + } + + if( dr3dt1.data ) + dr3dt1.setTo(0); + if( dr3dt2.data ) + dr3dt2.setTo(0); + + double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3]; + Mat t1(3,1,CV_64F,_t1), t2(3,1,CV_64F,_t2); + Mat t3(3,1,CV_64F,_t3); + Mat dxdR2(3, 9, CV_64F, _dxdR2); + Mat dxdt1(3, 3, CV_64F, _dxdt1); + Mat W3(3, 3, CV_64F, _W3); + + tvec1.convertTo(t1, CV_64F); + tvec2.convertTo(t2, CV_64F); + gemm(R2, t1, 1, t2, 1, t3); + t3.convertTo(_tvec3, rtype); + + if( dt3dr2.data || dt3dt1.data ) + { + matMulDeriv(R2, t1, dxdR2, dxdt1); + if( dt3dr2.data ) + { + gemm(dxdR2, dR2dr2, 1, noArray(), 0, W3, GEMM_2_T); + W3.convertTo(dt3dr2, rtype); + } + if( dt3dt1.data ) + dxdt1.convertTo(dt3dt1, rtype); + } + + if( dt3dt2.data ) + setIdentity(dt3dt2); + if( dt3dr1.data ) + dt3dr1.setTo(0); +} + +static const char* cvDistCoeffErr = + "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 1x12, 12x1, 1x14 or 14x1 floating-point vector"; + +void cv::projectPoints( InputArray _objectPoints, + InputArray _rvec, InputArray _tvec, + InputArray _cameraMatrix, InputArray _distCoeffs, + OutputArray _imagePoints, OutputArray _dpdr, + OutputArray _dpdt, OutputArray _dpdf, + OutputArray _dpdc, OutputArray _dpdk, + OutputArray _dpdo, double aspectRatio) +{ + Mat _m, objectPoints = _objectPoints.getMat(); + Mat dpdr, dpdt, dpdc, dpdf, dpdk, dpdo; + + int i, j; + double R[9], dRdr[27], t[3], a[9], k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, cx, cy; + Matx33d matTilt = Matx33d::eye(); + Matx33d dMatTiltdTauX(0,0,0,0,0,0,0,-1,0); + Matx33d dMatTiltdTauY(0,0,0,0,0,0,1,0,0); + Mat matR( 3, 3, CV_64F, R ), _dRdr( 3, 9, CV_64F, dRdr ); + double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0; + double* dpdo_p = 0; + int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0; + int dpdo_step = 0; + bool fixedAspectRatio = aspectRatio > FLT_EPSILON; + + int objpt_depth = objectPoints.depth(); + int objpt_cn = objectPoints.channels(); + int total = (int)(objectPoints.total()*objectPoints.channels()); + int count = total / 
3; + if(total % 3 != 0) + { + //we have stopped support of homogeneous coordinates because it cause ambiguity in interpretation of the input data + CV_Error( cv::Error::StsBadArg, "Homogeneous coordinates are not supported" ); + } + count = total / 3; + CV_Assert(objpt_depth == CV_32F || objpt_depth == CV_64F); + CV_Assert((objectPoints.rows == 1 && objpt_cn == 3) || + (objectPoints.rows == count && objpt_cn*objectPoints.cols == 3) || + (objectPoints.rows == 3 && objpt_cn == 1 && objectPoints.cols == count)); + + if (objectPoints.rows == 3 && objectPoints.cols == count) { + Mat temp; + transpose(objectPoints, temp); + objectPoints = temp; + } + + CV_Assert( _imagePoints.needed() ); + _imagePoints.create(count, 1, CV_MAKETYPE(objpt_depth, 2), -1, true); + Mat ipoints = _imagePoints.getMat(); + + Mat rvec = _rvec.getMat(), tvec = _tvec.getMat(); + if(!((rvec.depth() == CV_32F || rvec.depth() == CV_64F) && + (rvec.size() == Size(3, 3) || + (rvec.rows == 1 && rvec.cols*rvec.channels() == 3) || + (rvec.rows == 3 && rvec.cols*rvec.channels() == 1)))) { + CV_Error(cv::Error::StsBadArg, "rvec must be 3x3 or 1x3 or 3x1 floating-point array"); + } + + if( rvec.size() == Size(3, 3) ) + { + rvec.convertTo(matR, CV_64F); + Vec3d rvec_d; + Rodrigues(matR, rvec_d); + Rodrigues(rvec_d, matR, _dRdr); + rvec.convertTo(matR, CV_64F); + } + else + { + double r[3]; + Mat _r(rvec.size(), CV_64FC(rvec.channels()), r); + rvec.convertTo(_r, CV_64F); + Rodrigues(_r, matR, _dRdr); + } + + if(!((tvec.depth() == CV_32F || tvec.depth() == CV_64F) && + ((tvec.rows == 1 && tvec.cols*tvec.channels() == 3) || + (tvec.rows == 3 && tvec.cols*tvec.channels() == 1)))) { + CV_Error(cv::Error::StsBadArg, "tvec must be 1x3 or 3x1 floating-point array"); + } + + Mat _t(tvec.size(), CV_64FC(tvec.channels()), t); + tvec.convertTo(_t, CV_64F); + + Mat cameraMatrix = _cameraMatrix.getMat(); + + if(cameraMatrix.size() != Size(3, 3) || cameraMatrix.channels() != 1) + CV_Error( cv::Error::StsBadArg, "Intrinsic parameters must be 3x3 floating-point matrix" ); + Mat _a(3, 3, CV_64F, a); + cameraMatrix.convertTo(_a, CV_64F); + + fx = a[0]; fy = a[4]; + cx = a[2]; cy = a[5]; + + if( fixedAspectRatio ) + fx = fy*aspectRatio; + + Mat distCoeffs = _distCoeffs.getMat(); + int ktotal = 0; + if( distCoeffs.data ) + { + int kcn = distCoeffs.channels(); + ktotal = (int)distCoeffs.total()*kcn; + if( (distCoeffs.rows != 1 && distCoeffs.cols != 1) || + (ktotal != 4 && ktotal != 5 && ktotal != 8 && ktotal != 12 && ktotal != 14)) + CV_Error( cv::Error::StsBadArg, cvDistCoeffErr ); + + Mat _k(distCoeffs.size(), CV_64FC(kcn), k); + distCoeffs.convertTo(_k, CV_64F); + if(k[12] != 0 || k[13] != 0) + detail::computeTiltProjectionMatrix(k[12], k[13], &matTilt, &dMatTiltdTauX, &dMatTiltdTauY); + } + + if( _dpdr.needed() ) + { + dpdr.create(count*2, 3, CV_64F); + dpdr_p = dpdr.ptr(); + dpdr_step = (int)dpdr.step1(); + } + if( _dpdt.needed() ) + { + dpdt.create(count*2, 3, CV_64F); + dpdt_p = dpdt.ptr(); + dpdt_step = (int)dpdt.step1(); + } + if( _dpdf.needed() ) + { + dpdf.create(count*2, 2, CV_64F); + dpdf_p = dpdf.ptr(); + dpdf_step = (int)dpdf.step1(); + } + if( _dpdc.needed() ) + { + dpdc.create(count*2, 2, CV_64F); + dpdc_p = dpdc.ptr(); + dpdc_step = (int)dpdc.step1(); + } + if( _dpdk.needed() ) + { + dpdk.create(count*2, ktotal, CV_64F); + dpdk_p = dpdk.ptr(); + dpdk_step = (int)dpdk.step1(); + } + if( _dpdo.needed() ) + { + dpdo = Mat::zeros(count*2, count*3, CV_64F); + dpdo_p = dpdo.ptr(); + dpdo_step = (int)dpdo.step1(); + } + + bool calc_derivatives = 
dpdr.data || dpdt.data || dpdf.data || + dpdc.data || dpdk.data || dpdo.data; + + if (!calc_derivatives) + { + if (objpt_depth == CV_32F && ipoints.type() == CV_32F) + { + float rtMatrix[12] = { (float)R[0], (float)R[1], (float)R[2], (float)t[0], + (float)R[3], (float)R[4], (float)R[5], (float)t[1], + (float)R[6], (float)R[7], (float)R[8], (float)t[2] }; + + cv_camera_intrinsics_pinhole_32f intr; + intr.fx = (float)fx; intr.fy = (float)fy; + intr.cx = (float)cx; intr.cy = (float)cy; + intr.amt_k = 0; intr.amt_p = 0; intr.amt_s = 0; intr.use_tau = false; + + switch (ktotal) + { + case 0: break; + case 4: // [k_1, k_2, p_1, p_2] + intr.amt_k = 2; intr.amt_p = 2; + break; + case 5: // [k_1, k_2, p_1, p_2, k_3] + intr.amt_k = 3; intr.amt_p = 2; + break; + case 8: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6] + intr.amt_k = 6; intr.amt_p = 2; + break; + case 12: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4] + intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; + break; + case 14: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4, tau_x, tau_y] + intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; intr.use_tau = true; + break; + default: + CV_Error(cv::Error::StsInternal, "Wrong number of distortion coefficients"); + } + + intr.k[0] = (float)k[0]; + intr.k[1] = (float)k[1]; + intr.k[2] = (float)k[4]; + intr.k[3] = (float)k[5]; + intr.k[4] = (float)k[6]; + intr.k[5] = (float)k[7]; + + intr.p[0] = (float)k[2]; + intr.p[1] = (float)k[3]; + + for (int ctr = 0; ctr < 4; ctr++) + { + intr.s[ctr] = (float)k[8+ctr]; + } + + intr.tau_x = (float)k[12]; + intr.tau_y = (float)k[13]; + + CALL_HAL(projectPoints, cv_hal_project_points_pinhole32f, + (float*)objectPoints.data, objectPoints.step, count, + (float*)ipoints.data, ipoints.step, rtMatrix, &intr); + } + + if (objpt_depth == CV_64F && ipoints.type() == CV_64F) + { + double rtMatrix[12] = { R[0], R[1], R[2], t[0], + R[3], R[4], R[5], t[1], + R[6], R[7], R[8], t[2] }; + + cv_camera_intrinsics_pinhole_64f intr; + intr.fx = fx; intr.fy = fy; + intr.cx = cx; intr.cy = cy; + intr.amt_k = 0; intr.amt_p = 0; intr.amt_s = 0; intr.use_tau = false; + + switch (ktotal) + { + case 0: break; + case 4: // [k_1, k_2, p_1, p_2] + intr.amt_k = 2; intr.amt_p = 2; + break; + case 5: // [k_1, k_2, p_1, p_2, k_3] + intr.amt_k = 3; intr.amt_p = 2; + break; + case 8: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6] + intr.amt_k = 6; intr.amt_p = 2; + break; + case 12: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4] + intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; + break; + case 14: // [k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3, s_4, tau_x, tau_y] + intr.amt_k = 6; intr.amt_p = 2; intr.amt_s = 4; intr.use_tau = true; + break; + default: + CV_Error(cv::Error::StsInternal, "Wrong number of distortion coefficients"); + } + + intr.k[0] = k[0]; + intr.k[1] = k[1]; + intr.k[2] = k[4]; + intr.k[3] = k[5]; + intr.k[4] = k[6]; + intr.k[5] = k[7]; + + intr.p[0] = k[2]; + intr.p[1] = k[3]; + + for (int ctr = 0; ctr < 4; ctr++) + { + intr.s[ctr] = k[8+ctr]; + } + + intr.tau_x = k[12]; + intr.tau_y = k[13]; + + CALL_HAL(projectPoints, cv_hal_project_points_pinhole64f, + (double*)objectPoints.data, objectPoints.step, count, + (double*)ipoints.data, ipoints.step, rtMatrix, &intr); + } + } + + Mat matM(objectPoints.size(), CV_64FC(objpt_cn)); + objectPoints.convertTo(matM, CV_64F); + ipoints.convertTo(_m, CV_64F); + const Point3d* M = matM.ptr(); + Point2d* m = _m.ptr(); + + for( i = 0; i < count; i++ ) + { + double X = M[i].x, Y = M[i].y, Z = 
M[i].z; + double x = R[0]*X + R[1]*Y + R[2]*Z + t[0]; + double y = R[3]*X + R[4]*Y + R[5]*Z + t[1]; + double z = R[6]*X + R[7]*Y + R[8]*Z + t[2]; + double r2, r4, r6, a1, a2, a3, cdist, icdist2; + double xd, yd, xd0, yd0, invProj; + Vec3d vecTilt; + Vec3d dVecTilt; + Matx22d dMatTilt; + Vec2d dXdYd; + + double z0 = z; + z = z ? 1./z : 1; + x *= z; y *= z; + + r2 = x*x + y*y; + r4 = r2*r2; + r6 = r4*r2; + a1 = 2*x*y; + a2 = r2 + 2*x*x; + a3 = r2 + 2*y*y; + cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6; + icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6); + xd0 = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4; + yd0 = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4; + + // additional distortion by projecting onto a tilt plane + vecTilt = matTilt*Vec3d(xd0, yd0, 1); + invProj = vecTilt(2) ? 1./vecTilt(2) : 1; + xd = invProj * vecTilt(0); + yd = invProj * vecTilt(1); + + m[i].x = xd*fx + cx; + m[i].y = yd*fy + cy; + + if( calc_derivatives ) + { + if( dpdc.data ) + { + dpdc_p[0] = 1; dpdc_p[1] = 0; // dp_xdc_x; dp_xdc_y + dpdc_p[dpdc_step] = 0; + dpdc_p[dpdc_step+1] = 1; + dpdc_p += dpdc_step*2; + } + + if( dpdf_p ) + { + if( fixedAspectRatio ) + { + dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio; // dp_xdf_x; dp_xdf_y + dpdf_p[dpdf_step] = 0; + dpdf_p[dpdf_step+1] = yd; + } + else + { + dpdf_p[0] = xd; dpdf_p[1] = 0; + dpdf_p[dpdf_step] = 0; + dpdf_p[dpdf_step+1] = yd; + } + dpdf_p += dpdf_step*2; + } + for (int row = 0; row < 2; ++row) + for (int col = 0; col < 2; ++col) + dMatTilt(row,col) = matTilt(row,col)*vecTilt(2) - matTilt(2,col)*vecTilt(row); + double invProjSquare = (invProj*invProj); + dMatTilt *= invProjSquare; + if( dpdk_p ) + { + dXdYd = dMatTilt*Vec2d(x*icdist2*r2, y*icdist2*r2); + dpdk_p[0] = fx*dXdYd(0); + dpdk_p[dpdk_step] = fy*dXdYd(1); + dXdYd = dMatTilt*Vec2d(x*icdist2*r4, y*icdist2*r4); + dpdk_p[1] = fx*dXdYd(0); + dpdk_p[dpdk_step+1] = fy*dXdYd(1); + if( dpdk.cols > 2 ) + { + dXdYd = dMatTilt*Vec2d(a1, a3); + dpdk_p[2] = fx*dXdYd(0); + dpdk_p[dpdk_step+2] = fy*dXdYd(1); + dXdYd = dMatTilt*Vec2d(a2, a1); + dpdk_p[3] = fx*dXdYd(0); + dpdk_p[dpdk_step+3] = fy*dXdYd(1); + if( dpdk.cols > 4 ) + { + dXdYd = dMatTilt*Vec2d(x*icdist2*r6, y*icdist2*r6); + dpdk_p[4] = fx*dXdYd(0); + dpdk_p[dpdk_step+4] = fy*dXdYd(1); + + if( dpdk.cols > 5 ) + { + dXdYd = dMatTilt*Vec2d( + x*cdist*(-icdist2)*icdist2*r2, y*cdist*(-icdist2)*icdist2*r2); + dpdk_p[5] = fx*dXdYd(0); + dpdk_p[dpdk_step+5] = fy*dXdYd(1); + dXdYd = dMatTilt*Vec2d( + x*cdist*(-icdist2)*icdist2*r4, y*cdist*(-icdist2)*icdist2*r4); + dpdk_p[6] = fx*dXdYd(0); + dpdk_p[dpdk_step+6] = fy*dXdYd(1); + dXdYd = dMatTilt*Vec2d( + x*cdist*(-icdist2)*icdist2*r6, y*cdist*(-icdist2)*icdist2*r6); + dpdk_p[7] = fx*dXdYd(0); + dpdk_p[dpdk_step+7] = fy*dXdYd(1); + if( dpdk.cols > 8 ) + { + dXdYd = dMatTilt*Vec2d(r2, 0); + dpdk_p[8] = fx*dXdYd(0); //s1 + dpdk_p[dpdk_step+8] = fy*dXdYd(1); //s1 + dXdYd = dMatTilt*Vec2d(r4, 0); + dpdk_p[9] = fx*dXdYd(0); //s2 + dpdk_p[dpdk_step+9] = fy*dXdYd(1); //s2 + dXdYd = dMatTilt*Vec2d(0, r2); + dpdk_p[10] = fx*dXdYd(0);//s3 + dpdk_p[dpdk_step+10] = fy*dXdYd(1); //s3 + dXdYd = dMatTilt*Vec2d(0, r4); + dpdk_p[11] = fx*dXdYd(0);//s4 + dpdk_p[dpdk_step+11] = fy*dXdYd(1); //s4 + if( dpdk.cols > 12 ) + { + dVecTilt = dMatTiltdTauX * Vec3d(xd0, yd0, 1); + dpdk_p[12] = fx * invProjSquare * ( + dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0)); + dpdk_p[dpdk_step+12] = fy*invProjSquare * ( + dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1)); + dVecTilt = dMatTiltdTauY * Vec3d(xd0, yd0, 1); + dpdk_p[13] = fx 
* invProjSquare * ( + dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0)); + dpdk_p[dpdk_step+13] = fy * invProjSquare * ( + dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1)); + } + } + } + } + } + dpdk_p += dpdk_step*2; + } + + if( dpdt_p ) + { + double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z }; + for( j = 0; j < 3; j++ ) + { + double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j]; + double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt; + double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt); + double da1dt = 2*(x*dydt[j] + y*dxdt[j]); + double dmxdt = (dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt + + k[2]*da1dt + k[3]*(dr2dt + 4*x*dxdt[j]) + k[8]*dr2dt + 2*r2*k[9]*dr2dt); + double dmydt = (dydt[j]*cdist*icdist2 + y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt + + k[2]*(dr2dt + 4*y*dydt[j]) + k[3]*da1dt + k[10]*dr2dt + 2*r2*k[11]*dr2dt); + dXdYd = dMatTilt*Vec2d(dmxdt, dmydt); + dpdt_p[j] = fx*dXdYd(0); + dpdt_p[dpdt_step+j] = fy*dXdYd(1); + } + dpdt_p += dpdt_step*2; + } + + if( dpdr_p ) + { + double dx0dr[] = + { + X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2], + X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11], + X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20] + }; + double dy0dr[] = + { + X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5], + X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14], + X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23] + }; + double dz0dr[] = + { + X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8], + X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17], + X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26] + }; + for( j = 0; j < 3; j++ ) + { + double dxdr = z*(dx0dr[j] - x*dz0dr[j]); + double dydr = z*(dy0dr[j] - y*dz0dr[j]); + double dr2dr = 2*x*dxdr + 2*y*dydr; + double dcdist_dr = (k[0] + 2*k[1]*r2 + 3*k[4]*r4)*dr2dr; + double dicdist2_dr = -icdist2*icdist2*(k[5] + 2*k[6]*r2 + 3*k[7]*r4)*dr2dr; + double da1dr = 2*(x*dydr + y*dxdr); + double dmxdr = (dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr + + k[2]*da1dr + k[3]*(dr2dr + 4*x*dxdr) + (k[8] + 2*r2*k[9])*dr2dr); + double dmydr = (dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr + + k[2]*(dr2dr + 4*y*dydr) + k[3]*da1dr + (k[10] + 2*r2*k[11])*dr2dr); + dXdYd = dMatTilt*Vec2d(dmxdr, dmydr); + dpdr_p[j] = fx*dXdYd(0); + dpdr_p[dpdr_step+j] = fy*dXdYd(1); + } + dpdr_p += dpdr_step*2; + } + + if( dpdo_p ) + { + double dxdo[] = { z * ( R[0] - x * z * z0 * R[6] ), + z * ( R[1] - x * z * z0 * R[7] ), + z * ( R[2] - x * z * z0 * R[8] ) }; + double dydo[] = { z * ( R[3] - y * z * z0 * R[6] ), + z * ( R[4] - y * z * z0 * R[7] ), + z * ( R[5] - y * z * z0 * R[8] ) }; + for( j = 0; j < 3; j++ ) + { + double dr2do = 2 * x * dxdo[j] + 2 * y * dydo[j]; + double dr4do = 2 * r2 * dr2do; + double dr6do = 3 * r4 * dr2do; + double da1do = 2 * y * dxdo[j] + 2 * x * dydo[j]; + double da2do = dr2do + 4 * x * dxdo[j]; + double da3do = dr2do + 4 * y * dydo[j]; + double dcdist_do + = k[0] * dr2do + k[1] * dr4do + k[4] * dr6do; + double dicdist2_do = -icdist2 * icdist2 + * ( k[5] * dr2do + k[6] * dr4do + k[7] * dr6do ); + double dxd0_do = cdist * icdist2 * dxdo[j] + + x * icdist2 * dcdist_do + x * cdist * dicdist2_do + + k[2] * da1do + k[3] * da2do + k[8] * dr2do + + k[9] * dr4do; + double dyd0_do = cdist * icdist2 * dydo[j] + + y * icdist2 * dcdist_do + y * cdist * dicdist2_do + + k[2] * da3do + k[3] * da1do + k[10] * dr2do + + k[11] * dr4do; + dXdYd = dMatTilt * Vec2d( dxd0_do, dyd0_do ); + dpdo_p[i * 3 + j] = fx * dXdYd( 0 ); + dpdo_p[dpdo_step + i * 3 + j] = fy * dXdYd( 1 ); + } + dpdo_p += dpdo_step * 2; + } + } + } + + _m.convertTo(_imagePoints, objpt_depth); + + 
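+    // Note: the requested derivative blocks (dpdr, dpdt, dpdf, dpdc, dpdk, dpdo) are
+    // computed in double precision and returned as CV_64F below, independently of the
+    // camera matrix depth (the commented-out alternative would follow the input depth).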
int depth = CV_64F;//cameraMatrix.depth(); + if( _dpdr.needed() ) + dpdr.convertTo(_dpdr, depth); + + if( _dpdt.needed() ) + dpdt.convertTo(_dpdt, depth); + + if( _dpdf.needed() ) + dpdf.convertTo(_dpdf, depth); + + if( _dpdc.needed() ) + dpdc.convertTo(_dpdc, depth); + + if( _dpdk.needed() ) + dpdk.convertTo(_dpdk, depth); + + if( _dpdo.needed() ) + dpdo.convertTo(_dpdo, depth); +} + +cv::Vec3d cv::RQDecomp3x3( InputArray _Marr, + OutputArray _Rarr, + OutputArray _Qarr, + OutputArray _Qx, + OutputArray _Qy, + OutputArray _Qz ) +{ + CV_INSTRUMENT_REGION(); + + Matx33d M, Q; + double z, c, s; + Mat Mmat = _Marr.getMat(); + int depth = Mmat.depth(); + Mmat.convertTo(M, CV_64F); + + /* Find Givens rotation Q_x for x axis (left multiplication). */ + /* + ( 1 0 0 ) + Qx = ( 0 c s ), c = m33/sqrt(m32^2 + m33^2), s = m32/sqrt(m32^2 + m33^2) + ( 0 -s c ) + */ + s = std::abs(M(2, 1)) > DBL_EPSILON ? M(2, 1): 0.; + c = std::abs(M(2, 1)) > DBL_EPSILON ? M(2, 2): 1.; + z = 1./std::sqrt(c * c + s * s); + c *= z; + s *= z; + + Matx33d Qx(1, 0, 0, 0, c, s, 0, -s, c); + Matx33d R = M*Qx; + + assert(fabs(R(2, 1)) < FLT_EPSILON); + R(2, 1) = 0; + + /* Find Givens rotation for y axis. */ + /* + ( c 0 -s ) + Qy = ( 0 1 0 ), c = m33/sqrt(m31^2 + m33^2), s = -m31/sqrt(m31^2 + m33^2) + ( s 0 c ) + */ + s = std::abs(R(2, 0)) > DBL_EPSILON ? -R(2, 0): 0.; + c = std::abs(R(2, 0)) > DBL_EPSILON ? R(2, 2): 1.; + z = 1./std::sqrt(c * c + s * s); + c *= z; + s *= z; + + Matx33d Qy(c, 0, -s, 0, 1, 0, s, 0, c); + M = R*Qy; + + CV_Assert(fabs(M(2, 0)) < FLT_EPSILON); + M(2, 0) = 0; + + /* Find Givens rotation for z axis. */ + /* + ( c s 0 ) + Qz = (-s c 0 ), c = m22/sqrt(m21^2 + m22^2), s = m21/sqrt(m21^2 + m22^2) + ( 0 0 1 ) + */ + + s = std::abs(M(1, 0)) > DBL_EPSILON ? M(1, 0): 0.; + c = std::abs(M(1, 0)) > DBL_EPSILON ? M(1, 1): 1.; + z = 1./std::sqrt(c * c + s * s); + c *= z; + s *= z; + + Matx33d Qz(c, s, 0, -s, c, 0, 0, 0, 1); + R = M*Qz; + + CV_Assert(fabs(R(1, 0)) < FLT_EPSILON); + R(1, 0) = 0; + + // Solve the decomposition ambiguity. + // Diagonal entries of R, except the last one, shall be positive. + // Further rotate R by 180 degree if necessary + if( R(0, 0) < 0 ) + { + if( R(1, 1) < 0 ) + { + // rotate around z for 180 degree, i.e. a rotation matrix of + // [-1, 0, 0], + // [ 0, -1, 0], + // [ 0, 0, 1] + R(0, 0) *= -1; + R(0, 1) *= -1; + R(1, 1) *= -1; + + Qz(0, 0) *= -1; + Qz(0, 1) *= -1; + Qz(1, 0) *= -1; + Qz(1, 1) *= -1; + } + else + { + // rotate around y for 180 degree, i.e. a rotation matrix of + // [-1, 0, 0], + // [ 0, 1, 0], + // [ 0, 0, -1] + R(0, 0) *= -1; + R(0, 2) *= -1; + R(1, 2) *= -1; + R(2, 2) *= -1; + + Qz = Qz.t(); + + Qy(0, 0) *= -1; + Qy(0, 2) *= -1; + Qy(2, 0) *= -1; + Qy(2, 2) *= -1; + } + } + else if( R(1, 1) < 0 ) + { + // ??? for some reason, we never get here ??? + + // rotate around x for 180 degree, i.e. a rotation matrix of + // [ 1, 0, 0], + // [ 0, -1, 0], + // [ 0, 0, -1] + R(0, 1) *= -1; + R(0, 2) *= -1; + R(1, 1) *= -1; + R(1, 2) *= -1; + R(2, 2) *= -1; + + Qz = Qz.t(); + Qy = Qy.t(); + + Qx(1, 1) *= -1; + Qx(1, 2) *= -1; + Qx(2, 1) *= -1; + Qx(2, 2) *= -1; + } + + // calculate the euler angle + Vec3d eulerAngles( + std::acos(Qx(1, 1)) * (Qx(1, 2) >= 0 ? 1 : -1) * (180.0 / CV_PI), + std::acos(Qy(0, 0)) * (Qy(2, 0) >= 0 ? 1 : -1) * (180.0 / CV_PI), + std::acos(Qz(0, 0)) * (Qz(0, 1) >= 0 ? 1 : -1) * (180.0 / CV_PI)); + + /* Calculate orthogonal matrix. */ + /* + Q = QzT * QyT * QxT + */ + M = Qz.t()*Qy.t(); + Q = M*Qx.t(); + + /* Save R and Q matrices. 
*/ + Mat(R).convertTo(_Rarr, depth); + Mat(Q).convertTo(_Qarr, depth); + + if(_Qx.needed()) + Mat(Qx).convertTo(_Qx, depth); + if(_Qy.needed()) + Mat(Qy).convertTo(_Qy, depth); + if(_Qz.needed()) + Mat(Qz).convertTo(_Qz, depth); + return eulerAngles; +} + +void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraMatrix, + OutputArray _rotMatrix, OutputArray _transVect, + OutputArray _rotMatrixX, OutputArray _rotMatrixY, + OutputArray _rotMatrixZ, OutputArray _eulerAngles ) +{ + CV_INSTRUMENT_REGION(); + + Mat projMatrix = _projMatrix.getMat(); + int depth = projMatrix.depth(); + Matx34d P; + projMatrix.convertTo(P, CV_64F); + Matx44d Px(P(0, 0), P(0, 1), P(0, 2), P(0, 3), + P(1, 0), P(1, 1), P(1, 2), P(1, 3), + P(2, 0), P(2, 1), P(2, 2), P(2, 3), + 0, 0, 0, 0), U, Vt; + Matx41d W; + SVDecomp(Px, W, U, Vt, SVD::MODIFY_A); + Vec4d t(Vt(3, 0), Vt(3, 1), Vt(3, 2), Vt(3, 3)); + Matx33d M(P(0, 0), P(0, 1), P(0, 2), + P(1, 0), P(1, 1), P(1, 2), + P(2, 0), P(2, 1), P(2, 2)); + Mat(t).convertTo(_transVect, depth); + Vec3d eulerAngles = RQDecomp3x3(M, _cameraMatrix, _rotMatrix, _rotMatrixX, _rotMatrixY, _rotMatrixZ); + if (_eulerAngles.needed()) + Mat(eulerAngles).convertTo(_eulerAngles, depth); +} + +void cv::findExtrinsicCameraParams2( const Mat& objectPoints, + const Mat& imagePoints, const Mat& A, + const Mat& distCoeffs, Mat& rvec, Mat& tvec, + int useExtrinsicGuess ) +{ + const int max_iter = 20; + Mat matM, _m, _mn; + + int i, count; + double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9]; + double MM[9] = { 0 }, U[9] = { 0 }, V[9] = { 0 }, W[3] = { 0 }; + double param[6] = { 0 }; + Mat matA( 3, 3, CV_64F, a ); + Mat _Ar( 3, 3, CV_64F, ar ); + Mat matR( 3, 3, CV_64F, R ); + Mat _r( 3, 1, CV_64F, param ); + Mat _t( 3, 1, CV_64F, param + 3 ); + Mat _MM( 3, 3, CV_64F, MM ); + Mat matU( 3, 3, CV_64F, U ); + Mat matV( 3, 3, CV_64F, V ); + Mat matW( 3, 1, CV_64F, W ); + Mat _param( 6, 1, CV_64F, param ); + Mat _dpdr, _dpdt; + + count = MAX(objectPoints.cols, objectPoints.rows); + if (objectPoints.checkVector(3) > 0) + objectPoints.convertTo(matM, CV_64F); + else { + convertPointsFromHomogeneous(objectPoints, matM); + matM.convertTo(matM, CV_64F); + } + if (imagePoints.checkVector(2) > 0) + imagePoints.convertTo(_m, CV_64F); + else { + convertPointsFromHomogeneous(imagePoints, _m); + _m.convertTo(_m, CV_64F); + } + A.convertTo(matA, CV_64F); + + CV_Assert((count >= 4) || (count == 3 && useExtrinsicGuess)); // it is unsafe to call LM optimisation without an extrinsic guess in the case of 3 points. This is because there is no guarantee that it will converge on the correct solution. 
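+    // Outline of the estimation below: the image points are first mapped to normalized
+    // camera coordinates with undistortPoints(); if no extrinsic guess is given, an initial
+    // pose is computed either from a homography (planar point set, W[2]/W[1] < 1e-3) or from
+    // a 12-parameter DLT (non-planar set, at least 6 points); the pose is finally refined by
+    // Levenberg-Marquardt minimization of the reprojection error.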
+ + // normalize image points + // (unapply the intrinsic matrix transformation and distortion) + undistortPoints(_m, _mn, matA, distCoeffs, Mat(), _Ar); + + if( useExtrinsicGuess ) + { + CV_Assert((rvec.rows == 1 || rvec.cols == 1) && rvec.total()*rvec.channels() == 3); + CV_Assert((tvec.rows == 1 || tvec.cols == 1) && tvec.total()*tvec.channels() == 3); + Mat _r_temp(rvec.rows, rvec.cols, CV_MAKETYPE(CV_64F,rvec.channels()), param); + Mat _t_temp(tvec.rows, tvec.cols, CV_MAKETYPE(CV_64F,tvec.channels()), param + 3); + rvec.convertTo(_r_temp, CV_64F); + tvec.convertTo(_t_temp, CV_64F); + } + else + { + Scalar Mc = mean(matM); + Mat _Mc( 1, 3, CV_64F, Mc.val ); + + matM = matM.reshape(1, count); + mulTransposed(matM, _MM, true, _Mc); + SVDecomp(_MM, matW, noArray(), matV, SVD::MODIFY_A); + CV_Assert(matW.ptr() == W); + CV_Assert(matV.ptr() == V); + + // initialize extrinsic parameters + if( W[2]/W[1] < 1e-3) + { + // a planar structure case (all M's lie in the same plane) + double tt[3]; + Mat R_transform = matV; + Mat T_transform( 3, 1, CV_64F, tt ); + + if( V[2]*V[2] + V[5]*V[5] < 1e-10 ) + R_transform = Mat::eye(3, 3, CV_64F); + + if( determinant(R_transform) < 0 ) + R_transform *= -1.; + + gemm( R_transform, _Mc, -1, Mat(), 0, T_transform, GEMM_2_T ); + + const double* Rp = R_transform.ptr(); + const double* Tp = T_transform.ptr(); + + Mat _Mxy(count, 1, CV_64FC2); + const double* src = (double*)matM.data; + double* dst = (double*)_Mxy.data; + + for( i = 0; i < count; i++, src += 3, dst += 2 ) + { + dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0]; + dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1]; + } + + Mat matH = findHomography(_Mxy, _mn); + + if( checkRange(matH, true)) + { + Mat _h1 = matH.col(0); + Mat _h2 = matH.col(1); + Mat _h3 = matH.col(2); + double* h = matH.ptr(); + CV_Assert(matH.isContinuous()); + double h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]); + double h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]); + + _h1 *= 1./MAX(h1_norm, DBL_EPSILON); + _h2 *= 1./MAX(h2_norm, DBL_EPSILON); + _t = _h3 * (2./MAX(h1_norm + h2_norm, DBL_EPSILON)); + _h1.cross(_h2).copyTo(_h3); + + Rodrigues( matH, _r ); + Rodrigues( _r, matH ); + _t += matH*T_transform; + matR = matH * R_transform; + } + else + { + setIdentity(matR); + _t.setTo(0); + } + + Rodrigues( matR, _r ); + } + else + { + // non-planar structure. 
Use DLT method + CV_CheckGE(count, 6, "DLT algorithm needs at least 6 points for pose estimation from 3D-2D point correspondences."); + double LL[12*12], LW[12], LV[12*12], sc; + Mat _LL( 12, 12, CV_64F, LL ); + Mat _LW( 12, 1, CV_64F, LW ); + Mat _LV( 12, 12, CV_64F, LV ); + Point3d* M = (Point3d*)matM.data; + Point2d* mn = (Point2d*)_mn.data; + + Mat matL(2*count, 12, CV_64F); + double* L = matL.ptr(); + + for( i = 0; i < count; i++, L += 24 ) + { + double x = -mn[i].x, y = -mn[i].y; + L[0] = L[16] = M[i].x; + L[1] = L[17] = M[i].y; + L[2] = L[18] = M[i].z; + L[3] = L[19] = 1.; + L[4] = L[5] = L[6] = L[7] = 0.; + L[12] = L[13] = L[14] = L[15] = 0.; + L[8] = x*M[i].x; + L[9] = x*M[i].y; + L[10] = x*M[i].z; + L[11] = x; + L[20] = y*M[i].x; + L[21] = y*M[i].y; + L[22] = y*M[i].z; + L[23] = y; + } + + mulTransposed( matL, _LL, true ); + SVDecomp( _LL, _LW, noArray(), _LV, SVD::MODIFY_A ); + Mat _RRt( 3, 4, CV_64F, LV + 11*12 ); + Mat _RR = _RRt.colRange(0, 3); + Mat _tt = _RRt.col(3); + if( determinant(_RR) < 0 ) + _RRt *= -1.0; + sc = norm(_RR, NORM_L2); + CV_Assert(fabs(sc) > DBL_EPSILON); + SVDecomp(_RR, matW, matU, matV, SVD::MODIFY_A); + matR = matU*matV; + _tt.convertTo(_t, CV_64F, norm(matR, NORM_L2)/sc); + Rodrigues(matR, _r); + } + } + + matM = matM.reshape(3, 1); + _mn = _mn.reshape(2, 1); + + // refine extrinsic parameters using iterative algorithm +#if 0 + // The C++ LMSolver is not as good as CvLevMarq to pass the tests, maybe due to _completeSymmFlag in CvLevMarq. + class RefineLMCallback CV_FINAL : public LMSolver::Callback + { + public: + RefineLMCallback(const Mat &matM, const Mat &_m, const Mat &matA, const Mat& distCoeffs) : matM_(matM), _m_(_m), matA_(matA), distCoeffs_(distCoeffs) { + } + bool compute(InputArray param_, OutputArray _err, OutputArray _Jac) const CV_OVERRIDE + { + const Mat& objpt = matM_; + const Mat& imgpt = _m_; + const Mat& cameraMatrix = matA_; + Mat x = param_.getMat(); + CV_Assert((x.cols == 1 || x.rows == 1) && x.total() == 6 && x.type() == CV_64F); + double* pdata = x.ptr(); + Mat rv(3, 1, CV_64F, pdata); + Mat tv(3, 1, CV_64F, pdata + 3); + int errCount = objpt.rows + objpt.cols - 1; + _err.create(errCount * 2, 1, CV_64F); + Mat err = _err.getMat(); + err = err.reshape(2, errCount); + if (_Jac.needed()) + { + _Jac.create(errCount * 2, 6, CV_64F); + Mat Jac = _Jac.getMat(); + Mat dpdr = Jac.colRange(0, 3); + Mat dpdt = Jac.colRange(3, 6); + projectPoints(objpt, rv, tv, cameraMatrix, distCoeffs_, + err, dpdr, dpdt, noArray(), noArray(), noArray(), noArray()); + } + else + { + projectPoints(objpt, rv, tv, cameraMatrix, distCoeffs_, err); + } + err = err - (imgpt.rows == 1 ? 
imgpt.t() : imgpt); + err = err.reshape(1, 2 * errCount); + return true; + }; + private: + const Mat &matM_, &_m_, &matA_, &distCoeffs_; + }; + + LMSolver::create(makePtr(matM, _m, matA, distCoeffs), max_iter, FLT_EPSILON)->run(_param); +#else + CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true); + _param.copyTo(cvarrToMat(solver.param)); + + for(;;) + { + CvMat *matJ = 0, *_err = 0; + const CvMat *__param = 0; + bool proceed = solver.update( __param, matJ, _err ); + cvarrToMat(__param).copyTo(_param ); + if( !proceed || !_err ) + break; + int errCount = matM.rows + matM.cols - 1; + Mat err = cvarrToMat(_err); + err = err.reshape(2, errCount); + if( matJ ) + { + Mat Jac = cvarrToMat(matJ); + Mat dpdr = Jac.colRange(0, 3); + Mat dpdt = Jac.colRange(3, 6); + projectPoints(matM, _r, _t, matA, distCoeffs, + err, dpdr, dpdt, noArray(), noArray(), noArray(), noArray()); + } + else + { + projectPoints(matM, _r, _t, matA, distCoeffs, err); + } + subtract(err, _m.rows == 1 ? _m.t() : _m, err); + cvReshape( _err, _err, 1, 2*count ); + } + cvarrToMat(solver.param).copyTo(_param ); +#endif + + _param.rowRange(0, 3).convertTo(rvec, rvec.depth()); + _param.rowRange(3, 6).convertTo(tvec, tvec.depth()); +} + +void cv::projectPoints( InputArray _opoints, + InputArray _rvec, + InputArray _tvec, + InputArray _cameraMatrix, + InputArray _distCoeffs, + OutputArray _ipoints, + OutputArray _jacobian, + double aspectRatio ) +{ + Mat opoints = _opoints.getMat(); + int npoints = opoints.checkVector(3), depth = opoints.depth(); + if (npoints < 0) + opoints = opoints.t(); + npoints = opoints.checkVector(3); + CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F)); + + if (opoints.cols == 3) + opoints = opoints.reshape(3); + + CV_Assert( _ipoints.needed() ); + + double dc0buf[5]={0}; + Mat dc0(5,1,CV_64F,dc0buf); + Mat distCoeffs = _distCoeffs.getMat(); + if( distCoeffs.empty() ) + distCoeffs = dc0; + int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1; + + if( _jacobian.needed() ) + { + _jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F); + Mat jacobian = _jacobian.getMat(); + Mat dpdr = jacobian.colRange(0, 3); + Mat dpdt = jacobian.colRange(3, 6); + Mat dpdf = jacobian.colRange(6, 8); + Mat dpdc = jacobian.colRange(8, 10); + Mat dpdk = jacobian.colRange(10, 10+ndistCoeffs); + + projectPoints(opoints, _rvec, _tvec, _cameraMatrix, distCoeffs, _ipoints, + dpdr, dpdt, dpdf, dpdc, dpdk, noArray(), aspectRatio); + } + else + { + projectPoints(opoints, _rvec, _tvec, _cameraMatrix, distCoeffs, _ipoints, + noArray(), noArray(), noArray(), noArray(), noArray(), noArray(), aspectRatio); + } +} + +static void getUndistortRectangles(InputArray _cameraMatrix, InputArray _distCoeffs, + InputArray R, InputArray newCameraMatrix, Size imgSize, + Rect_& inner, Rect_& outer ) +{ + const int N = 9; + int x, y, k; + Mat _pts(1, N*N, CV_64FC2); + Point2d* pts = _pts.ptr(); + + for( y = k = 0; y < N; y++ ) + for( x = 0; x < N; x++ ) + pts[k++] = Point2d((double)x*(imgSize.width-1)/(N-1), (double)y*(imgSize.height-1)/(N-1)); + + undistortPoints(_pts, _pts, _cameraMatrix, _distCoeffs, R, newCameraMatrix); + + double iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX; + double oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX; + // find the inscribed rectangle. 
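+    // (the outer rectangle is the bounding box of all undistorted grid points, while the
+    // inner one only keeps the innermost points of the first/last grid rows and columns,
+    // approximating the largest rectangle that contains valid pixels only)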
+ // the code will likely not work with extreme rotation matrices (R) (>45%) + for( y = k = 0; y < N; y++ ) + for( x = 0; x < N; x++ ) + { + Point2d p = pts[k++]; + oX0 = MIN(oX0, p.x); + oX1 = MAX(oX1, p.x); + oY0 = MIN(oY0, p.y); + oY1 = MAX(oY1, p.y); + + if( x == 0 ) + iX0 = MAX(iX0, p.x); + if( x == N-1 ) + iX1 = MIN(iX1, p.x); + if( y == 0 ) + iY0 = MAX(iY0, p.y); + if( y == N-1 ) + iY1 = MIN(iY1, p.y); + } + inner = Rect_(iX0, iY0, iX1-iX0, iY1-iY0); + outer = Rect_(oX0, oY0, oX1-oX0, oY1-oY0); +} + +cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix, InputArray _distCoeffs, + Size imgSize, double alpha, Size newImgSize, + Rect* validPixROI, bool centerPrincipalPoint ) +{ + Rect_ inner, outer; + newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize; + + Mat cameraMatrix = _cameraMatrix.getMat(), M; + cameraMatrix.convertTo(M, CV_64F); + CV_Assert(M.isContinuous()); + + if( centerPrincipalPoint ) + { + double cx0 = M.at(0, 2); + double cy0 = M.at(1, 2); + double cx = (newImgSize.width-1)*0.5; + double cy = (newImgSize.height-1)*0.5; + + getUndistortRectangles( _cameraMatrix, _distCoeffs, Mat(), cameraMatrix, imgSize, inner, outer ); + double s0 = std::max(std::max(std::max((double)cx/(cx0 - inner.x), (double)cy/(cy0 - inner.y)), + (double)cx/(inner.x + inner.width - cx0)), + (double)cy/(inner.y + inner.height - cy0)); + double s1 = std::min(std::min(std::min((double)cx/(cx0 - outer.x), (double)cy/(cy0 - outer.y)), + (double)cx/(outer.x + outer.width - cx0)), + (double)cy/(outer.y + outer.height - cy0)); + double s = s0*(1 - alpha) + s1*alpha; + + M.at(0, 0) *= s; + M.at(1, 1) *= s; + M.at(0, 2) = cx; + M.at(1, 2) = cy; + + if( validPixROI ) + { + inner = cv::Rect_((double)((inner.x - cx0)*s + cx), + (double)((inner.y - cy0)*s + cy), + (double)(inner.width*s), + (double)(inner.height*s)); + Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height)); + r &= Rect(0, 0, newImgSize.width, newImgSize.height); + *validPixROI = r; + } + } + else + { + // Get inscribed and circumscribed rectangles in normalized + // (independent of camera matrix) coordinates + getUndistortRectangles( _cameraMatrix, _distCoeffs, Mat(), Mat(), imgSize, inner, outer ); + + // Projection mapping inner rectangle to viewport + double fx0 = (newImgSize.width - 1) / inner.width; + double fy0 = (newImgSize.height - 1) / inner.height; + double cx0 = -fx0 * inner.x; + double cy0 = -fy0 * inner.y; + + // Projection mapping outer rectangle to viewport + double fx1 = (newImgSize.width - 1) / outer.width; + double fy1 = (newImgSize.height - 1) / outer.height; + double cx1 = -fx1 * outer.x; + double cy1 = -fy1 * outer.y; + + // Interpolate between the two optimal projections + M.at(0, 0) = fx0*(1 - alpha) + fx1*alpha; + M.at(1, 1) = fy0*(1 - alpha) + fy1*alpha; + M.at(0, 2) = cx0*(1 - alpha) + cx1*alpha; + M.at(1, 2) = cy0*(1 - alpha) + cy1*alpha; + + if( validPixROI ) + { + getUndistortRectangles( _cameraMatrix, _distCoeffs, Mat(), M, imgSize, inner, outer ); + Rect r = inner; + r &= Rect(0, 0, newImgSize.width, newImgSize.height); + *validPixROI = r; + } + } + M.convertTo(M, cameraMatrix.type()); + + return M; +} + +/* End of file. 
*/
diff --git a/modules/calib3d/src/checkchessboard.cpp b/modules/calib3d/src/checkchessboard.cpp
index 350614e78f..2e867303cc 100644
--- a/modules/calib3d/src/checkchessboard.cpp
+++ b/modules/calib3d/src/checkchessboard.cpp
@@ -40,7 +40,6 @@
 //M*/
 
 #include "precomp.hpp"
-#include "opencv2/imgproc/imgproc_c.h"
 #include "calib3d_c_api.h"
 
 #include
diff --git a/modules/calib3d/src/precomp.hpp b/modules/calib3d/src/precomp.hpp
index 8f598d6709..05811e0907 100644
--- a/modules/calib3d/src/precomp.hpp
+++ b/modules/calib3d/src/precomp.hpp
@@ -137,6 +137,19 @@ static inline bool haveCollinearPoints( const Mat& m, int count )
     return false;
 }
 
+void findExtrinsicCameraParams2( const Mat& objectPoints,
+                                 const Mat& imagePoints, const Mat& A,
+                                 const Mat& distCoeffs, Mat& rvec, Mat& tvec,
+                                 int useExtrinsicGuess );
+
+void projectPoints( InputArray objectPoints,
+                    InputArray rvec, InputArray tvec,
+                    InputArray cameraMatrix, InputArray distCoeffs,
+                    OutputArray imagePoints, OutputArray dpdr,
+                    OutputArray dpdt, OutputArray dpdf=noArray(),
+                    OutputArray dpdc=noArray(), OutputArray dpdk=noArray(),
+                    OutputArray dpdo=noArray(), double aspectRatio=0.);
+
 } // namespace cv
 
 int checkChessboardBinary(const cv::Mat & img, const cv::Size & size);
diff --git a/modules/calib3d/src/solvepnp.cpp b/modules/calib3d/src/solvepnp.cpp
index 771abff483..37cf0bb2db 100644
--- a/modules/calib3d/src/solvepnp.cpp
+++ b/modules/calib3d/src/solvepnp.cpp
@@ -48,7 +48,6 @@
 #include "ap3p.h"
 #include "ippe.hpp"
 #include "sqpnp.hpp"
-#include "calib3d_c_api.h"
 
 #include "usac.hpp"
 
@@ -892,12 +891,7 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
             tvec.create(3, 1, CV_64FC1);
         }
 
-        CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
-        CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
-        CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
-        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
-                                     (c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
-                                     &c_rvec, &c_tvec, useExtrinsicGuess );
+        findExtrinsicCameraParams2(opoints, ipoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess);
 
         vec_rvecs.push_back(rvec);
         vec_tvecs.push_back(tvec);
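To illustrate the behaviour this backport has to preserve, here is a minimal, self-contained usage sketch of the public C++ entry points moved into calibration_base.cpp (cv::Rodrigues, cv::projectPoints, cv::decomposeProjectionMatrix). It is illustrative only, not part of the patch, and all numeric values are made up:

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Rodrigues round trip: rotation vector -> rotation matrix -> rotation vector,
    // with the 3x9 Jacobian of the matrix entries w.r.t. the vector components.
    cv::Vec3d rvec(0.1, -0.2, 0.3), rvec_back;
    cv::Mat R, jacR;
    cv::Rodrigues(rvec, R, jacR);
    cv::Rodrigues(R, rvec_back);

    // Project a few 3D points through a pinhole camera with 5 distortion coefficients;
    // the optional Jacobian J comes out as a 2N x (3+3+2+2+5) CV_64F matrix.
    std::vector<cv::Point3d> obj = { {0, 0, 1}, {0.1, 0, 1}, {0, 0.1, 1} };
    cv::Matx33d K(800,   0, 320,
                    0, 800, 240,
                    0,   0,   1);
    cv::Vec<double, 5> dist(0.1, -0.05, 0.001, 0.001, 0.0);
    cv::Vec3d tvec(0, 0, 0.5);
    std::vector<cv::Point2d> img;
    cv::Mat J;
    cv::projectPoints(obj, rvec, tvec, K, dist, img, J, 0.0);

    // Decompose P = K*[R|t] back into intrinsics, rotation and (homogeneous) camera position.
    cv::Mat Rt, K2, R2, t4;
    cv::hconcat(R, cv::Mat(tvec), Rt);
    cv::Mat P = cv::Mat(K) * Rt;
    cv::decomposeProjectionMatrix(P, K2, R2, t4);

    std::cout << img[0] << " " << rvec_back << std::endl;
    return 0;
}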