Merge remote-tracking branch 'upstream/3.4' into merge-3.4

This commit is contained in:
Alexander Alekhin 2018-09-07 12:40:27 +03:00
commit 73bfe68821
141 changed files with 3074 additions and 4777 deletions

View File

@ -543,7 +543,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
featureEvaluator = _featureEvaluator;
max_c_count = MAX( 2, featureEvaluator->getMaxCatCount() );
_resp = featureEvaluator->getCls();
_resp = cvMat(featureEvaluator->getCls());
responses = &_resp;
// TODO: check responses: elements must be 0 or 1

View File

@ -2122,12 +2122,12 @@ CvBoost::train( const Mat& _train_data, int _tflag,
const Mat& _missing_mask,
CvBoostParams _params, bool _update )
{
train_data_hdr = _train_data;
train_data_hdr = cvMat(_train_data);
train_data_mat = _train_data;
responses_hdr = _responses;
responses_hdr = cvMat(_responses);
responses_mat = _responses;
CvMat vidx = _var_idx, sidx = _sample_idx, vtype = _var_type, mmask = _missing_mask;
CvMat vidx = cvMat(_var_idx), sidx = cvMat(_sample_idx), vtype = cvMat(_var_type), mmask = cvMat(_missing_mask);
return train(&train_data_hdr, _tflag, &responses_hdr, vidx.data.ptr ? &vidx : 0,
sidx.data.ptr ? &sidx : 0, vtype.data.ptr ? &vtype : 0,
@ -2138,7 +2138,7 @@ float
CvBoost::predict( const Mat& _sample, const Mat& _missing,
const Range& slice, bool raw_mode, bool return_sum ) const
{
CvMat sample = _sample, mmask = _missing;
CvMat sample = cvMat(_sample), mmask = cvMat(_missing);
/*if( weak_responses )
{
int weak_count = cvSliceLength( slice, weak );

View File

@ -1592,12 +1592,12 @@ bool CvDTree::train( const Mat& _train_data, int _tflag,
const Mat& _sample_idx, const Mat& _var_type,
const Mat& _missing_mask, CvDTreeParams _params )
{
train_data_hdr = _train_data;
train_data_hdr = cvMat(_train_data);
train_data_mat = _train_data;
responses_hdr = _responses;
responses_hdr = cvMat(_responses);
responses_mat = _responses;
CvMat vidx=_var_idx, sidx=_sample_idx, vtype=_var_type, mmask=_missing_mask;
CvMat vidx=cvMat(_var_idx), sidx=cvMat(_sample_idx), vtype=cvMat(_var_type), mmask=cvMat(_missing_mask);
return train(&train_data_hdr, _tflag, &responses_hdr, vidx.data.ptr ? &vidx : 0, sidx.data.ptr ? &sidx : 0,
vtype.data.ptr ? &vtype : 0, mmask.data.ptr ? &mmask : 0, _params);
@ -3734,7 +3734,7 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
CvDTreeNode* CvDTree::predict( const Mat& _sample, const Mat& _missing, bool preprocessed_input ) const
{
CvMat sample = _sample, mmask = _missing;
CvMat sample = cvMat(_sample), mmask = cvMat(_missing);
return predict(&sample, mmask.data.ptr ? &mmask : 0, preprocessed_input);
}

View File

@ -125,8 +125,8 @@ if(CV_GCC OR CV_CLANG)
)
add_extra_compiler_option(-Wimplicit-fallthrough=3)
endif()
if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 7.2.0)
add_extra_compiler_option(-Wno-strict-overflow) # Issue is fixed in GCC 7.2.1
if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
add_extra_compiler_option(-Wno-strict-overflow) # Issue appears when compiling surf.cpp from opencv_contrib/modules/xfeatures2d
endif()
endif()
add_extra_compiler_option(-fdiagnostics-show-option)

View File

@ -52,8 +52,8 @@ PERF_TEST_P(PointsNum_Algo, solvePnP,
cv::solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, algo);
}
SANITY_CHECK(rvec, 1e-6);
SANITY_CHECK(tvec, 1e-6);
SANITY_CHECK(rvec, 1e-4);
SANITY_CHECK(tvec, 1e-4);
}
PERF_TEST_P(PointsNum_Algo, solvePnPSmallPoints,

View File

@ -977,7 +977,7 @@ CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
int i, count;
double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
double MM[9], U[9], V[9], W[3];
CvScalar Mc;
cv::Scalar Mc;
double param[6];
CvMat matA = cvMat( 3, 3, CV_64F, a );
CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
@ -1478,7 +1478,7 @@ static double cvCalibrateCamera2Internal( const CvMat* objectPoints,
CV_Error( CV_StsOutOfRange,
"The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
}
CvMat _matM(matM), m(_m);
CvMat _matM = cvMat(matM), m = cvMat(_m);
cvInitIntrinsicParams2D( &_matM, &m, npoints, imageSize, &matA, aspectRatio );
}
@ -1550,8 +1550,8 @@ static double cvCalibrateCamera2Internal( const CvMat* objectPoints,
cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
CvMat _Mi(matM.colRange(pos, pos + ni));
CvMat _mi(_m.colRange(pos, pos + ni));
CvMat _Mi = cvMat(matM.colRange(pos, pos + ni));
CvMat _mi = cvMat(_m.colRange(pos, pos + ni));
cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti );
}
@ -1590,17 +1590,17 @@ static double cvCalibrateCamera2Internal( const CvMat* objectPoints,
cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
CvMat _Mi(matM.colRange(pos, pos + ni));
CvMat _mi(_m.colRange(pos, pos + ni));
CvMat _me(allErrors.colRange(pos, pos + ni));
CvMat _Mi = cvMat(matM.colRange(pos, pos + ni));
CvMat _mi = cvMat(_m.colRange(pos, pos + ni));
CvMat _me = cvMat(allErrors.colRange(pos, pos + ni));
_Je.resize(ni*2); _Ji.resize(ni*2); _err.resize(ni*2);
CvMat _dpdr(_Je.colRange(0, 3));
CvMat _dpdt(_Je.colRange(3, 6));
CvMat _dpdf(_Ji.colRange(0, 2));
CvMat _dpdc(_Ji.colRange(2, 4));
CvMat _dpdk(_Ji.colRange(4, NINTRINSIC));
CvMat _mp(_err.reshape(2, 1));
CvMat _dpdr = cvMat(_Je.colRange(0, 3));
CvMat _dpdt = cvMat(_Je.colRange(3, 6));
CvMat _dpdf = cvMat(_Ji.colRange(0, 2));
CvMat _dpdc = cvMat(_Ji.colRange(2, 4));
CvMat _dpdk = cvMat(_Ji.colRange(4, NINTRINSIC));
CvMat _mp = cvMat(_err.reshape(2, 1));
if( calcJ )
{
@ -2081,7 +2081,7 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i
for( i = ofs = 0; i < nimages; ofs += ni, i++ )
{
ni = npoints->data.i[i];
CvMat objpt_i, _part;
CvMat objpt_i;
om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);
T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);
@ -2095,12 +2095,12 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i
objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
err.resize(ni*2); Je.resize(ni*2); J_LR.resize(ni*2); Ji.resize(ni*2);
CvMat tmpimagePoints(err.reshape(2, 1));
CvMat dpdf(Ji.colRange(0, 2));
CvMat dpdc(Ji.colRange(2, 4));
CvMat dpdk(Ji.colRange(4, NINTRINSIC));
CvMat dpdrot(Je.colRange(0, 3));
CvMat dpdt(Je.colRange(3, 6));
CvMat tmpimagePoints = cvMat(err.reshape(2, 1));
CvMat dpdf = cvMat(Ji.colRange(0, 2));
CvMat dpdc = cvMat(Ji.colRange(2, 4));
CvMat dpdk = cvMat(Ji.colRange(4, NINTRINSIC));
CvMat dpdrot = cvMat(Je.colRange(0, 3));
CvMat dpdt = cvMat(Je.colRange(3, 6));
for( k = 0; k < 2; k++ )
{
@ -2363,7 +2363,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
// calculate projection/camera matrices
// these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
double fc_new = DBL_MAX;
CvPoint2D64f cc_new[2] = {{0,0}, {0,0}};
CvPoint2D64f cc_new[2] = {};
newImgSize = newImgSize.width * newImgSize.height != 0 ? newImgSize : imageSize;
const double ratio_x = (double)newImgSize.width / imageSize.width / 2;
@ -2375,8 +2375,8 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
{
const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;
CvPoint2D32f _pts[4];
CvPoint3D32f _pts_3[4];
CvPoint2D32f _pts[4] = {};
CvPoint3D32f _pts_3[4] = {};
CvMat pts = cvMat(1, 4, CV_32FC2, _pts);
CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);
@ -2485,18 +2485,22 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
if(roi1)
{
*roi1 = cv::Rect(cvCeil((inner1.x - cx1_0)*s + cx1),
*roi1 = cvRect(
cv::Rect(cvCeil((inner1.x - cx1_0)*s + cx1),
cvCeil((inner1.y - cy1_0)*s + cy1),
cvFloor(inner1.width*s), cvFloor(inner1.height*s))
& cv::Rect(0, 0, newImgSize.width, newImgSize.height);
& cv::Rect(0, 0, newImgSize.width, newImgSize.height)
);
}
if(roi2)
{
*roi2 = cv::Rect(cvCeil((inner2.x - cx2_0)*s + cx2),
*roi2 = cvRect(
cv::Rect(cvCeil((inner2.x - cx2_0)*s + cx2),
cvCeil((inner2.y - cy2_0)*s + cy2),
cvFloor(inner2.width*s), cvFloor(inner2.height*s))
& cv::Rect(0, 0, newImgSize.width, newImgSize.height);
& cv::Rect(0, 0, newImgSize.width, newImgSize.height)
);
}
}
@ -2557,7 +2561,7 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
(float)(inner.height*s));
cv::Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height));
r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
*validPixROI = r;
*validPixROI = cvRect(r);
}
}
else
@ -2589,7 +2593,7 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer );
cv::Rect r = inner;
r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
*validPixROI = r;
*validPixROI = cvRect(r);
}
}
@ -3162,30 +3166,29 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
Point3f* objPtData = objPtMat.ptr<Point3f>();
Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
for( i = 0; i < nimages; i++, j += ni )
{
Mat objpt = objectPoints.getMat(i);
Mat imgpt1 = imagePoints1.getMat(i);
ni = objpt.checkVector(3, CV_32F);
npoints.at<int>(i) = ni;
memcpy( objPtData + j, objpt.ptr(), ni*sizeof(objPtData[0]) );
memcpy( imgPtData1 + j, imgpt1.ptr(), ni*sizeof(imgPtData1[0]) );
for (int n = 0; n < ni; ++n)
{
objPtData[j + n] = objpt.ptr<Point3f>()[n];
imgPtData1[j + n] = imgpt1.ptr<Point2f>()[n];
}
if( imgPtData2 )
{
Mat imgpt2 = imagePoints2.getMat(i);
int ni2 = imgpt2.checkVector(2, CV_32F);
CV_Assert( ni == ni2 );
memcpy( imgPtData2 + j, imgpt2.ptr(), ni*sizeof(imgPtData2[0]) );
for (int n = 0; n < ni2; ++n)
{
imgPtData2[j + n] = imgpt2.ptr<Point2f>()[n];
}
}
}
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop
#endif
}
static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
@ -3228,11 +3231,11 @@ void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian)
bool v2m = src.cols == 1 || src.rows == 1;
_dst.create(3, v2m ? 3 : 1, src.depth());
Mat dst = _dst.getMat();
CvMat _csrc = src, _cdst = dst, _cjacobian;
CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _cjacobian;
if( _jacobian.needed() )
{
_jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth());
_cjacobian = _jacobian.getMat();
_cjacobian = cvMat(_jacobian.getMat());
}
bool ok = cvRodrigues2(&_csrc, &_cdst, _jacobian.needed() ? &_cjacobian : 0) > 0;
if( !ok )
@ -3247,7 +3250,8 @@ void cv::matMulDeriv( InputArray _Amat, InputArray _Bmat,
Mat A = _Amat.getMat(), B = _Bmat.getMat();
_dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type());
_dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type());
CvMat matA = A, matB = B, c_dABdA = _dABdA.getMat(), c_dABdB = _dABdB.getMat();
Mat dABdA = _dABdA.getMat(), dABdB = _dABdB.getMat();
CvMat matA = cvMat(A), matB = cvMat(B), c_dABdA = cvMat(dABdA), c_dABdB = cvMat(dABdB);
cvCalcMatMulDeriv(&matA, &matB, &c_dABdA, &c_dABdB);
}
@ -3267,8 +3271,8 @@ void cv::composeRT( InputArray _rvec1, InputArray _tvec1,
_tvec3.create(tvec1.size(), rtype);
Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat();
CvMat c_rvec1 = rvec1, c_tvec1 = tvec1, c_rvec2 = rvec2,
c_tvec2 = tvec2, c_rvec3 = rvec3, c_tvec3 = tvec3;
CvMat c_rvec1 = cvMat(rvec1), c_tvec1 = cvMat(tvec1), c_rvec2 = cvMat(rvec2),
c_tvec2 = cvMat(tvec2), c_rvec3 = cvMat(rvec3), c_tvec3 = cvMat(tvec3);
CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2;
CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0;
#define CV_COMPOSE_RT_PARAM(name) \
@ -3277,7 +3281,7 @@ void cv::composeRT( InputArray _rvec1, InputArray _tvec1,
{ \
_ ## name.create(3, 3, rtype); \
name = _ ## name.getMat(); \
p_ ## name = &(c_ ## name = name); \
p_ ## name = &(c_ ## name = cvMat(name)); \
}
CV_COMPOSE_RT_PARAM(dr3dr1); CV_COMPOSE_RT_PARAM(dr3dt1);
@ -3310,31 +3314,32 @@ void cv::projectPoints( InputArray _opoints,
_ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
Mat imagePoints = _ipoints.getMat();
CvMat c_imagePoints(imagePoints);
CvMat c_objectPoints = opoints;
CvMat c_imagePoints = cvMat(imagePoints);
CvMat c_objectPoints = cvMat(opoints);
Mat cameraMatrix = _cameraMatrix.getMat();
Mat rvec = _rvec.getMat(), tvec = _tvec.getMat();
CvMat c_cameraMatrix = cameraMatrix;
CvMat c_rvec = rvec, c_tvec = tvec;
CvMat c_cameraMatrix = cvMat(cameraMatrix);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
double dc0buf[5]={0};
Mat dc0(5,1,CV_64F,dc0buf);
Mat distCoeffs = _distCoeffs.getMat();
if( distCoeffs.empty() )
distCoeffs = dc0;
CvMat c_distCoeffs = distCoeffs;
CvMat c_distCoeffs = cvMat(distCoeffs);
int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
Mat jacobian;
if( _jacobian.needed() )
{
_jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F);
Mat jacobian = _jacobian.getMat();
pdpdrot = &(dpdrot = jacobian.colRange(0, 3));
pdpdt = &(dpdt = jacobian.colRange(3, 6));
pdpdf = &(dpdf = jacobian.colRange(6, 8));
pdpdc = &(dpdc = jacobian.colRange(8, 10));
pdpddist = &(dpddist = jacobian.colRange(10, 10+ndistCoeffs));
jacobian = _jacobian.getMat();
pdpdrot = &(dpdrot = cvMat(jacobian.colRange(0, 3)));
pdpdt = &(dpdt = cvMat(jacobian.colRange(3, 6)));
pdpdf = &(dpdf = cvMat(jacobian.colRange(6, 8)));
pdpdc = &(dpdc = cvMat(jacobian.colRange(8, 10)));
pdpddist = &(dpddist = cvMat(jacobian.colRange(10, 10+ndistCoeffs)));
}
cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs,
@ -3350,9 +3355,9 @@ cv::Mat cv::initCameraMatrix2D( InputArrayOfArrays objectPoints,
Mat objPt, imgPt, npoints, cameraMatrix(3, 3, CV_64F);
collectCalibrationData( objectPoints, imagePoints, noArray(),
objPt, imgPt, 0, npoints );
CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints, _cameraMatrix = cameraMatrix;
CvMat _objPt = cvMat(objPt), _imgPt = cvMat(imgPt), _npoints = cvMat(npoints), _cameraMatrix = cvMat(cameraMatrix);
cvInitIntrinsicParams2D( &_objPt, &_imgPt, &_npoints,
imageSize, &_cameraMatrix, aspectRatio );
cvSize(imageSize), &_cameraMatrix, aspectRatio );
return cameraMatrix;
}
@ -3434,16 +3439,16 @@ double cv::calibrateCamera(InputArrayOfArrays _objectPoints,
collectCalibrationData( _objectPoints, _imagePoints, noArray(),
objPt, imgPt, 0, npoints );
CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
CvMat c_rvecM = rvecM, c_tvecM = tvecM, c_stdDev = stdDeviationsM, c_errors = errorsM;
CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_npoints = cvMat(npoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvecM = cvMat(rvecM), c_tvecM = cvMat(tvecM), c_stdDev = cvMat(stdDeviationsM), c_errors = cvMat(errorsM);
double reprojErr = cvCalibrateCamera2Internal(&c_objPt, &c_imgPt, &c_npoints, imageSize,
double reprojErr = cvCalibrateCamera2Internal(&c_objPt, &c_imgPt, &c_npoints, cvSize(imageSize),
&c_cameraMatrix, &c_distCoeffs,
rvecs_needed ? &c_rvecM : NULL,
tvecs_needed ? &c_tvecM : NULL,
stddev_needed ? &c_stdDev : NULL,
errors_needed ? &c_errors : NULL, flags, criteria );
errors_needed ? &c_errors : NULL, flags, cvTermCriteria(criteria));
if( stddev_needed )
{
@ -3582,35 +3587,40 @@ double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2,
objPt, imgPt, &imgPt2, npoints );
CvMat c_objPt = objPt, c_imgPt = imgPt, c_imgPt2 = imgPt2, c_npoints = npoints;
CvMat c_cameraMatrix1 = cameraMatrix1, c_distCoeffs1 = distCoeffs1;
CvMat c_cameraMatrix2 = cameraMatrix2, c_distCoeffs2 = distCoeffs2;
CvMat c_matR = _Rmat.getMat(), c_matT = _Tmat.getMat(), c_matE, c_matF, c_matErr;
CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_imgPt2 = cvMat(imgPt2), c_npoints = cvMat(npoints);
CvMat c_cameraMatrix1 = cvMat(cameraMatrix1), c_distCoeffs1 = cvMat(distCoeffs1);
CvMat c_cameraMatrix2 = cvMat(cameraMatrix2), c_distCoeffs2 = cvMat(distCoeffs2);
Mat matR_ = _Rmat.getMat(), matT_ = _Tmat.getMat();
CvMat c_matR = cvMat(matR_), c_matT = cvMat(matT_), c_matE, c_matF, c_matErr;
bool E_needed = _Emat.needed(), F_needed = _Fmat.needed(), errors_needed = _perViewErrors.needed();
Mat matE_, matF_, matErr_;
if( E_needed )
{
_Emat.create(3, 3, rtype);
c_matE = _Emat.getMat();
matE_ = _Emat.getMat();
c_matE = cvMat(matE_);
}
if( F_needed )
{
_Fmat.create(3, 3, rtype);
c_matF = _Fmat.getMat();
matF_ = _Fmat.getMat();
c_matF = cvMat(matF_);
}
if( errors_needed )
{
int nimages = int(_objectPoints.total());
_perViewErrors.create(nimages, 2, CV_64F);
c_matErr = _perViewErrors.getMat();
matErr_ = _perViewErrors.getMat();
c_matErr = cvMat(matErr_);
}
double err = cvStereoCalibrateImpl(&c_objPt, &c_imgPt, &c_imgPt2, &c_npoints, &c_cameraMatrix1,
&c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, imageSize, &c_matR,
&c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, cvSize(imageSize), &c_matR,
&c_matT, E_needed ? &c_matE : NULL, F_needed ? &c_matF : NULL,
errors_needed ? &c_matErr : NULL, flags, criteria);
errors_needed ? &c_matErr : NULL, flags, cvTermCriteria(criteria));
cameraMatrix1.copyTo(_cameraMatrix1);
cameraMatrix2.copyTo(_cameraMatrix2);
@ -3633,31 +3643,32 @@ void cv::stereoRectify( InputArray _cameraMatrix1, InputArray _distCoeffs1,
Mat cameraMatrix1 = _cameraMatrix1.getMat(), cameraMatrix2 = _cameraMatrix2.getMat();
Mat distCoeffs1 = _distCoeffs1.getMat(), distCoeffs2 = _distCoeffs2.getMat();
Mat Rmat = _Rmat.getMat(), Tmat = _Tmat.getMat();
CvMat c_cameraMatrix1 = cameraMatrix1;
CvMat c_cameraMatrix2 = cameraMatrix2;
CvMat c_distCoeffs1 = distCoeffs1;
CvMat c_distCoeffs2 = distCoeffs2;
CvMat c_R = Rmat, c_T = Tmat;
CvMat c_cameraMatrix1 = cvMat(cameraMatrix1);
CvMat c_cameraMatrix2 = cvMat(cameraMatrix2);
CvMat c_distCoeffs1 = cvMat(distCoeffs1);
CvMat c_distCoeffs2 = cvMat(distCoeffs2);
CvMat c_R = cvMat(Rmat), c_T = cvMat(Tmat);
int rtype = CV_64F;
_Rmat1.create(3, 3, rtype);
_Rmat2.create(3, 3, rtype);
_Pmat1.create(3, 4, rtype);
_Pmat2.create(3, 4, rtype);
CvMat c_R1 = _Rmat1.getMat(), c_R2 = _Rmat2.getMat(), c_P1 = _Pmat1.getMat(), c_P2 = _Pmat2.getMat();
Mat R1 = _Rmat1.getMat(), R2 = _Rmat2.getMat(), P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat(), Q;
CvMat c_R1 = cvMat(R1), c_R2 = cvMat(R2), c_P1 = cvMat(P1), c_P2 = cvMat(P2);
CvMat c_Q, *p_Q = 0;
if( _Qmat.needed() )
{
_Qmat.create(4, 4, rtype);
p_Q = &(c_Q = _Qmat.getMat());
p_Q = &(c_Q = cvMat(Q = _Qmat.getMat()));
}
CvMat *p_distCoeffs1 = distCoeffs1.empty() ? NULL : &c_distCoeffs1;
CvMat *p_distCoeffs2 = distCoeffs2.empty() ? NULL : &c_distCoeffs2;
cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, p_distCoeffs1, p_distCoeffs2,
imageSize, &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha,
newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
cvSize(imageSize), &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha,
cvSize(newImageSize), (CvRect*)validPixROI1, (CvRect*)validPixROI2);
}
bool cv::stereoRectifyUncalibrated( InputArray _points1, InputArray _points2,
@ -3671,11 +3682,12 @@ bool cv::stereoRectifyUncalibrated( InputArray _points1, InputArray _points2,
_Hmat2.create(3, 3, rtype);
Mat F = _Fmat.getMat();
Mat points1 = _points1.getMat(), points2 = _points2.getMat();
CvMat c_pt1 = points1, c_pt2 = points2;
CvMat c_F, *p_F=0, c_H1 = _Hmat1.getMat(), c_H2 = _Hmat2.getMat();
CvMat c_pt1 = cvMat(points1), c_pt2 = cvMat(points2);
Mat H1 = _Hmat1.getMat(), H2 = _Hmat2.getMat();
CvMat c_F, *p_F=0, c_H1 = cvMat(H1), c_H2 = cvMat(H2);
if( F.size() == Size(3, 3) )
p_F = &(c_F = F);
return cvStereoRectifyUncalibrated(&c_pt1, &c_pt2, p_F, imgSize, &c_H1, &c_H2, threshold) > 0;
p_F = &(c_F = cvMat(F));
return cvStereoRectifyUncalibrated(&c_pt1, &c_pt2, p_F, cvSize(imgSize), &c_H1, &c_H2, threshold) > 0;
}
cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix,
@ -3686,14 +3698,14 @@ cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix,
CV_INSTRUMENT_REGION()
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
Mat newCameraMatrix(3, 3, CV_MAT_TYPE(c_cameraMatrix.type));
CvMat c_newCameraMatrix = newCameraMatrix;
CvMat c_newCameraMatrix = cvMat(newCameraMatrix);
cvGetOptimalNewCameraMatrix(&c_cameraMatrix, &c_distCoeffs, imgSize,
cvGetOptimalNewCameraMatrix(&c_cameraMatrix, &c_distCoeffs, cvSize(imgSize),
alpha, &c_newCameraMatrix,
newImgSize, (CvRect*)validPixROI, (int)centerPrincipalPoint);
cvSize(newImgSize), (CvRect*)validPixROI, (int)centerPrincipalPoint);
return newCameraMatrix;
}
@ -3714,7 +3726,7 @@ cv::Vec3d cv::RQDecomp3x3( InputArray _Mmat,
Mat Qmat = _Qmat.getMat();
Vec3d eulerAngles;
CvMat matM = M, matR = Rmat, matQ = Qmat;
CvMat matM = cvMat(M), matR = cvMat(Rmat), matQ = cvMat(Qmat);
#define CV_RQDecomp3x3_PARAM(name) \
Mat name; \
CvMat c_ ## name, *p ## name = NULL; \
@ -3722,7 +3734,7 @@ cv::Vec3d cv::RQDecomp3x3( InputArray _Mmat,
{ \
_ ## name.create(3, 3, M.type()); \
name = _ ## name.getMat(); \
c_ ## name = name; p ## name = &c_ ## name; \
c_ ## name = cvMat(name); p ## name = &c_ ## name; \
}
CV_RQDecomp3x3_PARAM(Qx);
@ -3749,8 +3761,8 @@ void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraM
Mat cameraMatrix = _cameraMatrix.getMat();
Mat rotMatrix = _rotMatrix.getMat();
Mat transVect = _transVect.getMat();
CvMat c_projMatrix = projMatrix, c_cameraMatrix = cameraMatrix;
CvMat c_rotMatrix = rotMatrix, c_transVect = transVect;
CvMat c_projMatrix = cvMat(projMatrix), c_cameraMatrix = cvMat(cameraMatrix);
CvMat c_rotMatrix = cvMat(rotMatrix), c_transVect = cvMat(transVect);
CvPoint3D64f *p_eulerAngles = 0;
#define CV_decomposeProjectionMatrix_PARAM(name) \
@ -3760,7 +3772,7 @@ void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraM
{ \
_ ## name.create(3, 3, type); \
name = _ ## name.getMat(); \
c_ ## name = name; p_ ## name = &c_ ## name; \
c_ ## name = cvMat(name); p_ ## name = &c_ ## name; \
}
CV_decomposeProjectionMatrix_PARAM(rotMatrixX);

View File

@ -111,8 +111,8 @@ void cvFindStereoCorrespondenceBM( const CvArr* leftarr, const CvArr* rightarr,
CvRect cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize )
{
return (CvRect)cv::getValidDisparityROI( roi1, roi2, minDisparity,
numberOfDisparities, SADWindowSize );
return cvRect(cv::getValidDisparityROI( roi1, roi2, minDisparity,
numberOfDisparities, SADWindowSize));
}
void cvValidateDisparity( CvArr* _disp, const CvArr* _cost, int minDisparity,

View File

@ -134,9 +134,9 @@ bool solvePnP( InputArray _opoints, InputArray _ipoints,
}
else if (flags == SOLVEPNP_ITERATIVE)
{
CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
CvMat c_rvec = rvec, c_tvec = tvec;
CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
(c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );

View File

@ -358,11 +358,12 @@ void cv::triangulatePoints( InputArray _projMatr1, InputArray _projMatr2,
if((points2.rows == 1 || points2.cols == 1) && points2.channels() == 2)
points2 = points2.reshape(1, static_cast<int>(points2.total())).t();
CvMat cvMatr1 = matr1, cvMatr2 = matr2;
CvMat cvPoints1 = points1, cvPoints2 = points2;
CvMat cvMatr1 = cvMat(matr1), cvMatr2 = cvMat(matr2);
CvMat cvPoints1 = cvMat(points1), cvPoints2 = cvMat(points2);
_points4D.create(4, points1.cols, points1.type());
CvMat cvPoints4D = _points4D.getMat();
Mat cvPoints4D_ = _points4D.getMat();
CvMat cvPoints4D = cvMat(cvPoints4D_);
cvTriangulatePoints(&cvMatr1, &cvMatr2, &cvPoints1, &cvPoints2, &cvPoints4D);
}
@ -375,12 +376,13 @@ void cv::correctMatches( InputArray _F, InputArray _points1, InputArray _points2
Mat F = _F.getMat();
Mat points1 = _points1.getMat(), points2 = _points2.getMat();
CvMat cvPoints1 = points1, cvPoints2 = points2;
CvMat cvF = F;
CvMat cvPoints1 = cvMat(points1), cvPoints2 = cvMat(points2);
CvMat cvF = cvMat(F);
_newPoints1.create(points1.size(), points1.type());
_newPoints2.create(points2.size(), points2.type());
CvMat cvNewPoints1 = _newPoints1.getMat(), cvNewPoints2 = _newPoints2.getMat();
Mat cvNewPoints1_ = _newPoints1.getMat(), cvNewPoints2_ = _newPoints2.getMat();
CvMat cvNewPoints1 = cvMat(cvNewPoints1_), cvNewPoints2 = cvMat(cvNewPoints2_);
cvCorrectMatches(&cvF, &cvPoints1, &cvPoints2, &cvNewPoints1, &cvNewPoints2);
}

View File

@ -47,16 +47,15 @@ namespace opencv_test { namespace {
TEST(Calib3d_Affine3f, accuracy)
{
const double eps = 1e-5;
cv::Vec3d rvec(0.2, 0.5, 0.3);
cv::Affine3d affine(rvec);
cv::Mat expected;
cv::Rodrigues(rvec, expected);
ASSERT_EQ(0, cvtest::norm(cv::Mat(affine.matrix, false).colRange(0, 3).rowRange(0, 3) != expected, cv::NORM_L2));
ASSERT_EQ(0, cvtest::norm(cv::Mat(affine.linear()) != expected, cv::NORM_L2));
ASSERT_LE(cvtest::norm(cv::Mat(affine.matrix, false).colRange(0, 3).rowRange(0, 3), expected, cv::NORM_L2), eps);
ASSERT_LE(cvtest::norm(cv::Mat(affine.linear()), expected, cv::NORM_L2), eps);
cv::Matx33d R = cv::Matx33d::eye();

View File

@ -290,8 +290,8 @@ void CV_CameraCalibrationTest::run( int start_from )
cv::String filepath;
cv::String filename;
CvSize imageSize;
CvSize etalonSize;
Size imageSize;
Size etalonSize;
int numImages;
CvPoint2D64f* imagePoints;
@ -531,7 +531,7 @@ void CV_CameraCalibrationTest::run( int start_from )
/* Now we can calibrate camera */
calibrate( numImages,
numbers,
imageSize,
cvSize(imageSize),
imagePoints,
objectPoints,
distortion,
@ -1009,9 +1009,9 @@ void CV_CalibrationMatrixValuesTest_C::calibMatrixValues( const Mat& _cameraMatr
double& fovx, double& fovy, double& focalLength,
Point2d& principalPoint, double& aspectRatio )
{
CvMat cameraMatrix = _cameraMatrix;
CvPoint2D64f pp;
cvCalibrationMatrixValues( &cameraMatrix, imageSize, apertureWidth, apertureHeight,
CvMat cameraMatrix = cvMat(_cameraMatrix);
CvPoint2D64f pp = {0, 0};
cvCalibrationMatrixValues( &cameraMatrix, cvSize(imageSize), apertureWidth, apertureHeight,
&fovx, &fovy, &focalLength, &pp, &aspectRatio );
principalPoint.x = pp.x;
principalPoint.y = pp.y;
@ -1305,9 +1305,9 @@ void CV_ProjectPointsTest_C::project( const Mat& opoints, const Mat& rvec, const
dpdc.create(npoints*2, 2, CV_64F);
dpddist.create(npoints*2, distCoeffs.rows + distCoeffs.cols - 1, CV_64F);
Mat imagePoints(ipoints);
CvMat _objectPoints = opoints, _imagePoints = imagePoints;
CvMat _rvec = rvec, _tvec = tvec, _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
CvMat _dpdrot = dpdrot, _dpdt = dpdt, _dpdf = dpdf, _dpdc = dpdc, _dpddist = dpddist;
CvMat _objectPoints = cvMat(opoints), _imagePoints = cvMat(imagePoints);
CvMat _rvec = cvMat(rvec), _tvec = cvMat(tvec), _cameraMatrix = cvMat(cameraMatrix), _distCoeffs = cvMat(distCoeffs);
CvMat _dpdrot = cvMat(dpdrot), _dpdt = cvMat(dpdt), _dpdf = cvMat(dpdf), _dpdc = cvMat(dpdc), _dpddist = cvMat(dpddist);
cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix, &_distCoeffs,
&_imagePoints, &_dpdrot, &_dpdt, &_dpdf, &_dpdc, &_dpddist, aspectRatio );
@ -1925,14 +1925,14 @@ double CV_StereoCalibrationTest_C::calibrateStereoCamera( const vector<vector<Po
std::copy(imagePoints1[i].begin(), imagePoints1[i].end(), imgPtData + j);
std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
}
CvMat _objPt = objPt, _imgPt = imgPt, _imgPt2 = imgPt2, _npoints = npoints;
CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
CvMat matR = R, matT = T, matE = E, matF = F;
CvMat _objPt = cvMat(objPt), _imgPt = cvMat(imgPt), _imgPt2 = cvMat(imgPt2), _npoints = cvMat(npoints);
CvMat _cameraMatrix1 = cvMat(cameraMatrix1), _distCoeffs1 = cvMat(distCoeffs1);
CvMat _cameraMatrix2 = cvMat(cameraMatrix2), _distCoeffs2 = cvMat(distCoeffs2);
CvMat matR = cvMat(R), matT = cvMat(T), matE = cvMat(E), matF = cvMat(F);
return cvStereoCalibrate(&_objPt, &_imgPt, &_imgPt2, &_npoints, &_cameraMatrix1,
&_distCoeffs1, &_cameraMatrix2, &_distCoeffs2, imageSize,
&matR, &matT, &matE, &matF, flags, criteria );
&_distCoeffs1, &_cameraMatrix2, &_distCoeffs2, cvSize(imageSize),
&matR, &matT, &matE, &matF, flags, cvTermCriteria(criteria));
}
void CV_StereoCalibrationTest_C::rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
@ -1948,12 +1948,12 @@ void CV_StereoCalibrationTest_C::rectify( const Mat& cameraMatrix1, const Mat& d
P1.create(3, 4, rtype);
P2.create(3, 4, rtype);
Q.create(4, 4, rtype);
CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
CvMat matR = R, matT = T, _R1 = R1, _R2 = R2, _P1 = P1, _P2 = P2, matQ = Q;
CvMat _cameraMatrix1 = cvMat(cameraMatrix1), _distCoeffs1 = cvMat(distCoeffs1);
CvMat _cameraMatrix2 = cvMat(cameraMatrix2), _distCoeffs2 = cvMat(distCoeffs2);
CvMat matR = cvMat(R), matT = cvMat(T), _R1 = cvMat(R1), _R2 = cvMat(R2), _P1 = cvMat(P1), _P2 = cvMat(P2), matQ = cvMat(Q);
cvStereoRectify( &_cameraMatrix1, &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2,
imageSize, &matR, &matT, &_R1, &_R2, &_P1, &_P2, &matQ, flags,
alpha, newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
cvSize(imageSize), &matR, &matT, &_R1, &_R2, &_P1, &_P2, &matQ, flags,
alpha, cvSize(newImageSize), (CvRect*)validPixROI1, (CvRect*)validPixROI2);
}
bool CV_StereoCalibrationTest_C::rectifyUncalibrated( const Mat& points1,
@ -1961,19 +1961,19 @@ bool CV_StereoCalibrationTest_C::rectifyUncalibrated( const Mat& points1,
{
H1.create(3, 3, CV_64F);
H2.create(3, 3, CV_64F);
CvMat _pt1 = points1, _pt2 = points2, matF, *pF=0, _H1 = H1, _H2 = H2;
CvMat _pt1 = cvMat(points1), _pt2 = cvMat(points2), matF, *pF=0, _H1 = cvMat(H1), _H2 = cvMat(H2);
if( F.size() == Size(3, 3) )
pF = &(matF = F);
return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, imgSize, &_H1, &_H2, threshold) > 0;
pF = &(matF = cvMat(F));
return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, cvSize(imgSize), &_H1, &_H2, threshold) > 0;
}
void CV_StereoCalibrationTest_C::triangulate( const Mat& P1, const Mat& P2,
const Mat &points1, const Mat &points2,
Mat &points4D )
{
CvMat _P1 = P1, _P2 = P2, _points1 = points1, _points2 = points2;
CvMat _P1 = cvMat(P1), _P2 = cvMat(P2), _points1 = cvMat(points1), _points2 = cvMat(points2);
points4D.create(4, points1.cols, points1.type());
CvMat _points4D = points4D;
CvMat _points4D = cvMat(points4D);
cvTriangulatePoints(&_P1, &_P2, &_points1, &_points2, &_points4D);
}
@ -1981,10 +1981,10 @@ void CV_StereoCalibrationTest_C::correct( const Mat& F,
const Mat &points1, const Mat &points2,
Mat &newPoints1, Mat &newPoints2 )
{
CvMat _F = F, _points1 = points1, _points2 = points2;
CvMat _F = cvMat(F), _points1 = cvMat(points1), _points2 = cvMat(points2);
newPoints1.create(1, points1.cols, points1.type());
newPoints2.create(1, points2.cols, points2.type());
CvMat _newPoints1 = newPoints1, _newPoints2 = newPoints2;
CvMat _newPoints1 = cvMat(newPoints1), _newPoints2 = cvMat(newPoints2);
cvCorrectMatches(&_F, &_points1, &_points2, &_newPoints1, &_newPoints2);
}

View File

@ -75,7 +75,7 @@ protected:
void operator()() const
{
cvCalibrateCamera2(objPts, imgPts, npoints, imageSize,
cvCalibrateCamera2(objPts, imgPts, npoints, cvSize(imageSize),
cameraMatrix, distCoeffs, rvecs, tvecs, flags );
}
};
@ -137,13 +137,13 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
//CV_CALIB_FIX_PRINCIPAL_POINT //CV_CALIB_ZERO_TANGENT_DIST
//CV_CALIB_FIX_FOCAL_LENGTH //CV_CALIB_FIX_K1 //CV_CALIB_FIX_K2 //CV_CALIB_FIX_K3
objPts = objPts_cpp;
imgPts = imgPts_cpp;
npoints = npoints_cpp;
cameraMatrix = cameraMatrix_cpp;
distCoeffs = distCoeffs_cpp;
rvecs = rvecs_cpp;
tvecs = tvecs_cpp;
objPts = cvMat(objPts_cpp);
imgPts = cvMat(imgPts_cpp);
npoints = cvMat(npoints_cpp);
cameraMatrix = cvMat(cameraMatrix_cpp);
distCoeffs = cvMat(distCoeffs_cpp);
rvecs = cvMat(rvecs_cpp);
tvecs = cvMat(tvecs_cpp);
/* /*//*/ */
int errors = 0;
@ -178,8 +178,8 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
Mat bad_nts_cpp1 = Mat_<float>(M, 1, 1.f);
Mat bad_nts_cpp2 = Mat_<int>(3, 3, corSize.width * corSize.height);
CvMat bad_npts_c1 = bad_nts_cpp1;
CvMat bad_npts_c2 = bad_nts_cpp2;
CvMat bad_npts_c1 = cvMat(bad_nts_cpp1);
CvMat bad_npts_c2 = cvMat(bad_nts_cpp2);
bad_caller = caller;
bad_caller.npoints = &bad_npts_c1;
@ -197,13 +197,13 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
bad_caller.tvecs = (CvMat*)zeros.ptr();
errors += run_test_case( CV_StsBadArg, "Bad tvecs header", bad_caller );
Mat bad_rvecs_cpp1(M+1, 1, CV_32FC3); CvMat bad_rvecs_c1 = bad_rvecs_cpp1;
Mat bad_tvecs_cpp1(M+1, 1, CV_32FC3); CvMat bad_tvecs_c1 = bad_tvecs_cpp1;
Mat bad_rvecs_cpp1(M+1, 1, CV_32FC3); CvMat bad_rvecs_c1 = cvMat(bad_rvecs_cpp1);
Mat bad_tvecs_cpp1(M+1, 1, CV_32FC3); CvMat bad_tvecs_c1 = cvMat(bad_tvecs_cpp1);
Mat bad_rvecs_cpp2(M, 2, CV_32FC3); CvMat bad_rvecs_c2 = bad_rvecs_cpp2;
Mat bad_tvecs_cpp2(M, 2, CV_32FC3); CvMat bad_tvecs_c2 = bad_tvecs_cpp2;
Mat bad_rvecs_cpp2(M, 2, CV_32FC3); CvMat bad_rvecs_c2 = cvMat(bad_rvecs_cpp2);
Mat bad_tvecs_cpp2(M, 2, CV_32FC3); CvMat bad_tvecs_c2 = cvMat(bad_tvecs_cpp2);
bad_caller = caller;
bad_caller.rvecs = &bad_rvecs_c1;
@ -221,9 +221,9 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
bad_caller.tvecs = &bad_tvecs_c2;
errors += run_test_case( CV_StsBadArg, "Bad tvecs header", bad_caller );
Mat bad_cameraMatrix_cpp1(3, 3, CV_32S); CvMat bad_cameraMatrix_c1 = bad_cameraMatrix_cpp1;
Mat bad_cameraMatrix_cpp2(2, 3, CV_32F); CvMat bad_cameraMatrix_c2 = bad_cameraMatrix_cpp2;
Mat bad_cameraMatrix_cpp3(3, 2, CV_64F); CvMat bad_cameraMatrix_c3 = bad_cameraMatrix_cpp3;
Mat bad_cameraMatrix_cpp1(3, 3, CV_32S); CvMat bad_cameraMatrix_c1 = cvMat(bad_cameraMatrix_cpp1);
Mat bad_cameraMatrix_cpp2(2, 3, CV_32F); CvMat bad_cameraMatrix_c2 = cvMat(bad_cameraMatrix_cpp2);
Mat bad_cameraMatrix_cpp3(3, 2, CV_64F); CvMat bad_cameraMatrix_c3 = cvMat(bad_cameraMatrix_cpp3);
@ -239,9 +239,9 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
bad_caller.cameraMatrix = &bad_cameraMatrix_c3;
errors += run_test_case( CV_StsBadArg, "Bad camearaMatrix header", bad_caller );
Mat bad_distCoeffs_cpp1(1, 5, CV_32S); CvMat bad_distCoeffs_c1 = bad_distCoeffs_cpp1;
Mat bad_distCoeffs_cpp2(2, 2, CV_64F); CvMat bad_distCoeffs_c2 = bad_distCoeffs_cpp2;
Mat bad_distCoeffs_cpp3(1, 6, CV_64F); CvMat bad_distCoeffs_c3 = bad_distCoeffs_cpp3;
Mat bad_distCoeffs_cpp1(1, 5, CV_32S); CvMat bad_distCoeffs_c1 = cvMat(bad_distCoeffs_cpp1);
Mat bad_distCoeffs_cpp2(2, 2, CV_64F); CvMat bad_distCoeffs_c2 = cvMat(bad_distCoeffs_cpp2);
Mat bad_distCoeffs_cpp3(1, 6, CV_64F); CvMat bad_distCoeffs_c3 = cvMat(bad_distCoeffs_cpp3);
@ -259,7 +259,7 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
errors += run_test_case( CV_StsBadArg, "Bad distCoeffs header", bad_caller );
double CM[] = {0, 0, 0, /**/0, 0, 0, /**/0, 0, 0};
Mat bad_cameraMatrix_cpp4(3, 3, CV_64F, CM); CvMat bad_cameraMatrix_c4 = bad_cameraMatrix_cpp4;
Mat bad_cameraMatrix_cpp4(3, 3, CV_64F, CM); CvMat bad_cameraMatrix_c4 = cvMat(bad_cameraMatrix_cpp4);
bad_caller = caller;
bad_caller.flags |= CV_CALIB_USE_INTRINSIC_GUESS;
@ -302,7 +302,7 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
/////////////////////////////////////////////////////////////////////////////////////
bad_caller = caller;
Mat bad_objPts_cpp5 = objPts_cpp.clone(); CvMat bad_objPts_c5 = bad_objPts_cpp5;
Mat bad_objPts_cpp5 = objPts_cpp.clone(); CvMat bad_objPts_c5 = cvMat(bad_objPts_cpp5);
bad_caller.objPts = &bad_objPts_c5;
cv::RNG& rng = theRNG();
@ -347,9 +347,9 @@ protected:
Mat zeros(1, sizeof(CvMat), CV_8U, Scalar(0));
CvMat src_c, dst_c, jacobian_c;
Mat src_cpp(3, 1, CV_32F); src_c = src_cpp;
Mat dst_cpp(3, 3, CV_32F); dst_c = dst_cpp;
Mat jacobian_cpp(3, 9, CV_32F); jacobian_c = jacobian_cpp;
Mat src_cpp(3, 1, CV_32F); src_c = cvMat(src_cpp);
Mat dst_cpp(3, 3, CV_32F); dst_c = cvMat(dst_cpp);
Mat jacobian_cpp(3, 9, CV_32F); jacobian_c = cvMat(jacobian_cpp);
C_Caller caller, bad_caller;
caller.src = &src_c;
@ -373,11 +373,11 @@ protected:
bad_caller.dst = 0;
errors += run_test_case( CV_StsNullPtr, "Dst is zero pointer", bad_caller );
Mat bad_src_cpp1(3, 1, CV_8U); CvMat bad_src_c1 = bad_src_cpp1;
Mat bad_dst_cpp1(3, 1, CV_8U); CvMat bad_dst_c1 = bad_dst_cpp1;
Mat bad_jac_cpp1(3, 1, CV_8U); CvMat bad_jac_c1 = bad_jac_cpp1;
Mat bad_jac_cpp2(3, 1, CV_32FC2); CvMat bad_jac_c2 = bad_jac_cpp2;
Mat bad_jac_cpp3(3, 1, CV_32F); CvMat bad_jac_c3 = bad_jac_cpp3;
Mat bad_src_cpp1(3, 1, CV_8U); CvMat bad_src_c1 = cvMat(bad_src_cpp1);
Mat bad_dst_cpp1(3, 1, CV_8U); CvMat bad_dst_c1 = cvMat(bad_dst_cpp1);
Mat bad_jac_cpp1(3, 1, CV_8U); CvMat bad_jac_c1 = cvMat(bad_jac_cpp1);
Mat bad_jac_cpp2(3, 1, CV_32FC2); CvMat bad_jac_c2 = cvMat(bad_jac_cpp2);
Mat bad_jac_cpp3(3, 1, CV_32F); CvMat bad_jac_c3 = cvMat(bad_jac_cpp3);
bad_caller = caller;
bad_caller.src = &bad_src_c1;
@ -403,15 +403,15 @@ protected:
bad_caller.jacobian = &bad_jac_c3;
errors += run_test_case( CV_StsBadSize, "Bad jacobian format", bad_caller );
Mat bad_src_cpp2(1, 1, CV_32F); CvMat bad_src_c2 = bad_src_cpp2;
Mat bad_src_cpp2(1, 1, CV_32F); CvMat bad_src_c2 = cvMat(bad_src_cpp2);
bad_caller = caller;
bad_caller.src = &bad_src_c2;
errors += run_test_case( CV_StsBadSize, "Bad src format", bad_caller );
Mat bad_dst_cpp2(2, 1, CV_32F); CvMat bad_dst_c2 = bad_dst_cpp2;
Mat bad_dst_cpp3(3, 2, CV_32F); CvMat bad_dst_c3 = bad_dst_cpp3;
Mat bad_dst_cpp4(3, 3, CV_32FC2); CvMat bad_dst_c4 = bad_dst_cpp4;
Mat bad_dst_cpp2(2, 1, CV_32F); CvMat bad_dst_c2 = cvMat(bad_dst_cpp2);
Mat bad_dst_cpp3(3, 2, CV_32F); CvMat bad_dst_c3 = cvMat(bad_dst_cpp3);
Mat bad_dst_cpp4(3, 3, CV_32FC2); CvMat bad_dst_c4 = cvMat(bad_dst_cpp4);
bad_caller = caller;
bad_caller.dst = &bad_dst_c2;
@ -427,11 +427,11 @@ protected:
/********/
src_cpp.create(3, 3, CV_32F); src_c = src_cpp;
dst_cpp.create(3, 1, CV_32F); dst_c = dst_cpp;
src_cpp.create(3, 3, CV_32F); src_c = cvMat(src_cpp);
dst_cpp.create(3, 1, CV_32F); dst_c = cvMat(dst_cpp);
Mat bad_dst_cpp5(5, 5, CV_32F); CvMat bad_dst_c5 = bad_dst_cpp5;
Mat bad_dst_cpp5(5, 5, CV_32F); CvMat bad_dst_c5 = cvMat(bad_dst_cpp5);
bad_caller = caller;
bad_caller.dst = &bad_dst_c5;
@ -488,15 +488,7 @@ protected:
void run(int /* start_from */ )
{
CvMat zeros;
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
memset(&zeros, 0, sizeof(zeros));
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop
#endif
CvMat zeros = CvMat();
C_Caller caller, bad_caller;
CvMat objectPoints_c, r_vec_c, t_vec_c, A_c, distCoeffs_c, imagePoints_c,
@ -504,24 +496,24 @@ protected:
const int n = 10;
Mat imagePoints_cpp(1, n, CV_32FC2); imagePoints_c = imagePoints_cpp;
Mat imagePoints_cpp(1, n, CV_32FC2); imagePoints_c = cvMat(imagePoints_cpp);
Mat objectPoints_cpp(1, n, CV_32FC3);
randu(objectPoints_cpp, Scalar::all(1), Scalar::all(10));
objectPoints_c = objectPoints_cpp;
objectPoints_c = cvMat(objectPoints_cpp);
Mat t_vec_cpp(Mat::zeros(1, 3, CV_32F)); t_vec_c = t_vec_cpp;
Mat t_vec_cpp(Mat::zeros(1, 3, CV_32F)); t_vec_c = cvMat(t_vec_cpp);
Mat r_vec_cpp(3, 1, CV_32F);
cvtest::Rodrigues(Mat::eye(3, 3, CV_32F), r_vec_cpp); r_vec_c = r_vec_cpp;
cvtest::Rodrigues(Mat::eye(3, 3, CV_32F), r_vec_cpp); r_vec_c = cvMat(r_vec_cpp);
Mat A_cpp = camMat.clone(); A_c = A_cpp;
Mat distCoeffs_cpp = distCoeffs.clone(); distCoeffs_c = distCoeffs_cpp;
Mat A_cpp = camMat.clone(); A_c = cvMat(A_cpp);
Mat distCoeffs_cpp = distCoeffs.clone(); distCoeffs_c = cvMat(distCoeffs_cpp);
Mat dpdr_cpp(2*n, 3, CV_32F); dpdr_c = dpdr_cpp;
Mat dpdt_cpp(2*n, 3, CV_32F); dpdt_c = dpdt_cpp;
Mat dpdf_cpp(2*n, 2, CV_32F); dpdf_c = dpdf_cpp;
Mat dpdc_cpp(2*n, 2, CV_32F); dpdc_c = dpdc_cpp;
Mat dpdk_cpp(2*n, 4, CV_32F); dpdk_c = dpdk_cpp;
Mat dpdr_cpp(2*n, 3, CV_32F); dpdr_c = cvMat(dpdr_cpp);
Mat dpdt_cpp(2*n, 3, CV_32F); dpdt_c = cvMat(dpdt_cpp);
Mat dpdf_cpp(2*n, 2, CV_32F); dpdf_c = cvMat(dpdf_cpp);
Mat dpdc_cpp(2*n, 2, CV_32F); dpdc_c = cvMat(dpdc_cpp);
Mat dpdk_cpp(2*n, 4, CV_32F); dpdk_c = cvMat(dpdk_cpp);
caller.aspectRatio = 1.0;
caller.objectPoints = &objectPoints_c;
@ -561,9 +553,9 @@ protected:
errors += run_test_case( CV_StsBadArg, "Zero imagePoints", bad_caller );
/****************************/
Mat bad_r_vec_cpp1(r_vec_cpp.size(), CV_32S); CvMat bad_r_vec_c1 = bad_r_vec_cpp1;
Mat bad_r_vec_cpp2(2, 2, CV_32F); CvMat bad_r_vec_c2 = bad_r_vec_cpp2;
Mat bad_r_vec_cpp3(r_vec_cpp.size(), CV_32FC2); CvMat bad_r_vec_c3 = bad_r_vec_cpp3;
Mat bad_r_vec_cpp1(r_vec_cpp.size(), CV_32S); CvMat bad_r_vec_c1 = cvMat(bad_r_vec_cpp1);
Mat bad_r_vec_cpp2(2, 2, CV_32F); CvMat bad_r_vec_c2 = cvMat(bad_r_vec_cpp2);
Mat bad_r_vec_cpp3(r_vec_cpp.size(), CV_32FC2); CvMat bad_r_vec_c3 = cvMat(bad_r_vec_cpp3);
bad_caller = caller;
bad_caller.r_vec = &bad_r_vec_c1;
@ -578,9 +570,9 @@ protected:
errors += run_test_case( CV_StsBadArg, "Bad rvec format", bad_caller );
/****************************/
Mat bad_t_vec_cpp1(t_vec_cpp.size(), CV_32S); CvMat bad_t_vec_c1 = bad_t_vec_cpp1;
Mat bad_t_vec_cpp2(2, 2, CV_32F); CvMat bad_t_vec_c2 = bad_t_vec_cpp2;
Mat bad_t_vec_cpp3(1, 1, CV_32FC2); CvMat bad_t_vec_c3 = bad_t_vec_cpp3;
Mat bad_t_vec_cpp1(t_vec_cpp.size(), CV_32S); CvMat bad_t_vec_c1 = cvMat(bad_t_vec_cpp1);
Mat bad_t_vec_cpp2(2, 2, CV_32F); CvMat bad_t_vec_c2 = cvMat(bad_t_vec_cpp2);
Mat bad_t_vec_cpp3(1, 1, CV_32FC2); CvMat bad_t_vec_c3 = cvMat(bad_t_vec_cpp3);
bad_caller = caller;
bad_caller.t_vec = &bad_t_vec_c1;
@ -595,8 +587,8 @@ protected:
errors += run_test_case( CV_StsBadArg, "Bad tvec format", bad_caller );
/****************************/
Mat bad_A_cpp1(A_cpp.size(), CV_32S); CvMat bad_A_c1 = bad_A_cpp1;
Mat bad_A_cpp2(2, 2, CV_32F); CvMat bad_A_c2 = bad_A_cpp2;
Mat bad_A_cpp1(A_cpp.size(), CV_32S); CvMat bad_A_c1 = cvMat(bad_A_cpp1);
Mat bad_A_cpp2(2, 2, CV_32F); CvMat bad_A_c2 = cvMat(bad_A_cpp2);
bad_caller = caller;
bad_caller.A = &bad_A_c1;
@ -607,9 +599,9 @@ protected:
errors += run_test_case( CV_StsBadArg, "Bad A format", bad_caller );
/****************************/
Mat bad_distCoeffs_cpp1(distCoeffs_cpp.size(), CV_32S); CvMat bad_distCoeffs_c1 = bad_distCoeffs_cpp1;
Mat bad_distCoeffs_cpp2(2, 2, CV_32F); CvMat bad_distCoeffs_c2 = bad_distCoeffs_cpp2;
Mat bad_distCoeffs_cpp3(1, 7, CV_32F); CvMat bad_distCoeffs_c3 = bad_distCoeffs_cpp3;
Mat bad_distCoeffs_cpp1(distCoeffs_cpp.size(), CV_32S); CvMat bad_distCoeffs_c1 = cvMat(bad_distCoeffs_cpp1);
Mat bad_distCoeffs_cpp2(2, 2, CV_32F); CvMat bad_distCoeffs_c2 = cvMat(bad_distCoeffs_cpp2);
Mat bad_distCoeffs_cpp3(1, 7, CV_32F); CvMat bad_distCoeffs_c3 = cvMat(bad_distCoeffs_cpp3);
bad_caller = caller;
bad_caller.distCoeffs = &zeros;
@ -629,9 +621,9 @@ protected:
/****************************/
Mat bad_dpdr_cpp1(dpdr_cpp.size(), CV_32S); CvMat bad_dpdr_c1 = bad_dpdr_cpp1;
Mat bad_dpdr_cpp2(dpdr_cpp.cols+1, 3, CV_32F); CvMat bad_dpdr_c2 = bad_dpdr_cpp2;
Mat bad_dpdr_cpp3(dpdr_cpp.cols, 7, CV_32F); CvMat bad_dpdr_c3 = bad_dpdr_cpp3;
Mat bad_dpdr_cpp1(dpdr_cpp.size(), CV_32S); CvMat bad_dpdr_c1 = cvMat(bad_dpdr_cpp1);
Mat bad_dpdr_cpp2(dpdr_cpp.cols+1, 3, CV_32F); CvMat bad_dpdr_c2 = cvMat(bad_dpdr_cpp2);
Mat bad_dpdr_cpp3(dpdr_cpp.cols, 7, CV_32F); CvMat bad_dpdr_c3 = cvMat(bad_dpdr_cpp3);
bad_caller = caller;
bad_caller.dpdr = &zeros;
@ -669,7 +661,7 @@ protected:
/****************************/
Mat bad_dpdf_cpp2(dpdr_cpp.cols+1, 2, CV_32F); CvMat bad_dpdf_c2 = bad_dpdf_cpp2;
Mat bad_dpdf_cpp2(dpdr_cpp.cols+1, 2, CV_32F); CvMat bad_dpdf_c2 = cvMat(bad_dpdf_cpp2);
bad_caller = caller;
bad_caller.dpdf = &zeros;

View File

@ -78,9 +78,9 @@ protected:
findChessboardCorners(img, pattern_size, corners, flags);
else
if (!drawCorners)
cvFindChessboardCorners( &arr, pattern_size, out_corners, out_corner_count, flags );
cvFindChessboardCorners( &arr, cvSize(pattern_size), out_corners, out_corner_count, flags );
else
cvDrawChessboardCorners( &drawCorImg, pattern_size,
cvDrawChessboardCorners( &drawCorImg, cvSize(pattern_size),
(CvPoint2D32f*)(corners.empty() ? 0 : &corners[0]),
(int)corners.size(), was_found);
}
@ -128,14 +128,14 @@ void CV_ChessboardDetectorBadArgTest::run( int /*start_from */)
drawCorners = false;
img = cb.clone();
arr = img;
arr = cvMat(img);
out_corner_count = 0;
out_corners = 0;
errors += run_test_case( CV_StsNullPtr, "Null pointer to corners" );
drawCorners = true;
Mat cvdrawCornImg(img.size(), CV_8UC2);
drawCorImg = cvdrawCornImg;
drawCorImg = cvMat(cvdrawCornImg);
was_found = true;
errors += run_test_case( CV_StsUnsupportedFormat, "2 channel image" );

View File

@ -96,7 +96,7 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
{
int count0 = -1;
int count = 0;
CvSize pattern_size;
Size pattern_size;
int result, result1 = 0;
const char* imgname = cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4), "dummy.txt");
@ -110,7 +110,7 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
filename = cv::format("%s%s", filepath.c_str(), imgname );
cv::Mat img2 = cv::imread( filename );
img = img2;
img = cvIplImage(img2);
if( img2.empty() )
{
@ -135,11 +135,11 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
v = (CvPoint2D32f*)_v->data.fl;
int64 _time0 = cvGetTickCount();
result = cvCheckChessboard(gray, pattern_size);
result = cvCheckChessboard(gray, cvSize(pattern_size));
int64 _time01 = cvGetTickCount();
OPENCV_CALL( result1 = cvFindChessboardCorners(
gray, pattern_size, v, &count, 15 ));
gray, cvSize(pattern_size), v, &count, 15 ));
int64 _time1 = cvGetTickCount();
if( result != is_chessboard )

View File

@ -180,7 +180,7 @@ void CV_ChessboardSubpixelTest::run( int )
break;
}
IplImage chessboard_image_header = chessboard_image;
IplImage chessboard_image_header = cvIplImage(chessboard_image);
cvFindCornerSubPix(&chessboard_image_header, (CvPoint2D32f*)&test_corners[0],
(int)test_corners.size(), cvSize(3, 3), cvSize(1, 1), cvTermCriteria(CV_TERMCRIT_EPS|CV_TERMCRIT_ITER,300,0.1));
find4QuadCornerSubpix(chessboard_image, test_corners, Size(5, 5));

View File

@ -351,9 +351,9 @@ static int cvTsRodrigues( const CvMat* src, CvMat* dst, CvMat* jacobian )
{
CV_Assert(src.data != dst.data && "Inplace is not supported");
CV_Assert(!dst.empty() && "'dst' must be allocated");
CvMat _src = src, _dst = dst, _jac;
CvMat _src = cvMat(src), _dst = cvMat(dst), _jac;
if( jac )
_jac = *jac;
_jac = cvMat(*jac);
cvTsRodrigues(&_src, &_dst, jac ? &_jac : 0);
}
@ -667,13 +667,13 @@ void CV_RodriguesTest::run_func()
if( calc_jacobians )
{
v2m_jac = test_mat[OUTPUT][1];
m2v_jac = test_mat[OUTPUT][3];
v2m_jac = cvMat(test_mat[OUTPUT][1]);
m2v_jac = cvMat(test_mat[OUTPUT][3]);
}
if( !test_cpp )
{
CvMat _input = test_mat[INPUT][0], _output = test_mat[OUTPUT][0], _output2 = test_mat[OUTPUT][2];
CvMat _input = cvMat(test_mat[INPUT][0]), _output = cvMat(test_mat[OUTPUT][0]), _output2 = cvMat(test_mat[OUTPUT][2]);
cvRodrigues2( &_input, &_output, calc_jacobians ? &v2m_jac : 0 );
cvRodrigues2( &_output, &_output2, calc_jacobians ? &m2v_jac : 0 );
}
@ -980,8 +980,8 @@ int CV_FundamentalMatTest::prepare_test_case( int test_case_idx )
void CV_FundamentalMatTest::run_func()
{
// cvFindFundamentalMat calls cv::findFundamentalMat
CvMat _input0 = test_mat[INPUT][0], _input1 = test_mat[INPUT][1];
CvMat F = test_mat[TEMP][0], mask = test_mat[TEMP][1];
CvMat _input0 = cvMat(test_mat[INPUT][0]), _input1 = cvMat(test_mat[INPUT][1]);
CvMat F = cvMat(test_mat[TEMP][0]), mask = cvMat(test_mat[TEMP][1]);
f_result = cvFindFundamentalMat( &_input0, &_input1, &F, method, MAX(sigma*3, 0.01), 0, &mask );
}
@ -1543,7 +1543,7 @@ void CV_ConvertHomogeneousTest::fill_array( int /*test_case_idx*/, int /*i*/, in
void CV_ConvertHomogeneousTest::run_func()
{
CvMat _input = test_mat[INPUT][0], _output = test_mat[OUTPUT][0];
CvMat _input = cvMat(test_mat[INPUT][0]), _output = cvMat(test_mat[OUTPUT][0]);
cvConvertPointsHomogeneous( &_input, &_output );
}
@ -1678,7 +1678,7 @@ void CV_ComputeEpilinesTest::fill_array( int test_case_idx, int i, int j, Mat& a
void CV_ComputeEpilinesTest::run_func()
{
CvMat _points = test_mat[INPUT][0], _F = test_mat[INPUT][1], _lines = test_mat[OUTPUT][0];
CvMat _points = cvMat(test_mat[INPUT][0]), _F = cvMat(test_mat[INPUT][1]), _lines = cvMat(test_mat[OUTPUT][0]);
cvComputeCorrespondEpilines( &_points, which_image, &_F, &_lines );
}

View File

@ -124,7 +124,7 @@ protected:
Mat_<out3d_t> _3dImg(disp.size());
CvMat cvdisp = disp; CvMat cv_3dImg = _3dImg; CvMat cvQ = Q;
CvMat cvdisp = cvMat(disp); CvMat cv_3dImg = cvMat(_3dImg); CvMat cvQ = cvMat(Q);
cvReprojectImageTo3D( &cvdisp, &cv_3dImg, &cvQ, handleMissingValues );
if (std::numeric_limits<OutT>::max() == std::numeric_limits<float>::max())

View File

@ -410,7 +410,7 @@ void CV_UndistortPointsTest::prepare_to_validation(int /*test_case_idx*/)
{
if (useDstMat)
{
CvMat temp = dst_points_mat;
CvMat temp = cvMat(dst_points_mat);
for (int i=0;i<N_POINTS*2;i++)
{
points[i] = temp.data.fl[i];
@ -469,14 +469,14 @@ void CV_UndistortPointsTest::run_func()
}
else
{
CvMat _input0 = test_mat[INPUT][0], _input1 = test_mat[INPUT][1], _input2, _input3, _input4;
CvMat _output = test_mat[TEMP][0];
CvMat _input0 = cvMat(test_mat[INPUT][0]), _input1 = cvMat(test_mat[INPUT][1]), _input2, _input3, _input4;
CvMat _output = cvMat(test_mat[TEMP][0]);
if(!zero_distortion)
_input2 = test_mat[INPUT][2];
_input2 = cvMat(test_mat[INPUT][2]);
if(!zero_R)
_input3 = test_mat[INPUT][3];
_input3 = cvMat(test_mat[INPUT][3]);
if(!zero_new_cam)
_input4 = test_mat[INPUT][4];
_input4 = cvMat(test_mat[INPUT][4]);
cvUndistortPoints(&_input0, &_output, &_input1,
zero_distortion ? 0 : &_input2,
zero_R ? 0 : &_input3,
@ -853,10 +853,10 @@ void CV_InitUndistortRectifyMapTest::prepare_to_validation(int/* test_case_idx*/
CvMat _new_cam = cvMat(test_mat[INPUT][4].rows,test_mat[INPUT][4].cols,CV_64F,new_cam);
CvMat _points= cvMat(test_mat[INPUT][0].rows,test_mat[INPUT][0].cols,CV_64FC2,points);
CvMat _input1 = test_mat[INPUT][1];
CvMat _input2 = test_mat[INPUT][2];
CvMat _input3 = test_mat[INPUT][3];
CvMat _input4 = test_mat[INPUT][4];
CvMat _input1 = cvMat(test_mat[INPUT][1]);
CvMat _input2 = cvMat(test_mat[INPUT][2]);
CvMat _input3 = cvMat(test_mat[INPUT][3]);
CvMat _input4 = cvMat(test_mat[INPUT][4]);
cvtest::convert(cvarrToMat(&_input1), cvarrToMat(&_camera), -1);
cvtest::convert(cvarrToMat(&_input2), cvarrToMat(&_distort), -1);
@ -871,8 +871,8 @@ void CV_InitUndistortRectifyMapTest::prepare_to_validation(int/* test_case_idx*/
}
cv::Mat map1,map2;
cv::convertMaps(mapx,mapy,map1,map2,CV_32FC1);
CvMat _map1 = map1;
CvMat _map2 = map2;
CvMat _map1 = cvMat(map1);
CvMat _map2 = cvMat(map2);
for (int i=0;i<N_POINTS;i++)
{
double u = test_mat[INPUT][0].ptr<double>()[2*i];
@ -886,7 +886,7 @@ void CV_InitUndistortRectifyMapTest::prepare_to_validation(int/* test_case_idx*/
cvUndistortPoints(&_points,&ref_points,&_camera,
zero_distortion ? 0 : &_distort, zero_R ? 0 : &_rot, zero_new_cam ? &_camera : &_new_cam);
//cvTsDistortPoints(&_points,&ref_points,&_camera,&_distort,&_rot,&_new_cam);
CvMat dst = test_mat[REF_OUTPUT][0];
CvMat dst = cvMat(test_mat[REF_OUTPUT][0]);
cvtest::convert(cvarrToMat(&ref_points), cvarrToMat(&dst), -1);
cvtest::copy(test_mat[INPUT][0],test_mat[OUTPUT][0]);
@ -912,13 +912,13 @@ void CV_InitUndistortRectifyMapTest::run_func()
}
else
{
CvMat input1 = test_mat[INPUT][1], input2, input3, input4;
CvMat input1 = cvMat(test_mat[INPUT][1]), input2, input3, input4;
if( !zero_distortion )
input2 = test_mat[INPUT][2];
input2 = cvMat(test_mat[INPUT][2]);
if( !zero_R )
input3 = test_mat[INPUT][3];
input3 = cvMat(test_mat[INPUT][3]);
if( !zero_new_cam )
input4 = test_mat[INPUT][4];
input4 = cvMat(test_mat[INPUT][4]);
cvInitUndistortRectifyMap(&input1,
zero_distortion ? 0 : &input2,
zero_R ? 0 : &input3,

View File

@ -3064,7 +3064,7 @@ template<typename _Tp> inline void Seq<_Tp>::copyTo(std::vector<_Tp>& vec, const
size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start;
vec.resize(len);
if( seq && len )
cvCvtSeqToArray(seq, &vec[0], range);
cvCvtSeqToArray(seq, &vec[0], cvSlice(range));
}
template<typename _Tp> inline Seq<_Tp>::operator std::vector<_Tp>() const

View File

@ -219,15 +219,10 @@ enum CpuFeatures {
typedef union Cv16suf
{
short i;
ushort u;
#if CV_FP16_TYPE
__fp16 h;
#endif
struct _fp16Format
{
unsigned int significand : 10;
unsigned int exponent : 5;
unsigned int sign : 1;
} fmt;
}
Cv16suf;
@ -236,12 +231,6 @@ typedef union Cv32suf
int i;
unsigned u;
float f;
struct _fp32Format
{
unsigned int significand : 23;
unsigned int exponent : 8;
unsigned int sign : 1;
} fmt;
}
Cv32suf;
@ -515,6 +504,115 @@ typedef ::uint64_t uint64_t;
#include <stdint.h>
#endif
#ifdef __cplusplus
namespace cv
{
class float16_t
{
public:
#if CV_FP16_TYPE
float16_t() {}
explicit float16_t(float x) { h = (__fp16)x; }
operator float() const { return (float)h; }
static float16_t fromBits(ushort w)
{
Cv16suf u;
u.u = w;
float16_t result;
result.h = u.h;
return result;
}
static float16_t zero()
{
float16_t result;
result.h = (__fp16)0;
return result;
}
ushort bits() const
{
Cv16suf u;
u.h = h;
return u.u;
}
protected:
__fp16 h;
#else
float16_t() {}
explicit float16_t(float x)
{
#if CV_AVX2
__m128 v = _mm_load_ss(&x);
w = (ushort)_mm_cvtsi128_si32(_mm_cvtps_ph(v, 0));
#else
Cv32suf in;
in.f = x;
unsigned sign = in.u & 0x80000000;
in.u ^= sign;
if( in.u >= 0x47800000 )
w = (ushort)(in.u > 0x7f800000 ? 0x7e00 : 0x7c00);
else
{
if (in.u < 0x38800000)
{
in.f += 0.5f;
w = (ushort)(in.u - 0x3f000000);
}
else
{
unsigned t = in.u + 0xc8000fff;
w = (ushort)((t + ((in.u >> 13) & 1)) >> 13);
}
}
w = (ushort)(w | (sign >> 16));
#endif
}
operator float() const
{
#if CV_AVX2
float f;
_mm_store_ss(&f, _mm_cvtph_ps(_mm_cvtsi32_si128(w)));
return f;
#else
Cv32suf out;
unsigned t = ((w & 0x7fff) << 13) + 0x38000000;
unsigned sign = (w & 0x8000) << 16;
unsigned e = w & 0x7c00;
out.u = t + (1 << 23);
out.u = (e >= 0x7c00 ? t + 0x38000000 :
e == 0 ? (out.f -= 6.103515625e-05f, out.u) : t) | sign;
return out.f;
#endif
}
static float16_t fromBits(ushort b)
{
float16_t result;
result.w = b;
return result;
}
static float16_t zero()
{
float16_t result;
result.w = (ushort)0;
return result;
}
ushort bits() const { return w; }
protected:
ushort w;
#endif
};
}
#endif
//! @}

View File

@ -252,7 +252,8 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
CV_INTRIN_DEFINE_WIDE_LOAD_EXPAND(unsigned, v_uint64, prefix) \
CV_INTRIN_DEFINE_WIDE_INTRIN(float, v_float32, f32, prefix, load) \
CV_INTRIN_DEFINE_WIDE_INTRIN(int64, v_int64, s64, prefix, load) \
CV_INTRIN_DEFINE_WIDE_INTRIN(uint64, v_uint64, u64, prefix, load)
CV_INTRIN_DEFINE_WIDE_INTRIN(uint64, v_uint64, u64, prefix, load) \
CV_INTRIN_DEFINE_WIDE_LOAD_EXPAND(float16_t, v_float32, prefix)
template<typename _Tp> struct V_RegTraits
{
@ -286,9 +287,6 @@ template<typename _Tp> struct V_RegTraits
#if CV_SIMD128_64F
CV_DEF_REG_TRAITS(v, v_float64x2, double, f64, v_float64x2, void, void, v_int64x2, v_int32x4);
#endif
#if CV_SIMD128_FP16
CV_DEF_REG_TRAITS(v, v_float16x8, short, f16, v_float16x8, void, void, v_int16x8, v_int16x8);
#endif
#endif
#if CV_SIMD256
@ -302,9 +300,6 @@ template<typename _Tp> struct V_RegTraits
CV_DEF_REG_TRAITS(v256, v_uint64x4, uint64, u64, v_uint64x4, void, void, v_int64x4, void);
CV_DEF_REG_TRAITS(v256, v_int64x4, int64, s64, v_uint64x4, void, void, v_int64x4, void);
CV_DEF_REG_TRAITS(v256, v_float64x4, double, f64, v_float64x4, void, void, v_int64x4, v_int32x8);
#if CV_SIMD256_FP16
CV_DEF_REG_TRAITS(v256, v_float16x16, short, f16, v_float16x16, void, void, v_int16x16, void);
#endif
#endif
#if CV_SIMD512 && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 512)
@ -335,14 +330,6 @@ namespace CV__SIMD_NAMESPACE {
#if CV_SIMD256_64F
typedef v_float64x4 v_float64;
#endif
#if CV_FP16
#define vx_load_fp16_f32 v256_load_fp16_f32
#define vx_store_fp16 v_store_fp16
#endif
#if CV_SIMD256_FP16
typedef v_float16x16 v_float16;
CV_INTRIN_DEFINE_WIDE_INTRIN(short, v_float16, f16, v256, load_f16)
#endif
CV_INTRIN_DEFINE_WIDE_INTRIN_ALL_TYPES(v256)
CV_INTRIN_DEFINE_WIDE_INTRIN(double, v_float64, f64, v256, load)
inline void vx_cleanup() { v256_cleanup(); }
@ -353,7 +340,6 @@ using namespace CV__SIMD_NAMESPACE;
namespace CV__SIMD_NAMESPACE {
#define CV_SIMD CV_SIMD128
#define CV_SIMD_64F CV_SIMD128_64F
#define CV_SIMD_FP16 CV_SIMD128_FP16
#define CV_SIMD_WIDTH 16
typedef v_uint8x16 v_uint8;
typedef v_int8x16 v_int8;
@ -367,14 +353,6 @@ namespace CV__SIMD_NAMESPACE {
#if CV_SIMD128_64F
typedef v_float64x2 v_float64;
#endif
#if CV_FP16
#define vx_load_fp16_f32 v128_load_fp16_f32
#define vx_store_fp16 v_store_fp16
#endif
#if CV_SIMD128_FP16
typedef v_float16x8 v_float16;
CV_INTRIN_DEFINE_WIDE_INTRIN(short, v_float16, f16, v, load_f16)
#endif
CV_INTRIN_DEFINE_WIDE_INTRIN_ALL_TYPES(v)
#if CV_SIMD128_64F
CV_INTRIN_DEFINE_WIDE_INTRIN(double, v_float64, f64, v, load)

View File

@ -234,7 +234,15 @@ struct v_uint64x4
{ val = _mm256_setr_epi64x((int64)v0, (int64)v1, (int64)v2, (int64)v3); }
v_uint64x4() : val(_mm256_setzero_si256()) {}
uint64 get0() const
{ return (uint64)_mm_cvtsi128_si64(_mm256_castsi256_si128(val)); }
{
#if defined __x86_64__ || defined _M_X64
return (uint64)_mm_cvtsi128_si64(_mm256_castsi256_si128(val));
#else
int a = _mm_cvtsi128_si32(_mm256_castsi256_si128(val));
int b = _mm_cvtsi128_si32(_mm256_castsi256_si128(_mm256_srli_epi64(val, 32)));
return (unsigned)a | ((uint64)(unsigned)b << 32);
#endif
}
};
struct v_int64x4
@ -247,7 +255,17 @@ struct v_int64x4
v_int64x4(int64 v0, int64 v1, int64 v2, int64 v3)
{ val = _mm256_setr_epi64x(v0, v1, v2, v3); }
v_int64x4() : val(_mm256_setzero_si256()) {}
int64 get0() const { return (int64)_mm_cvtsi128_si64(_mm256_castsi256_si128(val)); }
int64 get0() const
{
#if defined __x86_64__ || defined _M_X64
return (int64)_mm_cvtsi128_si64(_mm256_castsi256_si128(val));
#else
int a = _mm_cvtsi128_si32(_mm256_castsi256_si128(val));
int b = _mm_cvtsi128_si32(_mm256_castsi256_si128(_mm256_srli_epi64(val, 32)));
return (int64)((unsigned)a | ((uint64)(unsigned)b << 32));
#endif
}
};
struct v_float64x4
@ -1396,10 +1414,17 @@ inline v_int8x32 v_pack(const v_int16x16& a, const v_int16x16& b)
{ return v_int8x32(_v256_shuffle_odd_64(_mm256_packs_epi16(a.val, b.val))); }
inline v_uint8x32 v_pack(const v_uint16x16& a, const v_uint16x16& b)
{ return v_uint8x32(_v256_shuffle_odd_64(_mm256_packus_epi16(a.val, b.val))); }
{
__m256i t = _mm256_set1_epi16(255);
__m256i a1 = _mm256_min_epu16(a.val, t);
__m256i b1 = _mm256_min_epu16(b.val, t);
return v_uint8x32(_v256_shuffle_odd_64(_mm256_packus_epi16(a1, b1)));
}
inline v_uint8x32 v_pack_u(const v_int16x16& a, const v_int16x16& b)
{ return v_pack(v_reinterpret_as_u16(a), v_reinterpret_as_u16(b)); }
{
return v_uint8x32(_v256_shuffle_odd_64(_mm256_packus_epi16(a.val, b.val)));
}
inline void v_pack_store(schar* ptr, const v_int16x16& a)
{ v_store_low(ptr, v_pack(a, a)); }
@ -2372,6 +2397,18 @@ OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float32x8, float, f32, v_uint32x8, un
OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int64x4, int64, s64, v_uint64x4, uint64, u64)
OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float64x4, double, f64, v_uint64x4, uint64, u64)
// FP16
inline v_float32x8 v256_load_expand(const float16_t* ptr)
{
return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr)));
}
inline void v_pack_store(float16_t* ptr, const v_float32x8& a)
{
__m128i ah = _mm256_cvtps_ph(a.val, 0);
_mm_storeu_si128((__m128i*)ptr, ah);
}
inline void v256_cleanup() { _mm256_zeroupper(); }
//! @name Check SIMD256 support

View File

@ -2062,6 +2062,28 @@ inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + m3.s[3]);
}
////// FP16 suport ///////
inline v_reg<float, V_TypeTraits<float>::nlanes128>
v_load_expand(const float16_t* ptr)
{
v_reg<float, V_TypeTraits<float>::nlanes128> v;
for( int i = 0; i < v.nlanes; i++ )
{
v.s[i] = ptr[i];
}
return v;
}
inline void
v_pack_store(float16_t* ptr, v_reg<float, V_TypeTraits<float>::nlanes128>& v)
{
for( int i = 0; i < v.nlanes; i++ )
{
ptr[i] = float16_t(v.s[i]);
}
}
inline void v_cleanup() {}
//! @}

View File

@ -62,15 +62,6 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
#define CV_SIMD128_64F 0
#endif
#ifndef CV_SIMD128_FP16
# if CV_FP16 && (defined(__GNUC__) && __GNUC__ >= 5) // #12027: float16x8_t is missing in GCC 4.8.2
# define CV_SIMD128_FP16 1
# endif
#endif
#ifndef CV_SIMD128_FP16
# define CV_SIMD128_FP16 0
#endif
#if CV_SIMD128_64F
#define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \
template <typename T> static inline \
@ -329,53 +320,6 @@ inline void v_store_fp16(short* ptr, const v_float32x4& a)
}
#endif
#if CV_SIMD128_FP16
// Workaround for old compilers
static inline int16x8_t vreinterpretq_s16_f16(float16x8_t a) { return (int16x8_t)a; }
static inline float16x8_t vreinterpretq_f16_s16(int16x8_t a) { return (float16x8_t)a; }
static inline float16x8_t cv_vld1q_f16(const void* ptr)
{
#ifndef vld1q_f16 // APPLE compiler defines vld1_f16 as macro
return vreinterpretq_f16_s16(vld1q_s16((const short*)ptr));
#else
return vld1q_f16((const __fp16*)ptr);
#endif
}
static inline void cv_vst1q_f16(void* ptr, float16x8_t a)
{
#ifndef vst1q_f16 // APPLE compiler defines vst1_f16 as macro
vst1q_s16((short*)ptr, vreinterpretq_s16_f16(a));
#else
vst1q_f16((__fp16*)ptr, a);
#endif
}
struct v_float16x8
{
typedef short lane_type;
enum { nlanes = 8 };
v_float16x8() {}
explicit v_float16x8(float16x8_t v) : val(v) {}
v_float16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
{
short v[] = {v0, v1, v2, v3, v4, v5, v6, v7};
val = cv_vld1q_f16(v);
}
short get0() const
{
return vgetq_lane_s16(vreinterpretq_s16_f16(val), 0);
}
float16x8_t val;
};
inline v_float16x8 v_setzero_f16() { return v_float16x8(vreinterpretq_f16_s16(vdupq_n_s16((short)0))); }
inline v_float16x8 v_setall_f16(short v) { return v_float16x8(vreinterpretq_f16_s16(vdupq_n_s16(v))); }
#endif // CV_SIMD128_FP16
#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \
inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \
inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \
@ -934,24 +878,6 @@ OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64)
#endif
#if CV_SIMD128_FP16
// Workaround for old comiplers
inline v_float16x8 v_load_f16(const short* ptr)
{ return v_float16x8(cv_vld1q_f16(ptr)); }
inline v_float16x8 v_load_f16_aligned(const short* ptr)
{ return v_float16x8(cv_vld1q_f16(ptr)); }
inline v_float16x8 v_load_f16_low(const short* ptr)
{ return v_float16x8(vcombine_f16(cv_vld1_f16(ptr), vdup_n_f16((float16_t)0))); }
inline v_float16x8 v_load_f16_halves(const short* ptr0, const short* ptr1)
{ return v_float16x8(vcombine_f16(cv_vld1_f16(ptr0), cv_vld1_f16(ptr1))); }
inline void v_store(short* ptr, const v_float16x8& a)
{ cv_vst1q_f16(ptr, a.val); }
inline void v_store_aligned(short* ptr, const v_float16x8& a)
{ cv_vst1q_f16(ptr, a.val); }
#endif
#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
inline scalartype v_reduce_##func(const _Tpvec& a) \
{ \
@ -1507,22 +1433,6 @@ inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
}
#endif
#if CV_SIMD128_FP16
// Widen the low 4 FP16 lanes to float32 (hardware vcvt).
inline v_float32x4 v_cvt_f32(const v_float16x8& a)
{
return v_float32x4(vcvt_f32_f16(vget_low_f16(a.val)));
}
// Widen the high 4 FP16 lanes to float32.
inline v_float32x4 v_cvt_f32_high(const v_float16x8& a)
{
return v_float32x4(vcvt_f32_f16(vget_high_f16(a.val)));
}
// Narrow two float32 vectors into one FP16 vector: a -> low lanes, b -> high lanes.
inline v_float16x8 v_cvt_f16(const v_float32x4& a, const v_float32x4& b)
{
return v_float16x8(vcombine_f16(vcvt_f16_f32(a.val), vcvt_f16_f32(b.val)));
}
#endif
////////////// Lookup table access ////////////////////
inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec)
@ -1588,6 +1498,47 @@ inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_flo
}
#endif
////// FP16 support ///////
#if CV_FP16
// Load 4 FP16 values and widen them to float32 using the hardware converter.
inline v_float32x4 v_load_expand(const float16_t* ptr)
{
float16x4_t v =
#ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro
(float16x4_t)vld1_s16((const short*)ptr);
#else
vld1_f16((const __fp16*)ptr);
#endif
return v_float32x4(vcvt_f32_f16(v));
}
// Narrow a float32x4 to 4 FP16 values and store them (hardware vcvt).
inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
{
float16x4_t hv = vcvt_f16_f32(v.val);
#ifndef vst1_f16 // APPLE compiler defines vst1_f16 as macro
vst1_s16((short*)ptr, (int16x4_t)hv);
#else
vst1_f16((__fp16*)ptr, hv);
#endif
}
#else
// Scalar fallback (no FP16 hardware): widen four half-precision values to
// float one by one, then load the temporary buffer into a SIMD register.
inline v_float32x4 v_load_expand(const float16_t* ptr)
{
    float widened[4];
    for (int lane = 0; lane < 4; ++lane)
    {
        widened[lane] = (float)ptr[lane];
    }
    return v_load(widened);
}
// Scalar fallback (no FP16 hardware): spill the vector to a temporary float
// buffer and narrow each element to half precision individually.
inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
{
    float spilled[4];
    v_store(spilled, v);
    for (int lane = 0; lane < 4; ++lane)
    {
        ptr[lane] = float16_t(spilled[lane]);
    }
}
#endif
inline void v_cleanup() {}
//! @name Check SIMD support

View File

@ -404,7 +404,7 @@ void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a)
inline v_int8x16 v_pack(const v_int16x8& a, const v_int16x8& b)
{ return v_int8x16(_mm_packs_epi16(a.val, b.val)); }
inline void v_pack_store(schar* ptr, v_int16x8& a)
inline void v_pack_store(schar* ptr, const v_int16x8& a)
{ _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi16(a.val, a.val)); }
template<int n> inline
@ -2655,6 +2655,50 @@ inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_flo
y = v_float64x2(_mm_unpackhi_pd(xy0, xy1));
}
////////////// FP16 support ///////////////////////////
// Expand 4 packed half-precision (FP16) values into float32x4 using integer
// SSE2 ops only (no F16C instruction dependency).
inline v_float32x4 v_load_expand(const float16_t* ptr)
{
const __m128i z = _mm_setzero_si128(), delta = _mm_set1_epi32(0x38000000);
const __m128i signmask = _mm_set1_epi32(0x80000000), maxexp = _mm_set1_epi32(0x7c000000);
const __m128 deltaf = _mm_castsi128_ps(_mm_set1_epi32(0x38800000));
// Place each 16-bit half value in the upper half of a 32-bit lane.
__m128i bits = _mm_unpacklo_epi16(z, _mm_loadl_epi64((const __m128i*)ptr)); // h << 16
__m128i e = _mm_and_si128(bits, maxexp), sign = _mm_and_si128(bits, signmask);
// Shift mantissa+exponent into float position and rebias the exponent (15 -> 127 via delta).
__m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_xor_si128(bits, sign), 3), delta); // ((h & 0x7fff) << 13) + delta
// Zero/subnormal path: let the FPU normalize the subnormal mantissa via a float subtraction.
__m128i zt = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_add_epi32(t, _mm_set1_epi32(1 << 23))), deltaf));
// Inf/NaN (half exponent all ones): add delta again to push the float exponent to all ones.
t = _mm_add_epi32(t, _mm_and_si128(delta, _mm_cmpeq_epi32(maxexp, e)));
__m128i zmask = _mm_cmpeq_epi32(e, z); // lanes whose half exponent field is 0
__m128i ft = v_select_si128(zmask, zt, t); // pick subnormal or normal result per lane
return v_float32x4(_mm_castsi128_ps(_mm_or_si128(ft, sign))); // reattach sign bits
}
// Pack float32x4 into 4 half-precision values with round-to-nearest-even,
// handling subnormals, Inf and NaN explicitly (SSE2 only, no F16C).
inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
{
const __m128i signmask = _mm_set1_epi32(0x80000000);
const __m128i rval = _mm_set1_epi32(0x3f000000);
__m128i t = _mm_castps_si128(v.val);
// Move the sign down to half position (bit 15); srai smears the upper bits,
// which is harmless since the final 16-bit pack discards them.
__m128i sign = _mm_srai_epi32(_mm_and_si128(t, signmask), 16);
t = _mm_andnot_si128(signmask, t); // |x|
__m128i finitemask = _mm_cmpgt_epi32(_mm_set1_epi32(0x47800000), t); // |x| < 2^16 -> finite half result
__m128i isnan = _mm_cmpgt_epi32(t, _mm_set1_epi32(0x7f800000)); // above the +Inf bit pattern -> NaN
__m128i naninf = v_select_si128(isnan, _mm_set1_epi32(0x7e00), _mm_set1_epi32(0x7c00)); // qNaN or Inf half pattern
__m128i tinymask = _mm_cmpgt_epi32(_mm_set1_epi32(0x38800000), t); // below smallest normal half -> subnormal/zero
// Subnormal path: adding 0.5f lets the FPU shift+round the mantissa; then strip the 0.5 bias.
__m128i tt = _mm_castps_si128(_mm_add_ps(_mm_castsi128_ps(t), _mm_castsi128_ps(rval)));
tt = _mm_sub_epi32(tt, rval);
// Normal path: 'odd' implements the round-to-nearest-even tie break.
__m128i odd = _mm_and_si128(_mm_srli_epi32(t, 13), _mm_set1_epi32(1));
// 0xc8000fff = exponent rebias (127 -> 15, i.e. -0x38000000) plus the 0xfff rounding increment.
__m128i nt = _mm_add_epi32(t, _mm_set1_epi32(0xc8000fff));
nt = _mm_srli_epi32(_mm_add_epi32(nt, odd), 13);
t = v_select_si128(tinymask, tt, nt);
t = v_select_si128(finitemask, t, naninf);
t = _mm_or_si128(t, sign);
t = _mm_packs_epi32(t, t); // narrow 32-bit lanes to 16-bit
_mm_storel_epi64((__m128i*)ptr, t); // store the 4 packed halves (low 64 bits)
}
// No-op on this backend; provided so generic SIMD code can call it unconditionally.
inline void v_cleanup() {}
//! @name Check SIMD support

View File

@ -916,6 +916,24 @@ inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_flo
y = v_float64x2(tab[idx[0]+1], tab[idx[1]+1]);
}
/////// FP16 support ////////
// [TODO] implement these 2 using VSX or universal intrinsics (copy from intrin_sse.cpp and adapt)
// Scalar fallback: widen 4 half-precision values element by element.
inline v_float32x4 v_load_expand(const float16_t* ptr)
{
return v_float32x4((float)ptr[0], (float)ptr[1], (float)ptr[2], (float)ptr[3]);
}
// Scalar fallback: spill the vector to an aligned scratch buffer, then
// narrow each element to half precision individually.
inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
{
    float CV_DECL_ALIGNED(32) scratch[4];
    v_store_aligned(scratch, v);
    for (int i = 0; i < 4; ++i)
        ptr[i] = float16_t(scratch[i]);
}
inline void v_cleanup() {}

View File

@ -44,6 +44,29 @@
#ifndef OPENCV_CORE_TYPES_H
#define OPENCV_CORE_TYPES_H
#if !defined(__OPENCV_BUILD) && !defined(CV__DISABLE_C_API_CTORS)
#define CV__ENABLE_C_API_CTORS // enable C API ctors (must be removed)
#endif
//#define CV__VALIDATE_UNUNITIALIZED_VARS 1 // C++11 & GCC only
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#define CV_STRUCT_INITIALIZER {0,}
#else
#if defined(__GNUC__) && __GNUC__ == 4 // GCC 4.x warns on "= {}" initialization, fixed in GCC 5.0
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#endif
#define CV_STRUCT_INITIALIZER {}
#endif
#else
#define CV_STRUCT_INITIALIZER {0}
#endif
#ifdef HAVE_IPL
# ifndef __IPL_H__
# if defined _WIN32
@ -285,6 +308,11 @@ CV_INLINE double cvRandReal( CvRNG* rng )
#define IPL_BORDER_REFLECT 2
#define IPL_BORDER_WRAP 3
#ifdef __cplusplus
typedef struct _IplImage IplImage;
CV_EXPORTS _IplImage cvIplImage(const cv::Mat& m);
#endif
/** The IplImage is taken from the Intel Image Processing Library, in which the format is native. OpenCV
only supports a subset of possible IplImage formats, as outlined in the parameter list above.
@ -294,9 +322,6 @@ hand, the Intel Image Processing Library processes the area of intersection betw
destination images (or ROIs), allowing them to vary independently.
*/
typedef struct
#ifdef __cplusplus
CV_EXPORTS
#endif
_IplImage
{
int nSize; /**< sizeof(IplImage) */
@ -330,13 +355,22 @@ _IplImage
(not necessarily aligned) -
needed for correct deallocation */
#ifdef __cplusplus
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
_IplImage() {}
_IplImage(const cv::Mat& m);
_IplImage(const cv::Mat& m) { *this = cvIplImage(m); }
#endif
}
IplImage;
// Returns a default IplImage header. In the plain-C path all fields are zeroed
// and nSize is set; with CV__ENABLE_C_API_CTORS the empty _IplImage() ctor is
// used, which performs no field initialization (legacy behavior).
CV_INLINE IplImage cvIplImage()
{
#if !defined(CV__ENABLE_C_API_CTORS)
IplImage self = CV_STRUCT_INITIALIZER; self.nSize = sizeof(IplImage); return self;
#else
return _IplImage();
#endif
}
typedef struct _IplTileInfo IplTileInfo;
typedef struct _IplROI
@ -460,13 +494,10 @@ typedef struct CvMat
int cols;
#endif
#ifdef __cplusplus
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvMat() {}
CvMat(const CvMat& m) { memcpy(this, &m, sizeof(CvMat));}
CvMat(const cv::Mat& m);
CvMat(const cv::Mat& m) { *this = cvMat(m); }
#endif
}
CvMat;
@ -529,15 +560,8 @@ CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)
}
#ifdef __cplusplus
inline CvMat::CvMat(const cv::Mat& m)
{
CV_DbgAssert(m.dims <= 2);
*this = cvMat(m.rows, m.dims == 1 ? 1 : m.cols, m.type(), m.data);
step = (int)m.step[0];
type = (type & ~cv::Mat::CONTINUOUS_FLAG) | (m.flags & cv::Mat::CONTINUOUS_FLAG);
}
inline CvMat cvMat(const cv::Mat& m)
CV_INLINE CvMat cvMat(const cv::Mat& m)
{
CvMat self;
CV_DbgAssert(m.dims <= 2);
@ -546,7 +570,24 @@ inline CvMat cvMat(const cv::Mat& m)
self.type = (self.type & ~cv::Mat::CONTINUOUS_FLAG) | (m.flags & cv::Mat::CONTINUOUS_FLAG);
return self;
}
// Returns a zero-initialized CvMat header.
CV_INLINE CvMat cvMat()
{
#if !defined(CV__ENABLE_C_API_CTORS)
CvMat self = CV_STRUCT_INITIALIZER; return self;
#else
return CvMat();  // the struct's default ctor, not a recursive call
#endif
}
// Returns a byte-wise copy of the given CvMat header (no pixel data is copied).
CV_INLINE CvMat cvMat(const CvMat& m)
{
#if !defined(CV__ENABLE_C_API_CTORS)
CvMat self = CV_STRUCT_INITIALIZER; memcpy(&self, &m, sizeof(self)); return self;
#else
return CvMat(m);
#endif
}
#endif // __cplusplus
#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \
@ -630,13 +671,15 @@ CV_INLINE int cvIplDepth( int type )
#define CV_MAX_DIM 32
#ifdef __cplusplus
typedef struct CvMatND CvMatND;
CV_EXPORTS CvMatND cvMatND(const cv::Mat& m);
#endif
/**
@deprecated consider using cv::Mat instead
*/
typedef struct
#ifdef __cplusplus
CV_EXPORTS
#endif
CvMatND
{
int type;
@ -661,13 +704,23 @@ CvMatND
}
dim[CV_MAX_DIM];
#ifdef __cplusplus
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvMatND() {}
CvMatND(const cv::Mat& m);
CvMatND(const cv::Mat& m) { *this = cvMatND(m); }
#endif
}
CvMatND;
// Returns a zero-initialized CvMatND header.
CV_INLINE CvMatND cvMatND()
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvMatND self = CV_STRUCT_INITIALIZER; return self;
#else
return CvMatND();
#endif
}
#define CV_IS_MATND_HDR(mat) \
((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL)
@ -684,11 +737,7 @@ CvMatND;
struct CvSet;
typedef struct
#ifdef __cplusplus
CV_EXPORTS
#endif
CvSparseMat
typedef struct CvSparseMat
{
int type;
int dims;
@ -703,13 +752,13 @@ CvSparseMat
int size[CV_MAX_DIM];
#ifdef __cplusplus
void copyToSparseMat(cv::SparseMat& m) const;
CV_EXPORTS void copyToSparseMat(cv::SparseMat& m) const;
#endif
}
CvSparseMat;
#ifdef __cplusplus
CV_EXPORTS CvSparseMat* cvCreateSparseMat(const cv::SparseMat& m);
CV_EXPORTS CvSparseMat* cvCreateSparseMat(const cv::SparseMat& m);
#endif
#define CV_IS_SPARSE_MAT_HDR(mat) \
@ -796,10 +845,23 @@ typedef struct CvRect
int width;
int height;
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvRect() __attribute__(( warning("Non-initialized variable") )) {};
template<typename _Tp> CvRect(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 4);
x = y = width = height = 0;
if (list.size() == 4)
{
x = list.begin()[0]; y = list.begin()[1]; width = list.begin()[2]; height = list.begin()[3];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvRect(int _x = 0, int _y = 0, int w = 0, int h = 0): x(_x), y(_y), width(w), height(h) {}
template<typename _Tp>
CvRect(const cv::Rect_<_Tp>& r): x(cv::saturate_cast<int>(r.x)), y(cv::saturate_cast<int>(r.y)), width(cv::saturate_cast<int>(r.width)), height(cv::saturate_cast<int>(r.height)) {}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Rect_<_Tp>() const { return cv::Rect_<_Tp>((_Tp)x, (_Tp)y, (_Tp)width, (_Tp)height); }
#endif
@ -809,16 +871,16 @@ CvRect;
/** constructs CvRect structure. */
/* Constructs a CvRect from the four components. Aggregate-initialized in the
   C / plain C++ path; built via the C-API ctor when CV__ENABLE_C_API_CTORS
   is defined. */
CV_INLINE CvRect cvRect( int x, int y, int width, int height )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvRect r = {x, y, width, height};
#else
    CvRect r(x, y, width, height);
#endif
    return r;
}
#ifdef __cplusplus
CV_INLINE CvRect cvRect(const cv::Rect& rc) { return cvRect(rc.x, rc.y, rc.width, rc.height); }
#endif
CV_INLINE IplROI cvRectToROI( CvRect rect, int coi )
{
@ -853,26 +915,28 @@ typedef struct CvTermCriteria
CV_TERMCRIT_EPS */
int max_iter;
double epsilon;
#ifdef __cplusplus
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvTermCriteria(int _type = 0, int _iter = 0, double _eps = 0) : type(_type), max_iter(_iter), epsilon(_eps) {}
CvTermCriteria(const cv::TermCriteria& t) : type(t.type), max_iter(t.maxCount), epsilon(t.epsilon) {}
#endif
#ifdef __cplusplus
operator cv::TermCriteria() const { return cv::TermCriteria(type, max_iter, epsilon); }
#endif
}
CvTermCriteria;
/* Constructs a CvTermCriteria. Note: the aggregate path narrows epsilon to
   float (historic behavior of the C implementation); the ctor path stores
   the double as-is. */
CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvTermCriteria t = { type, max_iter, (float)epsilon};
#else
    CvTermCriteria t(type, max_iter, epsilon);
#endif
    return t;
}
#ifdef __cplusplus
CV_INLINE CvTermCriteria cvTermCriteria(const cv::TermCriteria& t) { return cvTermCriteria(t.type, t.maxCount, t.epsilon); }
#endif
/******************************* CvPoint and variants ***********************************/
@ -882,10 +946,23 @@ typedef struct CvPoint
int x;
int y;
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvPoint() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvPoint(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 2);
x = y = 0;
if (list.size() == 2)
{
x = list.begin()[0]; y = list.begin()[1];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvPoint(int _x = 0, int _y = 0): x(_x), y(_y) {}
template<typename _Tp>
CvPoint(const cv::Point_<_Tp>& pt): x((int)pt.x), y((int)pt.y) {}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Point_<_Tp>() const { return cv::Point_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y)); }
#endif
@ -895,24 +972,39 @@ CvPoint;
/** constructs CvPoint structure. */
/* Constructs a CvPoint. Aggregate-initialized in the C / plain C++ path;
   built via the C-API ctor when CV__ENABLE_C_API_CTORS is defined. */
CV_INLINE CvPoint cvPoint( int x, int y )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvPoint p = {x, y};
#else
    CvPoint p(x, y);
#endif
    return p;
}
#ifdef __cplusplus
CV_INLINE CvPoint cvPoint(const cv::Point& pt) { return cvPoint(pt.x, pt.y); }
#endif
typedef struct CvPoint2D32f
{
float x;
float y;
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvPoint2D32f() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvPoint2D32f(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 2);
x = y = 0;
if (list.size() == 2)
{
x = list.begin()[0]; y = list.begin()[1];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvPoint2D32f(float _x = 0, float _y = 0): x(_x), y(_y) {}
template<typename _Tp>
CvPoint2D32f(const cv::Point_<_Tp>& pt): x((float)pt.x), y((float)pt.y) {}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Point_<_Tp>() const { return cv::Point_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y)); }
#endif
@ -922,11 +1014,11 @@ CvPoint2D32f;
/** constructs CvPoint2D32f structure. */
/* Constructs a CvPoint2D32f; both coordinates are narrowed from double to float. */
CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvPoint2D32f p = { (float)x, (float)y };
#else
    CvPoint2D32f p((float)x, (float)y);
#endif
    return p;
}
@ -934,7 +1026,11 @@ CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y )
// Converts any cv::Point_<_Tp> to CvPoint2D32f by casting both coordinates to float.
template<typename _Tp>
CvPoint2D32f cvPoint2D32f(const cv::Point_<_Tp>& pt)
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvPoint2D32f p = { (float)pt.x, (float)pt.y };
#else
CvPoint2D32f p((float)pt.x, (float)pt.y);
#endif
return p;
}
#endif
@ -948,10 +1044,11 @@ CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point )
/** converts CvPoint2D32f to CvPoint. */
/* Converts CvPoint2D32f to CvPoint, rounding both coordinates with cvRound. */
CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvPoint ipt = { cvRound(point.x), cvRound(point.y) };
#else
    CvPoint ipt(cvRound(point.x), cvRound(point.y));
#endif
    return ipt;
}
@ -962,10 +1059,23 @@ typedef struct CvPoint3D32f
float y;
float z;
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvPoint3D32f() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvPoint3D32f(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 3);
x = y = z = 0;
if (list.size() == 3)
{
x = list.begin()[0]; y = list.begin()[1]; z = list.begin()[2];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvPoint3D32f(float _x = 0, float _y = 0, float _z = 0): x(_x), y(_y), z(_z) {}
template<typename _Tp>
CvPoint3D32f(const cv::Point3_<_Tp>& pt): x((float)pt.x), y((float)pt.y), z((float)pt.z) {}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Point3_<_Tp>() const { return cv::Point3_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y), cv::saturate_cast<_Tp>(z)); }
#endif
@ -975,31 +1085,51 @@ CvPoint3D32f;
/** constructs CvPoint3D32f structure. */
/* Constructs a CvPoint3D32f; all coordinates are narrowed from double to float. */
CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvPoint3D32f p = { (float)x, (float)y, (float)z };
#else
    CvPoint3D32f p((float)x, (float)y, (float)z);
#endif
    return p;
}
#ifdef __cplusplus
// Converts any cv::Point3_<_Tp> to CvPoint3D32f by casting the coordinates to float.
template<typename _Tp>
CvPoint3D32f cvPoint3D32f(const cv::Point3_<_Tp>& pt)
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvPoint3D32f p = { (float)pt.x, (float)pt.y, (float)pt.z };
#else
CvPoint3D32f p((float)pt.x, (float)pt.y, (float)pt.z);
#endif
return p;
}
#endif
typedef struct CvPoint2D64f
{
double x;
double y;
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvPoint2D64f() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvPoint2D64f(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 2);
x = y = 0;
if (list.size() == 2)
{
x = list.begin()[0]; y = list.begin()[1];
}
};
#endif
}
CvPoint2D64f;
/** constructs CvPoint2D64f structure.*/
/* Constructs a CvPoint2D64f. Plain aggregate initialization — this type has
   no CV__ENABLE_C_API_CTORS constructor variant. */
CV_INLINE CvPoint2D64f cvPoint2D64f( double x, double y )
{
    CvPoint2D64f p = { x, y };
    return p;
}
@ -1009,18 +1139,25 @@ typedef struct CvPoint3D64f
double x;
double y;
double z;
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvPoint3D64f() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvPoint3D64f(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 3);
x = y = z = 0;
if (list.size() == 3)
{
x = list.begin()[0]; y = list.begin()[1]; z = list.begin()[2];
}
};
#endif
}
CvPoint3D64f;
/** constructs CvPoint3D64f structure. */
/* Constructs a CvPoint3D64f. Plain aggregate initialization — this type has
   no CV__ENABLE_C_API_CTORS constructor variant. */
CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z )
{
    CvPoint3D64f p = { x, y, z };
    return p;
}
@ -1032,10 +1169,23 @@ typedef struct CvSize
int width;
int height;
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvSize() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvSize(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 2);
width = 0; height = 0;
if (list.size() == 2)
{
width = list.begin()[0]; height = list.begin()[1];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvSize(int w = 0, int h = 0): width(w), height(h) {}
template<typename _Tp>
CvSize(const cv::Size_<_Tp>& sz): width(cv::saturate_cast<int>(sz.width)), height(cv::saturate_cast<int>(sz.height)) {}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Size_<_Tp>() const { return cv::Size_<_Tp>(cv::saturate_cast<_Tp>(width), cv::saturate_cast<_Tp>(height)); }
#endif
@ -1045,23 +1195,48 @@ CvSize;
/** constructs CvSize structure. */
/* Constructs a CvSize. Aggregate-initialized in the C / plain C++ path;
   built via the C-API ctor when CV__ENABLE_C_API_CTORS is defined. */
CV_INLINE CvSize cvSize( int width, int height )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvSize s = { width, height };
#else
    CvSize s(width, height);
#endif
    return s;
}
#ifdef __cplusplus
// Constructs a CvSize from a cv::Size, copying both fields.
CV_INLINE CvSize cvSize(const cv::Size& sz)
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvSize s = { sz.width, sz.height };
#else
CvSize s(sz.width, sz.height);
#endif
return s;
}
#endif
typedef struct CvSize2D32f
{
float width;
float height;
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvSize2D32f() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvSize2D32f(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 2);
width = 0; height = 0;
if (list.size() == 2)
{
width = list.begin()[0]; height = list.begin()[1];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvSize2D32f(float w = 0, float h = 0): width(w), height(h) {}
template<typename _Tp>
CvSize2D32f(const cv::Size_<_Tp>& sz): width(cv::saturate_cast<float>(sz.width)), height(cv::saturate_cast<float>(sz.height)) {}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Size_<_Tp>() const { return cv::Size_<_Tp>(cv::saturate_cast<_Tp>(width), cv::saturate_cast<_Tp>(height)); }
#endif
@ -1071,13 +1246,25 @@ CvSize2D32f;
/** constructs CvSize2D32f structure. */
/* Constructs a CvSize2D32f; both dimensions are narrowed from double to float. */
CV_INLINE CvSize2D32f cvSize2D32f( double width, double height )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvSize2D32f s = { (float)width, (float)height };
#else
    CvSize2D32f s((float)width, (float)height);
#endif
    return s;
}
#ifdef __cplusplus
// Converts any cv::Size_<_Tp> to CvSize2D32f by casting both dimensions to float.
template<typename _Tp>
CvSize2D32f cvSize2D32f(const cv::Size_<_Tp>& sz)
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvSize2D32f s = { (float)sz.width, (float)sz.height };
#else
CvSize2D32f s((float)sz.width, (float)sz.height);
#endif
return s;
}
#endif
/** @sa RotatedRect
*/
@ -1088,15 +1275,37 @@ typedef struct CvBox2D
float angle; /**< Angle between the horizontal axis */
/**< and the first side (i.e. length) in degrees */
#ifdef __cplusplus
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvBox2D(CvPoint2D32f c = CvPoint2D32f(), CvSize2D32f s = CvSize2D32f(), float a = 0) : center(c), size(s), angle(a) {}
CvBox2D(const cv::RotatedRect& rr) : center(rr.center), size(rr.size), angle(rr.angle) {}
#endif
#ifdef __cplusplus
operator cv::RotatedRect() const { return cv::RotatedRect(center, size, angle); }
#endif
}
CvBox2D;
#ifdef __cplusplus
// Builds a CvBox2D from center, size and angle (all arguments default).
CV_INLINE CvBox2D cvBox2D(CvPoint2D32f c = CvPoint2D32f(), CvSize2D32f s = CvSize2D32f(), float a = 0)
{
CvBox2D self;
self.center = c;
self.size = s;
self.angle = a;
return self;
}
// Converts a cv::RotatedRect to CvBox2D field by field.
CV_INLINE CvBox2D cvBox2D(const cv::RotatedRect& rr)
{
CvBox2D self;
self.center = cvPoint2D32f(rr.center);
self.size = cvSize2D32f(rr.size);
self.angle = rr.angle;
return self;
}
#endif
/** Line iterator state: */
typedef struct CvLineIterator
{
@ -1122,7 +1331,19 @@ typedef struct CvSlice
{
int start_index, end_index;
#if defined(__cplusplus) && !defined(__CUDACC__)
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvSlice() __attribute__(( warning("Non-initialized variable") )) {}
template<typename _Tp> CvSlice(const std::initializer_list<_Tp> list)
{
CV_Assert(list.size() == 0 || list.size() == 2);
start_index = end_index = 0;
if (list.size() == 2)
{
start_index = list.begin()[0]; end_index = list.begin()[1];
}
};
#endif
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) && !defined(__CUDACC__)
CvSlice(int start = 0, int end = 0) : start_index(start), end_index(end) {}
CvSlice(const cv::Range& r) { *this = (r.start != INT_MIN && r.end != INT_MAX) ? CvSlice(r.start, r.end) : CvSlice(0, CV_WHOLE_SEQ_END_INDEX); }
operator cv::Range() const { return (start_index == 0 && end_index == CV_WHOLE_SEQ_END_INDEX ) ? cv::Range::all() : cv::Range(start_index, end_index); }
@ -1132,13 +1353,21 @@ CvSlice;
/* Constructs a CvSlice from the two indices. Aggregate-initialized in the
   C / plain C++ path; built via the C-API ctor when CV__ENABLE_C_API_CTORS
   is defined. */
CV_INLINE CvSlice cvSlice( int start, int end )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
    CvSlice slice = { start, end };
#else
    CvSlice slice(start, end);
#endif
    return slice;
}
#if defined(__cplusplus)
// Converts cv::Range to CvSlice. A range with an unbounded endpoint
// (start == INT_MIN or end == INT_MAX) maps to the whole-sequence slice
// (0 .. CV_WHOLE_SEQ_END_INDEX).
CV_INLINE CvSlice cvSlice(const cv::Range& r)
{
CvSlice slice = (r.start != INT_MIN && r.end != INT_MAX) ? cvSlice(r.start, r.end) : cvSlice(0, CV_WHOLE_SEQ_END_INDEX);
return slice;
}
#endif
/************************************* CvScalar *****************************************/
@ -1148,13 +1377,22 @@ typedef struct CvScalar
{
double val[4];
#ifdef __cplusplus
#ifdef CV__VALIDATE_UNUNITIALIZED_VARS
CvScalar() __attribute__(( warning("Non-initialized variable") )) {}
CvScalar(const std::initializer_list<double> list)
{
CV_Assert(list.size() == 0 || list.size() == 4);
val[0] = val[1] = val[2] = val[3] = 0;
if (list.size() == 4)
{
val[0] = list.begin()[0]; val[1] = list.begin()[1]; val[2] = list.begin()[2]; val[3] = list.begin()[3];
}
};
#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvScalar() {}
CvScalar(double d0, double d1 = 0, double d2 = 0, double d3 = 0) { val[0] = d0; val[1] = d1; val[2] = d2; val[3] = d3; }
template<typename _Tp>
CvScalar(const cv::Scalar_<_Tp>& s) { val[0] = s.val[0]; val[1] = s.val[1]; val[2] = s.val[2]; val[3] = s.val[3]; }
template<typename _Tp>
operator cv::Scalar_<_Tp>() const { return cv::Scalar_<_Tp>(cv::saturate_cast<_Tp>(val[0]), cv::saturate_cast<_Tp>(val[1]), cv::saturate_cast<_Tp>(val[2]), cv::saturate_cast<_Tp>(val[3])); }
template<typename _Tp, int cn>
CvScalar(const cv::Vec<_Tp, cn>& v)
{
@ -1163,22 +1401,59 @@ typedef struct CvScalar
for( ; i < 4; i++ ) val[i] = 0;
}
#endif
#ifdef __cplusplus
template<typename _Tp>
operator cv::Scalar_<_Tp>() const { return cv::Scalar_<_Tp>(cv::saturate_cast<_Tp>(val[0]), cv::saturate_cast<_Tp>(val[1]), cv::saturate_cast<_Tp>(val[2]), cv::saturate_cast<_Tp>(val[3])); }
#endif
}
CvScalar;
// Constructs a CvScalar from up to four doubles (missing arguments default to 0).
CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0),
double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0))
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvScalar scalar = CV_STRUCT_INITIALIZER;
#else
CvScalar scalar;
#endif
scalar.val[0] = val0; scalar.val[1] = val1;
scalar.val[2] = val2; scalar.val[3] = val3;
return scalar;
}
#ifdef __cplusplus
// Returns an all-zero CvScalar.
CV_INLINE CvScalar cvScalar()
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvScalar scalar = CV_STRUCT_INITIALIZER;
#else
CvScalar scalar;
#endif
scalar.val[0] = scalar.val[1] = scalar.val[2] = scalar.val[3] = 0;
return scalar;
}
// Copies the four channel values of a cv::Scalar into a CvScalar.
CV_INLINE CvScalar cvScalar(const cv::Scalar& s)
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvScalar scalar = CV_STRUCT_INITIALIZER;
#else
CvScalar scalar;
#endif
scalar.val[0] = s.val[0];
scalar.val[1] = s.val[1];
scalar.val[2] = s.val[2];
scalar.val[3] = s.val[3];
return scalar;
}
#endif
CV_INLINE CvScalar cvRealScalar( double val0 )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvScalar scalar = CV_STRUCT_INITIALIZER;
#else
CvScalar scalar;
#endif
scalar.val[0] = val0;
scalar.val[1] = scalar.val[2] = scalar.val[3] = 0;
return scalar;
@ -1186,7 +1461,11 @@ CV_INLINE CvScalar cvRealScalar( double val0 )
CV_INLINE CvScalar cvScalarAll( double val0123 )
{
#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus))
CvScalar scalar = CV_STRUCT_INITIALIZER;
#else
CvScalar scalar;
#endif
scalar.val[0] = val0123;
scalar.val[1] = val0123;
scalar.val[2] = val0123;
@ -1239,7 +1518,7 @@ typedef struct CvSeqBlock
{
struct CvSeqBlock* prev; /**< Previous sequence block. */
struct CvSeqBlock* next; /**< Next sequence block. */
int start_index; /**< Index of the first element in the block + */
int start_index; /**< Index of the first element in the block + */
/**< sequence->first->start_index. */
int count; /**< Number of elements in the block. */
schar* data; /**< Pointer to the first element of the block. */

View File

@ -117,7 +117,7 @@ OCL_PERF_TEST_P(LogFixture, Log, ::testing::Combine(
OCL_TEST_CYCLE() cv::log(src, dst);
if (CV_MAT_DEPTH(type) >= CV_32F)
SANITY_CHECK(dst, 1e-5, ERROR_RELATIVE);
SANITY_CHECK(dst, 2e-4, ERROR_RELATIVE);
else
SANITY_CHECK(dst, 1);
}

View File

@ -11,6 +11,7 @@ PERF_TEST_P(Size_MatType, addWeighted, TYPICAL_MATS_ADWEIGHTED)
{
Size size = get<0>(GetParam());
int type = get<1>(GetParam());
int depth = CV_MAT_DEPTH(type);
Mat src1(size, type);
Mat src2(size, type);
double alpha = 3.75;
@ -21,7 +22,7 @@ PERF_TEST_P(Size_MatType, addWeighted, TYPICAL_MATS_ADWEIGHTED)
declare.in(src1, src2, dst, WARMUP_RNG).out(dst);
if (CV_MAT_DEPTH(type) == CV_32S)
if (depth == CV_32S)
{
// there might be not enough precision for integers
src1 /= 2048;
@ -30,7 +31,7 @@ PERF_TEST_P(Size_MatType, addWeighted, TYPICAL_MATS_ADWEIGHTED)
TEST_CYCLE() cv::addWeighted( src1, alpha, src2, beta, gamma, dst, dst.type() );
SANITY_CHECK(dst, 1);
SANITY_CHECK(dst, depth == CV_32S ? 4 : 1);
}
} // namespace

View File

@ -33,7 +33,7 @@ PERF_TEST_P( Size_DepthSrc_DepthDst_Channels_alpha, convertTo,
int runs = (sz.width <= 640) ? 8 : 1;
TEST_CYCLE_MULTIRUN(runs) src.convertTo(dst, depthDst, alpha);
double eps = depthSrc <= CV_32S ? 1e-12 : (FLT_EPSILON * maxValue);
double eps = depthSrc <= CV_32S && (depthDst <= CV_32S || depthDst == CV_64F) ? 1e-12 : (FLT_EPSILON * maxValue);
eps = eps * std::max(1.0, fabs(alpha));
SANITY_CHECK(dst, eps);
}

View File

@ -27,11 +27,7 @@ PERF_TEST_P( Size_Depth_Channels, split,
int runs = (sz.width <= 640) ? 8 : 1;
TEST_CYCLE_MULTIRUN(runs) split(m, (vector<Mat>&)mv);
#if defined (__aarch64__)
SANITY_CHECK(mv, 2e-5);
#else
SANITY_CHECK(mv, 1e-12);
#endif
}
} // namespace

View File

@ -617,7 +617,7 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
if( (kind1 == kind2 || cn == 1) && sz1 == sz2 && dims1 <= 2 && dims2 <= 2 && type1 == type2 &&
!haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == depth1)) ||
(_dst.fixedType() && _dst.type() == type1)) &&
((src1Scalar && src2Scalar) || (!src1Scalar && !src2Scalar)) )
(src1Scalar == src2Scalar) )
{
_dst.createSameSize(*psrc1, type1);
CV_OCL_RUN(use_opencl,
@ -1204,7 +1204,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op)
compare(_src2, _src1, _dst, op);
return;
}
else if( (is_src1_scalar && is_src2_scalar) || (!is_src1_scalar && !is_src2_scalar) )
else if(is_src1_scalar == is_src2_scalar)
CV_Error( CV_StsUnmatchedSizes,
"The operation is neither 'array op array' (where arrays have the same size and the same type), "
"nor 'array op scalar', nor 'scalar op array'" );

View File

@ -1017,7 +1017,7 @@ cvGetRawData( const CvArr* arr, uchar** data, int* step, CvSize* roi_size )
*data = mat->data.ptr;
if( roi_size )
*roi_size = cvGetMatSize( mat );
*roi_size = cvSize(cvGetMatSize( mat ));
}
else if( CV_IS_IMAGE( arr ))
{
@ -1218,7 +1218,7 @@ cvGetDimSize( const CvArr* arr, int index )
CV_IMPL CvSize
cvGetSize( const CvArr* arr )
{
CvSize size;
CvSize size = {0, 0};
if( CV_IS_MAT_HDR_Z( arr ))
{
@ -1918,7 +1918,7 @@ cvPtrND( const CvArr* arr, const int* idx, int* _type,
CV_IMPL CvScalar
cvGet1D( const CvArr* arr, int idx )
{
CvScalar scalar(0);
CvScalar scalar = cvScalar();
int type = 0;
uchar* ptr;
@ -1953,7 +1953,7 @@ cvGet1D( const CvArr* arr, int idx )
CV_IMPL CvScalar
cvGet2D( const CvArr* arr, int y, int x )
{
CvScalar scalar(0);
CvScalar scalar = cvScalar();
int type = 0;
uchar* ptr;
@ -1987,7 +1987,7 @@ cvGet2D( const CvArr* arr, int y, int x )
CV_IMPL CvScalar
cvGet3D( const CvArr* arr, int z, int y, int x )
{
CvScalar scalar(0);
CvScalar scalar = cvScalar();
int type = 0;
uchar* ptr;
@ -2009,7 +2009,7 @@ cvGet3D( const CvArr* arr, int z, int y, int x )
CV_IMPL CvScalar
cvGetND( const CvArr* arr, const int* idx )
{
CvScalar scalar(0);
CvScalar scalar = cvScalar();
int type = 0;
uchar* ptr;
@ -2916,15 +2916,7 @@ cvInitImageHeader( IplImage * image, CvSize size, int depth,
if( !image )
CV_Error( CV_HeaderIsNull, "null pointer to header" );
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
memset( image, 0, sizeof( *image ));
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop
#endif
image->nSize = sizeof( *image );
*image = cvIplImage();
icvGetColorModel( channels, &colorModel, &channelSeq );
for (int i = 0; i < 4; i++)
@ -3081,7 +3073,7 @@ cvResetImageROI( IplImage* image )
CV_IMPL CvRect
cvGetImageROI( const IplImage* img )
{
CvRect rect;
CvRect rect = {0, 0, 0, 0};
if( !img )
CV_Error( CV_StsNullPtr, "Null pointer to image" );

View File

@ -5,6 +5,7 @@
#include "precomp.hpp"
#include "stat.hpp"
#include <opencv2/core/hal/hal.hpp>
namespace cv
{
@ -45,6 +46,24 @@ void batchDistL2Sqr_(const _Tp* src1, const _Tp* src2, size_t step2,
}
}
// float specialization: defer the inner distance computation to the
// HAL-optimized squared-L2 kernel instead of the generic template loop.
template<>
void batchDistL2Sqr_(const float* src1, const float* src2, size_t step2,
                     int nvecs, int len, float* dist, const uchar* mask)
{
    const size_t stride = step2 / sizeof(src2[0]); // byte step -> element step
    const float missing = std::numeric_limits<float>::max(); // reported for masked-out vectors
    for( int j = 0; j < nvecs; j++ )
    {
        if( mask && !mask[j] )
            dist[j] = missing;
        else
            dist[j] = hal::normL2Sqr_(src1, src2 + stride*j, len);
    }
}
template<typename _Tp, typename _Rt>
void batchDistL2_(const _Tp* src1, const _Tp* src2, size_t step2,
int nvecs, int len, _Rt* dist, const uchar* mask)
@ -63,6 +82,24 @@ void batchDistL2_(const _Tp* src1, const _Tp* src2, size_t step2,
}
}
template<>
void batchDistL2_(const float* src1, const float* src2, size_t step2,
int nvecs, int len, float* dist, const uchar* mask)
{
step2 /= sizeof(src2[0]);
if( !mask )
{
for( int i = 0; i < nvecs; i++ )
dist[i] = std::sqrt(hal::normL2Sqr_(src1, src2 + step2*i, len));
}
else
{
float val0 = std::numeric_limits<float>::max();
for( int i = 0; i < nvecs; i++ )
dist[i] = mask[i] ? std::sqrt(hal::normL2Sqr_(src1, src2 + step2*i, len)) : val0;
}
}
static void batchDistHamming(const uchar* src1, const uchar* src2, size_t step2,
int nvecs, int len, int* dist, const uchar* mask)
{

View File

@ -1,40 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "convert.hpp"
namespace cv
{
namespace opt_AVX2
{
void cvtScale_s16s32f32Line_AVX2(const short* src, int* dst, float scale, float shift, int width)
{
int x = 0;
__m256 scale256 = _mm256_set1_ps(scale);
__m256 shift256 = _mm256_set1_ps(shift);
const int shuffle = 0xD8;
for (; x <= width - 16; x += 16)
{
__m256i v_src = _mm256_loadu_si256((const __m256i *)(src + x));
v_src = _mm256_permute4x64_epi64(v_src, shuffle);
__m256i v_src_lo = _mm256_srai_epi32(_mm256_unpacklo_epi16(v_src, v_src), 16);
__m256i v_src_hi = _mm256_srai_epi32(_mm256_unpackhi_epi16(v_src, v_src), 16);
__m256 v_dst0 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_lo), scale256), shift256);
__m256 v_dst1 = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(v_src_hi), scale256), shift256);
_mm256_storeu_si256((__m256i *)(dst + x), _mm256_cvtps_epi32(v_dst0));
_mm256_storeu_si256((__m256i *)(dst + x + 8), _mm256_cvtps_epi32(v_dst1));
}
for (; x < width; x++)
dst[x] = saturate_cast<int>(src[x] * scale + shift);
}
}
} // cv::
/* End of file. */

File diff suppressed because it is too large Load Diff

View File

@ -1,126 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "convert.hpp"
namespace cv
{
namespace opt_FP16
{
#if !defined(CV_NEON) || !CV_NEON
const static int cVectorWidth = 8;
void cvtScaleHalf_SIMD32f16f( const float* src, size_t sstep, short* dst, size_t dstep, cv::Size size )
{
CV_INSTRUMENT_REGION()
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
for ( ; x <= size.width - cVectorWidth ; x += cVectorWidth )
{
__m256 v_src = _mm256_loadu_ps(src + x);
// round to nearest even
__m128i v_dst = _mm256_cvtps_ph(v_src, 0);
_mm_storeu_si128((__m128i*)(dst + x), v_dst);
}
for ( ; x < size.width; x++ )
{
dst[x] = convertFp16SW(src[x]);
}
}
}
void cvtScaleHalf_SIMD16f32f( const short* src, size_t sstep, float* dst, size_t dstep, cv::Size size )
{
CV_INSTRUMENT_REGION()
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
for ( ; x <= size.width - cVectorWidth ; x += cVectorWidth )
{
__m128i v_src = _mm_loadu_si128((__m128i*)(src + x));
__m256 v_dst = _mm256_cvtph_ps(v_src);
_mm256_storeu_ps(dst + x, v_dst);
}
for ( ; x < size.width; x++ )
{
dst[x] = convertFp16SW(src[x]);
}
}
}
#elif CV_NEON
const static int cVectorWidth = 4;
void cvtScaleHalf_SIMD32f16f( const float* src, size_t sstep, short* dst, size_t dstep, cv::Size size )
{
CV_INSTRUMENT_REGION()
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
for ( ; x <= size.width - cVectorWidth ; x += cVectorWidth)
{
float32x4_t v_src = vld1q_f32(src + x);
float16x4_t v_dst = vcvt_f16_f32(v_src);
cv_vst1_f16(dst + x, v_dst);
}
for ( ; x < size.width; x++ )
{
dst[x] = convertFp16SW(src[x]);
}
}
}
void cvtScaleHalf_SIMD16f32f( const short* src, size_t sstep, float* dst, size_t dstep, cv::Size size )
{
CV_INSTRUMENT_REGION()
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
for ( ; x <= size.width - cVectorWidth ; x += cVectorWidth )
{
float16x4_t v_src = cv_vld1_f16((__fp16*)src + x);
float32x4_t v_dst = vcvt_f32_f16(v_src);
vst1q_f32(dst + x, v_dst);
}
for ( ; x < size.width; x++ )
{
dst[x] = convertFp16SW(src[x]);
}
}
}
#else
#error "Unsupported build configuration"
#endif
}
} // cv::

View File

@ -8,192 +8,402 @@
#include "opencv2/core/types.hpp"
namespace
{
float convertFp16SW(short fp16);
short convertFp16SW(float fp32);
#if !CV_FP16_TYPE
// const numbers for floating points format
const unsigned int kShiftSignificand = 13;
const unsigned int kMaskFp16Significand = 0x3ff;
const unsigned int kBiasFp16Exponent = 15;
const unsigned int kBiasFp32Exponent = 127;
#endif
#if CV_FP16_TYPE
inline float convertFp16SW(short fp16)
{
// Fp16 -> Fp32
Cv16suf a;
a.i = fp16;
return (float)a.h;
}
#else
inline float convertFp16SW(short fp16)
{
// Fp16 -> Fp32
Cv16suf b;
b.i = fp16;
int exponent = b.fmt.exponent - kBiasFp16Exponent;
int significand = b.fmt.significand;
Cv32suf a;
a.i = 0;
a.fmt.sign = b.fmt.sign; // sign bit
if( exponent == 16 )
{
// Inf or NaN
a.i = a.i | 0x7F800000;
if( significand != 0 )
{
// NaN
#if defined(__x86_64__) || defined(_M_X64)
// 64bit
a.i = a.i | 0x7FC00000;
#endif
a.fmt.significand = a.fmt.significand | (significand << kShiftSignificand);
}
return a.f;
}
else if ( exponent == -(int)kBiasFp16Exponent )
{
// subnormal in Fp16
if( significand == 0 )
{
// zero
return a.f;
}
else
{
int shift = -1;
while( ( significand & 0x400 ) == 0 )
{
significand = significand << 1;
shift++;
}
significand = significand & kMaskFp16Significand;
exponent -= shift;
}
}
a.fmt.exponent = (exponent+kBiasFp32Exponent);
a.fmt.significand = significand << kShiftSignificand;
return a.f;
}
#endif
#if CV_FP16_TYPE
inline short convertFp16SW(float fp32)
{
// Fp32 -> Fp16
Cv16suf a;
a.h = (__fp16)fp32;
return a.i;
}
#else
inline short convertFp16SW(float fp32)
{
// Fp32 -> Fp16
Cv32suf a;
a.f = fp32;
int exponent = a.fmt.exponent - kBiasFp32Exponent;
int significand = a.fmt.significand;
Cv16suf result;
result.i = 0;
unsigned int absolute = a.i & 0x7fffffff;
if( 0x477ff000 <= absolute )
{
// Inf in Fp16
result.i = result.i | 0x7C00;
if( exponent == 128 && significand != 0 )
{
// NaN
result.i = (short)( result.i | 0x200 | ( significand >> kShiftSignificand ) );
}
}
else if ( absolute < 0x33000001 )
{
// too small for fp16
result.i = 0;
}
else if ( absolute < 0x387fe000 )
{
// subnormal in Fp16
int fp16Significand = significand | 0x800000;
int bitShift = (-exponent) - 1;
fp16Significand = fp16Significand >> bitShift;
// special cases to round up
bitShift = exponent + 24;
int threshold = ( ( 0x400000 >> bitShift ) | ( ( ( significand & ( 0x800000 >> bitShift ) ) >> ( 126 - a.fmt.exponent ) ) ^ 1 ) );
if( absolute == 0x33c00000 )
{
result.i = 2;
}
else
{
if( threshold <= ( significand & ( 0xffffff >> ( exponent + 25 ) ) ) )
{
fp16Significand++;
}
result.i = (short)fp16Significand;
}
}
else
{
// usual situation
// exponent
result.fmt.exponent = ( exponent + kBiasFp16Exponent );
// significand;
short fp16Significand = (short)(significand >> kShiftSignificand);
result.fmt.significand = fp16Significand;
// special cases to round up
short lsb10bitsFp32 = (significand & 0x1fff);
short threshold = 0x1000 + ( ( fp16Significand & 0x1 ) ? 0 : 1 );
if( threshold <= lsb10bitsFp32 )
{
result.i++;
}
else if ( fp16Significand == kMaskFp16Significand && exponent == -15)
{
result.i++;
}
}
// sign bit
result.fmt.sign = a.fmt.sign;
return result.i;
}
#endif
}
namespace cv
{
namespace opt_FP16
#if CV_SIMD
static inline void vx_load_as(const uchar* ptr, v_float32& a)
{ a = v_cvt_f32(v_reinterpret_as_s32(vx_load_expand_q(ptr))); }
static inline void vx_load_as(const schar* ptr, v_float32& a)
{ a = v_cvt_f32(vx_load_expand_q(ptr)); }
static inline void vx_load_as(const ushort* ptr, v_float32& a)
{ a = v_cvt_f32(v_reinterpret_as_s32(vx_load_expand(ptr))); }
static inline void vx_load_as(const short* ptr, v_float32& a)
{ a = v_cvt_f32(v_reinterpret_as_s32(vx_load_expand(ptr))); }
static inline void vx_load_as(const int* ptr, v_float32& a)
{ a = v_cvt_f32(vx_load(ptr)); }
static inline void vx_load_as(const float* ptr, v_float32& a)
{ a = vx_load(ptr); }
static inline void vx_load_as(const float16_t* ptr, v_float32& a)
{ a = vx_load_expand(ptr); }
static inline void v_store_as(ushort* ptr, const v_float32& a)
{ v_pack_u_store(ptr, v_round(a)); }
static inline void v_store_as(short* ptr, const v_float32& a)
{ v_pack_store(ptr, v_round(a)); }
static inline void v_store_as(int* ptr, const v_float32& a)
{ v_store(ptr, v_round(a)); }
static inline void v_store_as(float* ptr, const v_float32& a)
{ v_store(ptr, a); }
static inline void v_store_as(float16_t* ptr, const v_float32& a)
{ v_pack_store(ptr, a); }
static inline void vx_load_pair_as(const uchar* ptr, v_uint16& a, v_uint16& b)
{ v_expand(vx_load(ptr), a, b); }
static inline void vx_load_pair_as(const schar* ptr, v_uint16& a, v_uint16& b)
{
void cvtScaleHalf_SIMD32f16f( const float* src, size_t sstep, short* dst, size_t dstep, cv::Size size );
void cvtScaleHalf_SIMD16f32f( const short* src, size_t sstep, float* dst, size_t dstep, cv::Size size );
const v_int8 z = vx_setzero_s8();
v_int16 sa, sb;
v_expand(v_max(vx_load(ptr), z), sa, sb);
a = v_reinterpret_as_u16(sa);
b = v_reinterpret_as_u16(sb);
}
namespace opt_AVX2
static inline void vx_load_pair_as(const ushort* ptr, v_uint16& a, v_uint16& b)
{ a = vx_load(ptr); b = vx_load(ptr + v_uint16::nlanes); }
static inline void vx_load_pair_as(const uchar* ptr, v_int16& a, v_int16& b)
{
void cvtScale_s16s32f32Line_AVX2(const short* src, int* dst, float scale, float shift, int width);
v_uint16 ua, ub;
v_expand(vx_load(ptr), ua, ub);
a = v_reinterpret_as_s16(ua);
b = v_reinterpret_as_s16(ub);
}
namespace opt_SSE4_1
static inline void vx_load_pair_as(const schar* ptr, v_int16& a, v_int16& b)
{ v_expand(vx_load(ptr), a, b); }
static inline void vx_load_pair_as(const short* ptr, v_int16& a, v_int16& b)
{ a = vx_load(ptr); b = vx_load(ptr + v_uint16::nlanes); }
static inline void vx_load_pair_as(const uchar* ptr, v_int32& a, v_int32& b)
{
int cvtScale_SIMD_u8u16f32_SSE41(const uchar * src, ushort * dst, int width, float scale, float shift);
int cvtScale_SIMD_s8u16f32_SSE41(const schar * src, ushort * dst, int width, float scale, float shift);
int cvtScale_SIMD_u16u16f32_SSE41(const ushort * src, ushort * dst, int width, float scale, float shift);
int cvtScale_SIMD_s16u16f32_SSE41(const short * src, ushort * dst, int width, float scale, float shift);
int cvtScale_SIMD_s32u16f32_SSE41(const int * src, ushort * dst, int width, float scale, float shift);
int cvtScale_SIMD_f32u16f32_SSE41(const float * src, ushort * dst, int width, float scale, float shift);
int cvtScale_SIMD_f64u16f32_SSE41(const double * src, ushort * dst, int width, float scale, float shift);
int Cvt_SIMD_f64u16_SSE41(const double * src, ushort * dst, int width);
v_uint32 ua, ub;
v_expand(vx_load_expand(ptr), ua, ub);
a = v_reinterpret_as_s32(ua);
b = v_reinterpret_as_s32(ub);
}
static inline void vx_load_pair_as(const schar* ptr, v_int32& a, v_int32& b)
{ v_expand(vx_load_expand(ptr), a, b); }
static inline void vx_load_pair_as(const ushort* ptr, v_int32& a, v_int32& b)
{
v_uint32 ua, ub;
v_expand(vx_load(ptr), ua, ub);
a = v_reinterpret_as_s32(ua);
b = v_reinterpret_as_s32(ub);
}
static inline void vx_load_pair_as(const short* ptr, v_int32& a, v_int32& b)
{
v_expand(vx_load(ptr), a, b);
}
static inline void vx_load_pair_as(const int* ptr, v_int32& a, v_int32& b)
{
a = vx_load(ptr);
b = vx_load(ptr + v_int32::nlanes);
}
static inline void vx_load_pair_as(const uchar* ptr, v_float32& a, v_float32& b)
{
v_uint32 ua, ub;
v_expand(vx_load_expand(ptr), ua, ub);
a = v_cvt_f32(v_reinterpret_as_s32(ua));
b = v_cvt_f32(v_reinterpret_as_s32(ub));
}
static inline void vx_load_pair_as(const schar* ptr, v_float32& a, v_float32& b)
{
v_int32 ia, ib;
v_expand(vx_load_expand(ptr), ia, ib);
a = v_cvt_f32(ia);
b = v_cvt_f32(ib);
}
static inline void vx_load_pair_as(const ushort* ptr, v_float32& a, v_float32& b)
{
v_uint32 ua, ub;
v_expand(vx_load(ptr), ua, ub);
a = v_cvt_f32(v_reinterpret_as_s32(ua));
b = v_cvt_f32(v_reinterpret_as_s32(ub));
}
static inline void vx_load_pair_as(const short* ptr, v_float32& a, v_float32& b)
{
v_int32 ia, ib;
v_expand(vx_load(ptr), ia, ib);
a = v_cvt_f32(ia);
b = v_cvt_f32(ib);
}
static inline void vx_load_pair_as(const int* ptr, v_float32& a, v_float32& b)
{
v_int32 ia = vx_load(ptr), ib = vx_load(ptr + v_int32::nlanes);
a = v_cvt_f32(ia);
b = v_cvt_f32(ib);
}
static inline void vx_load_pair_as(const float* ptr, v_float32& a, v_float32& b)
{ a = vx_load(ptr); b = vx_load(ptr + v_float32::nlanes); }
//static inline void vx_load_pair_as(const float16_t* ptr, v_float32& a, v_float32& b)
//{
// a = vx_load_expand(ptr);
// b = vx_load_expand(ptr + v_float32::nlanes);
//}
static inline void v_store_pair_as(uchar* ptr, const v_uint16& a, const v_uint16& b)
{
v_store(ptr, v_pack(a, b));
}
static inline void v_store_pair_as(schar* ptr, const v_uint16& a, const v_uint16& b)
{
const v_uint8 maxval = vx_setall_u8((uchar)std::numeric_limits<schar>::max());
v_uint8 v = v_pack(a, b);
v_store(ptr, v_reinterpret_as_s8(v_min(v, maxval)));
}
static inline void v_store_pair_as(ushort* ptr, const v_uint16& a, const v_uint16& b)
{ v_store(ptr, a); v_store(ptr + v_uint16::nlanes, b); }
static inline void v_store_pair_as(uchar* ptr, const v_int16& a, const v_int16& b)
{ v_store(ptr, v_pack_u(a, b)); }
static inline void v_store_pair_as(schar* ptr, const v_int16& a, const v_int16& b)
{ v_store(ptr, v_pack(a, b)); }
static inline void v_store_pair_as(short* ptr, const v_int16& a, const v_int16& b)
{ v_store(ptr, a); v_store(ptr + v_int16::nlanes, b); }
static inline void v_store_pair_as(uchar* ptr, const v_int32& a, const v_int32& b)
{ v_pack_u_store(ptr, v_pack(a, b)); }
static inline void v_store_pair_as(schar* ptr, const v_int32& a, const v_int32& b)
{ v_pack_store(ptr, v_pack(a, b)); }
static inline void v_store_pair_as(ushort* ptr, const v_int32& a, const v_int32& b)
{ v_store(ptr, v_pack_u(a, b)); }
static inline void v_store_pair_as(short* ptr, const v_int32& a, const v_int32& b)
{ v_store(ptr, v_pack(a, b)); }
static inline void v_store_pair_as(int* ptr, const v_int32& a, const v_int32& b)
{
v_store(ptr, a);
v_store(ptr + v_int32::nlanes, b);
}
static inline void v_store_pair_as(uchar* ptr, const v_float32& a, const v_float32& b)
{ v_pack_u_store(ptr, v_pack(v_round(a), v_round(b))); }
static inline void v_store_pair_as(schar* ptr, const v_float32& a, const v_float32& b)
{ v_pack_store(ptr, v_pack(v_round(a), v_round(b))); }
static inline void v_store_pair_as(ushort* ptr, const v_float32& a, const v_float32& b)
{ v_store(ptr, v_pack_u(v_round(a), v_round(b))); }
static inline void v_store_pair_as(short* ptr, const v_float32& a, const v_float32& b)
{ v_store(ptr, v_pack(v_round(a), v_round(b))); }
static inline void v_store_pair_as(int* ptr, const v_float32& a, const v_float32& b)
{
v_int32 ia = v_round(a), ib = v_round(b);
v_store(ptr, ia);
v_store(ptr + v_int32::nlanes, ib);
}
static inline void v_store_pair_as(float* ptr, const v_float32& a, const v_float32& b)
{ v_store(ptr, a); v_store(ptr + v_float32::nlanes, b); }
#if CV_SIMD_64F
static inline void vx_load_as(const double* ptr, v_float32& a)
{
v_float64 v0 = vx_load(ptr), v1 = vx_load(ptr + v_float64::nlanes);
a = v_cvt_f32(v0, v1);
}
static inline void vx_load_pair_as(const double* ptr, v_int32& a, v_int32& b)
{
v_float64 v0 = vx_load(ptr), v1 = vx_load(ptr + v_float64::nlanes);
v_float64 v2 = vx_load(ptr + v_float64::nlanes*2), v3 = vx_load(ptr + v_float64::nlanes*3);
v_int32 iv0 = v_round(v0), iv1 = v_round(v1);
v_int32 iv2 = v_round(v2), iv3 = v_round(v3);
a = v_combine_low(iv0, iv1);
b = v_combine_low(iv2, iv3);
}
static inline void vx_load_pair_as(const double* ptr, v_float32& a, v_float32& b)
{
v_float64 v0 = vx_load(ptr), v1 = vx_load(ptr + v_float64::nlanes);
v_float64 v2 = vx_load(ptr + v_float64::nlanes*2), v3 = vx_load(ptr + v_float64::nlanes*3);
a = v_cvt_f32(v0, v1);
b = v_cvt_f32(v2, v3);
}
static inline void vx_load_pair_as(const uchar* ptr, v_float64& a, v_float64& b)
{
v_int32 v0 = v_reinterpret_as_s32(vx_load_expand_q(ptr));
a = v_cvt_f64(v0);
b = v_cvt_f64_high(v0);
}
static inline void vx_load_pair_as(const schar* ptr, v_float64& a, v_float64& b)
{
v_int32 v0 = vx_load_expand_q(ptr);
a = v_cvt_f64(v0);
b = v_cvt_f64_high(v0);
}
static inline void vx_load_pair_as(const ushort* ptr, v_float64& a, v_float64& b)
{
v_int32 v0 = v_reinterpret_as_s32(vx_load_expand(ptr));
a = v_cvt_f64(v0);
b = v_cvt_f64_high(v0);
}
static inline void vx_load_pair_as(const short* ptr, v_float64& a, v_float64& b)
{
v_int32 v0 = vx_load_expand(ptr);
a = v_cvt_f64(v0);
b = v_cvt_f64_high(v0);
}
static inline void vx_load_pair_as(const int* ptr, v_float64& a, v_float64& b)
{
v_int32 v0 = vx_load(ptr);
a = v_cvt_f64(v0);
b = v_cvt_f64_high(v0);
}
static inline void vx_load_pair_as(const float* ptr, v_float64& a, v_float64& b)
{
v_float32 v0 = vx_load(ptr);
a = v_cvt_f64(v0);
b = v_cvt_f64_high(v0);
}
static inline void vx_load_pair_as(const double* ptr, v_float64& a, v_float64& b)
{
a = vx_load(ptr);
b = vx_load(ptr + v_float64::nlanes);
}
//static inline void vx_load_pair_as(const float16_t* ptr, v_float64& a, v_float64& b)
//{
// v_float32 v0 = vx_load_expand(ptr);
// a = v_cvt_f64(v0);
// b = v_cvt_f64_high(v0);
//}
static inline void v_store_as(double* ptr, const v_float32& a)
{
v_float64 fa0 = v_cvt_f64(a), fa1 = v_cvt_f64_high(a);
v_store(ptr, fa0);
v_store(ptr + v_float64::nlanes, fa1);
}
static inline void v_store_pair_as(double* ptr, const v_int32& a, const v_int32& b)
{
v_float64 fa0 = v_cvt_f64(a), fa1 = v_cvt_f64_high(a);
v_float64 fb0 = v_cvt_f64(b), fb1 = v_cvt_f64_high(b);
v_store(ptr, fa0);
v_store(ptr + v_float64::nlanes, fa1);
v_store(ptr + v_float64::nlanes*2, fb0);
v_store(ptr + v_float64::nlanes*3, fb1);
}
static inline void v_store_pair_as(double* ptr, const v_float32& a, const v_float32& b)
{
v_float64 fa0 = v_cvt_f64(a), fa1 = v_cvt_f64_high(a);
v_float64 fb0 = v_cvt_f64(b), fb1 = v_cvt_f64_high(b);
v_store(ptr, fa0);
v_store(ptr + v_float64::nlanes, fa1);
v_store(ptr + v_float64::nlanes*2, fb0);
v_store(ptr + v_float64::nlanes*3, fb1);
}
static inline void v_store_pair_as(double* ptr, const v_float64& a, const v_float64& b)
{
v_store(ptr, a);
v_store(ptr + v_float64::nlanes, b);
}
static inline void v_store_pair_as(int* ptr, const v_float64& a, const v_float64& b)
{
v_int32 ia = v_round(a), ib = v_round(b);
v_store(ptr, v_combine_low(ia, ib));
}
static inline void v_store_pair_as(float* ptr, const v_float64& a, const v_float64& b)
{
v_float32 v = v_cvt_f32(a, b);
v_store(ptr, v);
}
//static inline void v_store_pair_as(float16_t* ptr, const v_float64& a, const v_float64& b)
//{
// v_float32 v = v_cvt_f32(a, b);
// v_pack_store(ptr, v);
//}
#else
static inline void vx_load_as(const double* ptr, v_float32& a)
{
const int VECSZ = v_float32::nlanes;
float buf[VECSZ*2];
for( int i = 0; i < VECSZ; i++ )
buf[i] = saturate_cast<float>(ptr[i]);
a = vx_load(buf);
}
template<typename _Tdvec>
static inline void vx_load_pair_as(const double* ptr, _Tdvec& a, _Tdvec& b)
{
const int VECSZ = _Tdvec::nlanes;
typename _Tdvec::lane_type buf[VECSZ*2];
for( int i = 0; i < VECSZ*2; i++ )
buf[i] = saturate_cast<typename _Tdvec::lane_type>(ptr[i]);
a = vx_load(buf);
b = vx_load(buf + VECSZ);
}
static inline void v_store_as(double* ptr, const v_float32& a)
{
const int VECSZ = v_float32::nlanes;
float buf[VECSZ];
v_store(buf, a);
for( int i = 0; i < VECSZ; i++ )
ptr[i] = (double)buf[i];
}
template<typename _Tsvec>
static inline void v_store_pair_as(double* ptr, const _Tsvec& a, const _Tsvec& b)
{
const int VECSZ = _Tsvec::nlanes;
typename _Tsvec::lane_type buf[VECSZ*2];
v_store(buf, a); v_store(buf + VECSZ, b);
for( int i = 0; i < VECSZ*2; i++ )
ptr[i] = (double)buf[i];
}
#endif /////////// CV_SIMD_64F
#endif /////////// CV_SIMD
}
#endif // SRC_CONVERT_HPP

View File

@ -1,203 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "convert.hpp"
namespace cv
{
namespace opt_SSE4_1
{
int cvtScale_SIMD_u8u16f32_SSE41(const uchar * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128i v_zero = _mm_setzero_si128();
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src + x)), v_zero);
__m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int cvtScale_SIMD_s8u16f32_SSE41(const schar * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128i v_zero = _mm_setzero_si128();
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src + x))), 8);
__m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int cvtScale_SIMD_u16u16f32_SSE41(const ushort * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128i v_zero = _mm_setzero_si128();
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
__m128 v_src_f = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_src, v_zero));
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
v_src_f = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_src, v_zero));
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int cvtScale_SIMD_s16u16f32_SSE41(const short * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128i v_zero = _mm_setzero_si128();
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
__m128 v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
v_src_f = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src_f, v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int cvtScale_SIMD_s32u16f32_SSE41(const int * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128i v_src = _mm_loadu_si128((__m128i const *)(src + x));
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
v_src = _mm_loadu_si128((__m128i const *)(src + x + 4));
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(v_src), v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int cvtScale_SIMD_f32u16f32_SSE41(const float * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128 v_src = _mm_loadu_ps(src + x);
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
v_src = _mm_loadu_ps(src + x + 4);
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int cvtScale_SIMD_f64u16f32_SSE41(const double * src, ushort * dst, int width, float scale, float shift)
{
int x = 0;
__m128 v_scale = _mm_set1_ps(scale), v_shift = _mm_set1_ps(shift);
for ( ; x <= width - 8; x += 8)
{
__m128 v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x)),
_mm_cvtpd_ps(_mm_loadu_pd(src + x + 2)));
__m128 v_dst_0 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
v_src = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(src + x + 4)),
_mm_cvtpd_ps(_mm_loadu_pd(src + x + 6)));
__m128 v_dst_1 = _mm_add_ps(_mm_mul_ps(v_src, v_scale), v_shift);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_dst_0),
_mm_cvtps_epi32(v_dst_1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
int Cvt_SIMD_f64u16_SSE41(const double * src, ushort * dst, int width)
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
__m128 v_src0 = _mm_cvtpd_ps(_mm_loadu_pd(src + x));
__m128 v_src1 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 2));
__m128 v_src2 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 4));
__m128 v_src3 = _mm_cvtpd_ps(_mm_loadu_pd(src + x + 6));
v_src0 = _mm_movelh_ps(v_src0, v_src1);
v_src1 = _mm_movelh_ps(v_src2, v_src3);
__m128i v_dst = _mm_packus_epi32(_mm_cvtps_epi32(v_src0),
_mm_cvtps_epi32(v_src1));
_mm_storeu_si128((__m128i *)(dst + x), v_dst);
}
return x;
}
}
} // cv::
/* End of file. */

File diff suppressed because it is too large Load Diff

View File

@ -258,7 +258,7 @@ void Mat::copyTo( OutputArray _dst ) const
UMat dst = _dst.getUMat();
CV_Assert(dst.u != NULL);
size_t i, sz[CV_MAX_DIM] = {0}, dstofs[CV_MAX_DIM], esz = elemSize();
CV_Assert(dims >= 0 && dims < CV_MAX_DIM);
CV_Assert(dims > 0 && dims < CV_MAX_DIM);
for( i = 0; i < (size_t)dims; i++ )
sz[i] = size.p[i];
sz[dims-1] *= esz;

View File

@ -43,6 +43,7 @@
#include "precomp.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/hal/hal.hpp>
////////////////////////////////////////// kmeans ////////////////////////////////////////////
@ -74,7 +75,7 @@ public:
for (int i = begin; i<end; i++)
{
tdist2[i] = std::min(normL2Sqr(data.ptr<float>(i), data.ptr<float>(ci), dims), dist[i]);
tdist2[i] = std::min(hal::normL2Sqr_(data.ptr<float>(i), data.ptr<float>(ci), dims), dist[i]);
}
}
@ -106,7 +107,7 @@ static void generateCentersPP(const Mat& data, Mat& _out_centers,
for (int i = 0; i < N; i++)
{
dist[i] = normL2Sqr(data.ptr<float>(i), data.ptr<float>(centers[0]), dims);
dist[i] = hal::normL2Sqr_(data.ptr<float>(i), data.ptr<float>(centers[0]), dims);
sum0 += dist[i];
}
@ -185,7 +186,7 @@ public:
if (onlyDistance)
{
const float* center = centers.ptr<float>(labels[i]);
distances[i] = normL2Sqr(sample, center, dims);
distances[i] = hal::normL2Sqr_(sample, center, dims);
continue;
}
else
@ -196,7 +197,7 @@ public:
for (int k = 0; k < K; k++)
{
const float* center = centers.ptr<float>(k);
const double dist = normL2Sqr(sample, center, dims);
const double dist = hal::normL2Sqr_(sample, center, dims);
if (min_dist > dist)
{
@ -379,7 +380,7 @@ double cv::kmeans( InputArray _data, int K,
if (labels[i] != max_k)
continue;
const float* sample = data.ptr<float>(i);
double dist = normL2Sqr(sample, _base_center, dims);
double dist = hal::normL2Sqr_(sample, _base_center, dims);
if (max_dist <= dist)
{

View File

@ -4,20 +4,24 @@
// glue
CvMatND::CvMatND(const cv::Mat& m)
CvMatND cvMatND(const cv::Mat& m)
{
cvInitMatNDHeader(this, m.dims, m.size, m.type(), m.data );
CvMatND self;
cvInitMatNDHeader(&self, m.dims, m.size, m.type(), m.data );
int i, d = m.dims;
for( i = 0; i < d; i++ )
dim[i].step = (int)m.step[i];
type |= m.flags & cv::Mat::CONTINUOUS_FLAG;
self.dim[i].step = (int)m.step[i];
self.type |= m.flags & cv::Mat::CONTINUOUS_FLAG;
return self;
}
_IplImage::_IplImage(const cv::Mat& m)
_IplImage cvIplImage(const cv::Mat& m)
{
_IplImage self;
CV_Assert( m.dims <= 2 );
cvInitImageHeader(this, m.size(), cvIplDepth(m.flags), m.channels());
cvSetData(this, m.data, (int)m.step[0]);
cvInitImageHeader(&self, cvSize(m.size()), cvIplDepth(m.flags), m.channels());
cvSetData(&self, m.data, (int)m.step[0]);
return self;
}
namespace cv {
@ -222,7 +226,7 @@ CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value )
CV_IMPL CvScalar cvTrace( const CvArr* arr )
{
return cv::trace(cv::cvarrToMat(arr));
return cvScalar(cv::trace(cv::cvarrToMat(arr)));
}

View File

@ -457,12 +457,12 @@ void write( FileStorage& fs, const String& name, const Mat& value )
{
if( value.dims <= 2 )
{
CvMat mat = value;
CvMat mat = cvMat(value);
cvWrite( *fs, name.size() ? name.c_str() : 0, &mat );
}
else
{
CvMatND mat = value;
CvMatND mat = cvMatND(value);
cvWrite( *fs, name.size() ? name.c_str() : 0, &mat );
}
}

View File

@ -31,7 +31,7 @@ static void icvWriteMat( CvFileStorage* fs, const char* name, const void* struct
{
const CvMat* mat = (const CvMat*)struct_ptr;
char dt[16];
CvSize size;
cv::Size size;
int y;
assert( CV_IS_MAT_HDR_Z(mat) );
@ -380,7 +380,7 @@ static void icvWriteImage( CvFileStorage* fs, const char* name, const void* stru
{
const IplImage* image = (const IplImage*)struct_ptr;
char dt_buf[16], *dt;
CvSize size;
cv::Size size;
int y, depth;
assert( CV_IS_IMAGE(image) );
@ -435,7 +435,7 @@ static void* icvReadImage( CvFileStorage* fs, CvFileNode* node )
CvFileNode* data;
CvFileNode* roi_node;
CvSeqReader reader;
CvRect roi;
cv::Rect roi;
int y, width, height, elem_type, coi, depth;
const char* origin, *data_order;
@ -472,7 +472,7 @@ static void* icvReadImage( CvFileStorage* fs, CvFileNode* node )
roi.height = cvReadIntByName( fs, roi_node, "height", 0 );
coi = cvReadIntByName( fs, roi_node, "coi", 0 );
cvSetImageROI( image, roi );
cvSetImageROI( image, cvRect(roi) );
cvSetImageCOI( image, coi );
}

View File

@ -17,7 +17,7 @@ CV_IMPL CvScalar cvSum( const CvArr* srcarr )
sum = cv::Scalar(sum[coi-1]);
}
}
return sum;
return cvScalar(sum);
}
CV_IMPL int cvCountNonZero( const CvArr* imgarr )
@ -43,7 +43,7 @@ cvAvg( const void* imgarr, const void* maskarr )
mean = cv::Scalar(mean[coi-1]);
}
}
return mean;
return cvScalar(mean);
}

View File

@ -1123,7 +1123,6 @@ template<typename R> struct TheTest
return *this;
}
#if CV_FP16
TheTest & test_loadstore_fp16_f32()
{
printf("test_loadstore_fp16_f32 ...\n");
@ -1133,14 +1132,14 @@ template<typename R> struct TheTest
AlignedData<v_float32> data_f32; data_f32.a.clear();
AlignedData<v_uint16> out;
R r1 = vx_load_fp16_f32((short*)data.a.d);
R r1 = vx_load_expand((const cv::float16_t*)data.a.d);
R r2(r1);
EXPECT_EQ(1.0f, r1.get0());
vx_store(data_f32.a.d, r2);
EXPECT_EQ(-2.0f, data_f32.a.d[R::nlanes - 1]);
out.a.clear();
vx_store_fp16((short*)out.a.d, r2);
v_pack_store((cv::float16_t*)out.a.d, r2);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(data.a[i], out.a[i]) << "i=" << i;
@ -1148,9 +1147,8 @@ template<typename R> struct TheTest
return *this;
}
#endif
#if CV_SIMD_FP16
#if 0
TheTest & test_loadstore_fp16()
{
printf("test_loadstore_fp16 ...\n");
@ -1165,7 +1163,7 @@ template<typename R> struct TheTest
// check some initialization methods
R r1 = data.u;
R r2 = vx_load_f16(data.a.d);
R r2 = vx_load_expand((const float16_t*)data.a.d);
R r3(r2);
EXPECT_EQ(data.u[0], r1.get0());
EXPECT_EQ(data.a[0], r2.get0());

View File

@ -214,7 +214,7 @@ protected:
}
CvMat* m = (CvMat*)fs["test_mat"].readObj();
CvMat _test_mat = test_mat;
CvMat _test_mat = cvMat(test_mat);
double max_diff = 0;
CvMat stub1, _test_stub1;
cvReshape(m, &stub1, 1, 0);
@ -234,7 +234,7 @@ protected:
cvReleaseMat(&m);
CvMatND* m_nd = (CvMatND*)fs["test_mat_nd"].readObj();
CvMatND _test_mat_nd = test_mat_nd;
CvMatND _test_mat_nd = cvMatND(test_mat_nd);
if( !m_nd || !CV_IS_MATND(m_nd) )
{
@ -263,7 +263,7 @@ protected:
MatND mat_nd2;
fs["test_mat_nd"] >> mat_nd2;
CvMatND m_nd2 = mat_nd2;
CvMatND m_nd2 = cvMatND(mat_nd2);
cvGetMat(&m_nd2, &stub, 0, 1);
cvReshape(&stub, &stub1, 1, 0);

View File

@ -415,15 +415,15 @@ TEST(Core_PCA, accuracy)
#ifdef CHECK_C
// 4. check C PCA & ROW
_points = rPoints;
_testPoints = rTestPoints;
_avg = avg;
_eval = eval;
_evec = evec;
_points = cvMat(rPoints);
_testPoints = cvMat(rTestPoints);
_avg = cvMat(avg);
_eval = cvMat(eval);
_evec = cvMat(evec);
prjTestPoints.create(rTestPoints.rows, maxComponents, rTestPoints.type() );
backPrjTestPoints.create(rPoints.size(), rPoints.type() );
_prjTestPoints = prjTestPoints;
_backPrjTestPoints = backPrjTestPoints;
_prjTestPoints = cvMat(prjTestPoints);
_backPrjTestPoints = cvMat(backPrjTestPoints);
cvCalcPCA( &_points, &_avg, &_eval, &_evec, CV_PCA_DATA_AS_ROW );
cvProjectPCA( &_testPoints, &_avg, &_evec, &_prjTestPoints );
@ -435,13 +435,13 @@ TEST(Core_PCA, accuracy)
ASSERT_LE(err, diffBackPrjEps) << "bad accuracy of cvBackProjectPCA() (CV_PCA_DATA_AS_ROW)";
// 5. check C PCA & COL
_points = cPoints;
_testPoints = cTestPoints;
avg = avg.t(); _avg = avg;
eval = eval.t(); _eval = eval;
evec = evec.t(); _evec = evec;
prjTestPoints = prjTestPoints.t(); _prjTestPoints = prjTestPoints;
backPrjTestPoints = backPrjTestPoints.t(); _backPrjTestPoints = backPrjTestPoints;
_points = cvMat(cPoints);
_testPoints = cvMat(cTestPoints);
avg = avg.t(); _avg = cvMat(avg);
eval = eval.t(); _eval = cvMat(eval);
evec = evec.t(); _evec = cvMat(evec);
prjTestPoints = prjTestPoints.t(); _prjTestPoints = cvMat(prjTestPoints);
backPrjTestPoints = backPrjTestPoints.t(); _backPrjTestPoints = cvMat(backPrjTestPoints);
cvCalcPCA( &_points, &_avg, &_eval, &_evec, CV_PCA_DATA_AS_COL );
cvProjectPCA( &_testPoints, &_avg, &_evec, &_prjTestPoints );
@ -615,7 +615,7 @@ void Core_ArrayOpTest::run( int /* start_from */)
{
int sz3[] = {5, 10, 15};
MatND A(3, sz3, CV_32F), B(3, sz3, CV_16SC4);
CvMatND matA = A, matB = B;
CvMatND matA = cvMatND(A), matB = cvMatND(B);
RNG rng;
rng.fill(A, CV_RAND_UNI, Scalar::all(-10), Scalar::all(10));
rng.fill(B, CV_RAND_UNI, Scalar::all(-10), Scalar::all(10));
@ -625,8 +625,8 @@ void Core_ArrayOpTest::run( int /* start_from */)
Scalar val1(-1000, 30, 3, 8);
cvSetRealND(&matA, idx0, val0);
cvSetReal3D(&matA, idx1[0], idx1[1], idx1[2], -val0);
cvSetND(&matB, idx0, val1);
cvSet3D(&matB, idx1[0], idx1[1], idx1[2], -val1);
cvSetND(&matB, idx0, cvScalar(val1));
cvSet3D(&matB, idx1[0], idx1[1], idx1[2], cvScalar(-val1));
Ptr<CvMatND> matC(cvCloneMatND(&matB));
if( A.at<float>(idx0[0], idx0[1], idx0[2]) != val0 ||

View File

@ -526,7 +526,7 @@ void Core_CrossProductTest::get_test_array_types_and_sizes( int,
RNG& rng = ts->get_rng();
int depth = cvtest::randInt(rng) % 2 + CV_32F;
int cn = cvtest::randInt(rng) & 1 ? 3 : 1, type = CV_MAKETYPE(depth, cn);
CvSize sz;
Size sz;
types[INPUT][0] = types[INPUT][1] = types[OUTPUT][0] = types[REF_OUTPUT][0] = type;
@ -549,7 +549,7 @@ void Core_CrossProductTest::run_func()
void Core_CrossProductTest::prepare_to_validation( int )
{
CvScalar a(0), b(0), c(0);
cv::Scalar a, b, c;
if( test_mat[INPUT][0].rows > 1 )
{
@ -595,7 +595,7 @@ void Core_CrossProductTest::prepare_to_validation( int )
}
else
{
cvSet1D( test_array[REF_OUTPUT][0], 0, c );
cvSet1D( test_array[REF_OUTPUT][0], 0, cvScalar(c) );
}
}
@ -896,7 +896,7 @@ double Core_TransformTest::get_success_error_level( int test_case_idx, int i, in
void Core_TransformTest::run_func()
{
CvMat _m = test_mat[INPUT][1], _shift = test_mat[INPUT][2];
CvMat _m = cvMat(test_mat[INPUT][1]), _shift = cvMat(test_mat[INPUT][2]);
cvTransform( test_array[INPUT][0], test_array[OUTPUT][0], &_m, _shift.data.ptr ? &_shift : 0);
}
@ -1010,7 +1010,7 @@ double Core_PerspectiveTransformTest::get_success_error_level( int test_case_idx
void Core_PerspectiveTransformTest::run_func()
{
CvMat _m = test_mat[INPUT][1];
CvMat _m = cvMat(test_mat[INPUT][1]);
cvPerspectiveTransform( test_array[INPUT][0], test_array[OUTPUT][0], &_m );
}
@ -1117,7 +1117,7 @@ static void cvTsPerspectiveTransform( const CvArr* _src, CvArr* _dst, const CvMa
void Core_PerspectiveTransformTest::prepare_to_validation( int )
{
CvMat transmat = test_mat[INPUT][1];
CvMat transmat = cvMat(test_mat[INPUT][1]);
cvTsPerspectiveTransform( test_array[INPUT][0], test_array[REF_OUTPUT][0], &transmat );
}
@ -1287,9 +1287,9 @@ int Core_CovarMatrixTest::prepare_test_case( int test_case_idx )
if( single_matrix )
{
if( !are_images )
*((CvMat*)_hdr_data) = test_mat[INPUT][0];
*((CvMat*)_hdr_data) = cvMat(test_mat[INPUT][0]);
else
*((IplImage*)_hdr_data) = test_mat[INPUT][0];
*((IplImage*)_hdr_data) = cvIplImage(test_mat[INPUT][0]);
temp_hdrs[0] = _hdr_data;
}
else
@ -1304,9 +1304,9 @@ int Core_CovarMatrixTest::prepare_test_case( int test_case_idx )
part = test_mat[INPUT][0].col(i);
if( !are_images )
*((CvMat*)ptr) = part;
*((CvMat*)ptr) = cvMat(part);
else
*((IplImage*)ptr) = part;
*((IplImage*)ptr) = cvIplImage(part);
temp_hdrs[i] = ptr;
}
@ -1539,7 +1539,7 @@ static double cvTsLU( CvMat* a, CvMat* b=NULL, CvMat* x=NULL, int* rank=0 )
void Core_DetTest::prepare_to_validation( int )
{
test_mat[INPUT][0].convertTo(test_mat[TEMP][0], test_mat[TEMP][0].type());
CvMat temp0 = test_mat[TEMP][0];
CvMat temp0 = cvMat(test_mat[TEMP][0]);
test_mat[REF_OUTPUT][0].at<Scalar>(0,0) = cvRealScalar(cvTsLU(&temp0, 0, 0));
}
@ -1676,7 +1676,7 @@ void Core_InvertTest::prepare_to_validation( int )
Mat& temp1 = test_mat[TEMP][1];
Mat& dst0 = test_mat[REF_OUTPUT][0];
Mat& dst = test_mat[OUTPUT][0];
CvMat _input = input;
CvMat _input = cvMat(input);
double ratio = 0, det = cvTsSVDet( &_input, &ratio );
double threshold = (input.depth() == CV_32F ? FLT_EPSILON : DBL_EPSILON)*1000;
@ -1733,7 +1733,7 @@ void Core_SolveTest::get_test_array_types_and_sizes( int test_case_idx, vector<v
RNG& rng = ts->get_rng();
int bits = cvtest::randInt(rng);
Base::get_test_array_types_and_sizes( test_case_idx, sizes, types );
CvSize in_sz = sizes[INPUT][0];
CvSize in_sz = cvSize(sizes[INPUT][0]);
if( in_sz.width > in_sz.height )
in_sz = cvSize(in_sz.height, in_sz.width);
Base::get_test_array_types_and_sizes( test_case_idx, sizes, types );
@ -1813,14 +1813,14 @@ void Core_SolveTest::prepare_to_validation( int )
Mat& temp1 = test_mat[TEMP][1];
cvtest::convert(input, temp1, temp1.type());
dst = Scalar::all(0);
CvMat _temp1 = temp1;
CvMat _temp1 = cvMat(temp1);
double det = cvTsLU( &_temp1, 0, 0 );
dst0 = Scalar::all(det != 0);
return;
}
double threshold = (input.type() == CV_32F ? FLT_EPSILON : DBL_EPSILON)*1000;
CvMat _input = input;
CvMat _input = cvMat(input);
double ratio = 0, det = cvTsSVDet( &_input, &ratio );
if( det < threshold || ratio < threshold )
{
@ -2105,7 +2105,7 @@ void Core_SVBkSbTest::get_test_array_types_and_sizes( int test_case_idx, vector<
int bits = cvtest::randInt(rng);
Base::get_test_array_types_and_sizes( test_case_idx, sizes, types );
int min_size, i, m, n;
CvSize b_size;
cv::Size b_size;
min_size = MIN( sizes[INPUT][0].width, sizes[INPUT][0].height );
@ -2122,7 +2122,7 @@ void Core_SVBkSbTest::get_test_array_types_and_sizes( int test_case_idx, vector<
n = sizes[INPUT][0].width;
sizes[INPUT][1] = Size(0,0);
b_size = Size(m,m);
b_size = cvSize(m, m);
if( have_b )
{
sizes[INPUT][1].height = sizes[INPUT][0].height;
@ -2174,7 +2174,7 @@ int Core_SVBkSbTest::prepare_test_case( int test_case_idx )
cvtest::copy( temp, input );
}
CvMat _input = input;
CvMat _input = cvMat(input);
cvSVD( &_input, test_array[TEMP][0], test_array[TEMP][1], test_array[TEMP][2], flags );
}
@ -2210,7 +2210,7 @@ void Core_SVBkSbTest::prepare_to_validation( int )
Size w_size = compact ? Size(min_size,min_size) : Size(m,n);
Mat& w = test_mat[TEMP][0];
Mat wdb( w_size.height, w_size.width, CV_64FC1 );
CvMat _w = w, _wdb = wdb;
CvMat _w = cvMat(w), _wdb = cvMat(wdb);
// use exactly the same threshold as in icvSVD... ,
// so the changes in the library and here should be synchronized.
double threshold = cv::sum(w)[0]*(DBL_EPSILON*2);//(is_float ? FLT_EPSILON*10 : DBL_EPSILON*2);
@ -3230,6 +3230,22 @@ softdouble naiveExp(softdouble x)
}
}
// Assemble an IEEE-754 binary32 value from its raw bit fields.
// sign: low bit becomes the sign bit; exponent: low 8 bits (biased form);
// significand: low 23 bits of the mantissa.
// The packing is done entirely in unsigned arithmetic: the original
// `(sign & 1) << 31` shifted a signed int into the sign bit, which is
// undefined behavior before C++20.
static float makeFP32(int sign, int exponent, int significand)
{
    unsigned bits = ((unsigned)(sign & 1) << 31)
                  | ((unsigned)(exponent & 255) << 23)
                  | ((unsigned)significand & 0x7fffffu);
    Cv32suf x;
    x.u = bits;
    return x.f;
}
// Draw a random float whose exponent field is bounded by exprange.
// sign == -1 requests a randomly chosen sign bit; otherwise the given
// value is used as-is. The rng() calls happen in the same order as before
// (sign first, then exponent, then mantissa) so the generated sequence is
// unchanged.
static float makeRandomFP32(RNG& rng, int sign, int exprange)
{
    const int signBit  = (sign == -1) ? (int)(rng() % 2) : sign;
    const int exponent = (int)(rng() % exprange);
    const int mantissa = (int)(rng() % (1 << 23));
    return makeFP32(signBit, exponent, mantissa);
}
TEST(Core_SoftFloat, exp32)
{
//special cases
@ -3246,13 +3262,11 @@ TEST(Core_SoftFloat, exp32)
inputs.push_back(softfloat::min());
for(int i = 0; i < 50000; i++)
{
Cv32suf x;
x.fmt.sign = rng() % 2;
x.fmt.exponent = rng() % (10 + 127); //bigger exponent will produce inf
x.fmt.significand = rng() % (1 << 23);
if(softfloat(x.f) > ln_max)
x.f = rng.uniform(0.0f, (float)ln_max);
inputs.push_back(softfloat(x.f));
float x = makeRandomFP32(rng, -1, 10+127 //bigger exponent will produce inf
);
if(softfloat(x) > ln_max)
x = rng.uniform(0.0f, (float)ln_max);
inputs.push_back(softfloat(x));
}
for(size_t i = 0; i < inputs.size(); i++)
@ -3323,11 +3337,7 @@ TEST(Core_SoftFloat, log32)
EXPECT_TRUE(log(softfloat::nan()).isNaN());
for(int i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 1;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32(x.f);
softfloat x32(makeRandomFP32(rng, 1, 255));
ASSERT_TRUE(log(x32).isNaN());
}
EXPECT_TRUE(log(softfloat::zero()).isInf());
@ -3340,11 +3350,7 @@ TEST(Core_SoftFloat, log32)
inputs.push_back(softfloat::max());
for(int i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 0;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
inputs.push_back(softfloat(x.f));
inputs.push_back(softfloat(makeRandomFP32(rng, 0, 255)));
}
for(size_t i = 0; i < inputs.size(); i++)
@ -3426,11 +3432,7 @@ TEST(Core_SoftFloat, cbrt32)
inputs.push_back(softfloat::min());
for(int i = 0; i < 50000; i++)
{
Cv32suf x;
x.fmt.sign = rng() % 2;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
inputs.push_back(softfloat(x.f));
inputs.push_back(softfloat(makeRandomFP32(rng, -1, 255)));
}
for(size_t i = 0; i < inputs.size(); i++)
@ -3522,11 +3524,8 @@ TEST(Core_SoftFloat, pow32)
// inf ** y == inf, if y > 0
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 0;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32 = softfloat(x.f);
float x = makeRandomFP32(rng, 0, 255);
softfloat x32 = softfloat(x);
ASSERT_TRUE(pow( inf, x32).isInf());
ASSERT_TRUE(pow(-inf, x32).isInf());
ASSERT_EQ(pow( inf, -x32), zero);
@ -3538,17 +3537,9 @@ TEST(Core_SoftFloat, pow32)
// x ** y == nan, if x < 0 and y is not integer
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 1;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32(x.f);
Cv32suf y;
y.fmt.sign = rng() % 2;
//bigger exponent produces integer numbers only
y.fmt.exponent = rng() % (23 + 127);
y.fmt.significand = rng() % (1 << 23);
softfloat y32(y.f);
softfloat x32(makeRandomFP32(rng, 1, 255));
softfloat y32(makeRandomFP32(rng, -1, 23+127 //bigger exponent produces integer numbers only
));
int yi = cvRound(y32);
if(y32 != softfloat(yi))
ASSERT_TRUE(pow(x32, y32).isNaN());
@ -3565,11 +3556,7 @@ TEST(Core_SoftFloat, pow32)
// 0 ** y == 0, if y > 0
for(size_t i = 0; i < nValues; i++)
{
Cv32suf x;
x.fmt.sign = 0;
x.fmt.exponent = rng() % 255;
x.fmt.significand = rng() % (1 << 23);
softfloat x32(x.f);
softfloat x32(makeRandomFP32(rng, 0, 255));
ASSERT_TRUE(pow(zero, -x32).isInf());
if(x32 != one)
{

View File

@ -970,7 +970,7 @@ bool CV_OperationsTest::operations1()
Size sz(10, 20);
if (sz.area() != 200) throw test_excep();
if (sz.width != 10 || sz.height != 20) throw test_excep();
if (((CvSize)sz).width != 10 || ((CvSize)sz).height != 20) throw test_excep();
if (cvSize(sz).width != 10 || cvSize(sz).height != 20) throw test_excep();
Vec<double, 5> v5d(1, 1, 1, 1, 1);
Vec<double, 6> v6d(1, 1, 1, 1, 1, 1);

View File

@ -373,7 +373,7 @@ namespace
// Discard under-size foreground regions:
d_foreground.download(h_foreground);
IplImage ipl_foreground = h_foreground;
IplImage ipl_foreground = cvIplImage(h_foreground);
CvSeq* first_seq = 0;
cvFindContours(&ipl_foreground, storage, &first_seq, sizeof(CvContour), CV_RETR_LIST);

View File

@ -158,8 +158,6 @@ CV__DNN_INLINE_NS_BEGIN
};
class CV_EXPORTS ActivationLayer;
class CV_EXPORTS BatchNormLayer;
class CV_EXPORTS ScaleLayer;
/** @brief This interface class allows to build new Layers - are building blocks of networks.
*
@ -174,20 +172,31 @@ CV__DNN_INLINE_NS_BEGIN
CV_PROP_RW std::vector<Mat> blobs;
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
* @param[in] input vector of already allocated input blobs
* @param[out] output vector of already allocated output blobs
*
* If this method is called after network has allocated all memory for input and output blobs
* and before inferencing.
*/
virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
CV_DEPRECATED virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @param[in] inputs vector of already allocated input blobs
* @param[out] outputs vector of already allocated output blobs
*
* If this method is called after network has allocated all memory for input and output blobs
* and before inferencing.
*/
CV_WRAP virtual void finalize(InputArrayOfArrays inputs, OutputArrayOfArrays outputs);
/** @brief Given the @p input blobs, computes the output @p blobs.
* @deprecated Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
* @param[in] input the input blobs.
* @param[out] output allocated output blobs, which will store results of the computation.
* @param[out] internals allocated internal blobs
*/
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) = 0;
CV_DEPRECATED virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals);
/** @brief Given the @p input blobs, computes the output @p blobs.
* @param[in] inputs the input blobs.
@ -203,15 +212,23 @@ CV__DNN_INLINE_NS_BEGIN
*/
void forward_fallback(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals);
/** @brief @overload */
CV_WRAP void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
/** @brief
* @overload
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
*/
CV_DEPRECATED void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
/** @brief @overload */
CV_WRAP std::vector<Mat> finalize(const std::vector<Mat> &inputs);
/** @brief
* @overload
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
*/
CV_DEPRECATED std::vector<Mat> finalize(const std::vector<Mat> &inputs);
/** @brief Allocates layer and computes output. */
CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);
/** @brief Allocates layer and computes output.
* @deprecated This method will be removed in the future release.
*/
CV_DEPRECATED CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);
/** @brief Returns index of input blob into the input array.
* @param inputName label of input blob
@ -381,9 +398,6 @@ CV__DNN_INLINE_NS_BEGIN
/** @brief Returns pointers to input layers of specific layer. */
std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId); // FIXIT: CV_WRAP
/** @brief Delete layer for the network (not implemented yet) */
CV_WRAP void deleteLayer(LayerId layer);
/** @brief Connects output of the first layer to input of the second layer.
* @param outPin descriptor of the first layer output.
* @param inpPin descriptor of the second layer input.

View File

@ -146,16 +146,16 @@ public:
return false;
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &) CV_OVERRIDE
virtual void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
std::vector<Mat> inps(inputs.size());
for (size_t i = 0; i < inputs.size(); ++i)
inps[i] = *inputs[i];
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
PyObject* args = pyopencv_from(inps);
PyObject* args = pyopencv_from(inputs);
PyObject* res = PyObject_CallMethodObjArgs(o, PyString_FromString("forward"), args, NULL);
Py_DECREF(args);
PyGILState_Release(gstate);
@ -174,11 +174,6 @@ public:
}
}
// Not implemented for this overload: always raises StsNotImplemented.
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
CV_Error(Error::StsNotImplemented, "");
}
private:
// Map layers types to python classes.
static std::map<std::string, std::vector<PyObject*> > pyLayers;

View File

@ -430,19 +430,24 @@ struct DataLayer : public Layer
backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1;
}
void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs, outputs, internals));
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs, outputs, internals);
}
if (outputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> outputs, internals;
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
void forward(std::vector<Mat*>&, std::vector<Mat>& outputs, std::vector<Mat> &) CV_OVERRIDE
{
// Supported modes:
// | Input type | Output type |
// | fp32 | fp32 |
@ -567,8 +572,11 @@ struct DataLayer : public Layer
return false;
}
void finalize(const std::vector<Mat*>&, std::vector<Mat>& outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
CV_Assert_N(outputs.size() == scaleFactors.size(), outputs.size() == means.size(),
inputsData.size() == outputs.size());
skip = true;
@ -1414,6 +1422,7 @@ struct Net::Impl
addInfEngineNetOutputs(ld);
net = Ptr<InfEngineBackendNet>();
netBlobsWrappers.clear();
layer->preferableTarget = DNN_TARGET_CPU;
continue;
}
ld.skip = true; // Initially skip all Inference Engine supported layers.
@ -1622,7 +1631,12 @@ struct Net::Impl
Ptr<Layer> layerPtr = ld.getLayerInstance();
{
layerPtr->finalize(ld.inputBlobs, ld.outputBlobs);
std::vector<Mat> inps(ld.inputBlobs.size());
for (int i = 0; i < ld.inputBlobs.size(); ++i)
{
inps[i] = *ld.inputBlobs[i];
}
layerPtr->finalize(inps, ld.outputBlobs);
layerPtr->preferableTarget = preferableTarget;
#if 0
std::cout << "\toutputs:";
@ -2138,7 +2152,12 @@ struct Net::Impl
ld.inputBlobsWrappers[i]->copyToHost();
}
layer->forward(ld.inputBlobs, ld.outputBlobs, ld.internals);
std::vector<Mat> inps(ld.inputBlobs.size());
for (int i = 0; i < ld.inputBlobs.size(); ++i)
{
inps[i] = *ld.inputBlobs[i];
}
layer->forward(inps, ld.outputBlobs, ld.internals);
if (DNN_CHECK_NAN_INF)
{
@ -2712,11 +2731,6 @@ int Net::getLayerId(const String &layer)
return impl->getLayerId(layer);
}
// Stub: layer deletion was never implemented; always raises StsNotImplemented.
void Net::deleteLayer(LayerId)
{
CV_Error(Error::StsNotImplemented, "");
}
Ptr<Layer> Net::getLayer(LayerId layerId)
{
LayerData &ld = impl->getLayerData(layerId);
@ -3172,10 +3186,7 @@ static void vecToPVec(const std::vector<T> &v, std::vector<T*> &pv)
void Layer::finalize(const std::vector<Mat> &inputs, std::vector<Mat> &outputs)
{
CV_TRACE_FUNCTION();
std::vector<Mat*> inputsp;
vecToPVec(inputs, inputsp);
this->finalize(inputsp, outputs);
this->finalize((InputArrayOfArrays)inputs, (OutputArrayOfArrays)outputs);
}
void Layer::finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
@ -3183,6 +3194,18 @@ void Layer::finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
(void)input;(void)output;
}
// New-style finalize entry point: unwraps the InputArray/OutputArray
// wrappers into plain Mat vectors, then forwards to the legacy
// pointer-based finalize() so user layers overriding the old API
// keep working.
void Layer::finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr)
{
CV_TRACE_FUNCTION();
// Materialize the wrapped arrays as Mat vectors.
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
// Legacy overload takes a vector of pointers; build it and delegate.
std::vector<Mat*> inputsp;
vecToPVec(inputs, inputsp);
this->finalize(inputsp, outputs);
}
std::vector<Mat> Layer::finalize(const std::vector<Mat> &inputs)
{
CV_TRACE_FUNCTION();
@ -3192,12 +3215,17 @@ std::vector<Mat> Layer::finalize(const std::vector<Mat> &inputs)
return outputs;
}
void Layer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals)
// Deprecated pointer-based forward: intentionally a no-op in the base class.
void Layer::forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
{
// We kept this method for compatibility. DNN calls it now only to support users' implementations.
}
void Layer::forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs, outputs, internals);
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
void Layer::forward_fallback(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
@ -3241,7 +3269,6 @@ void Layer::forward_fallback(InputArrayOfArrays inputs_arr, OutputArrayOfArrays
internals_arr.assign(orig_internals);
return;
}
std::vector<Mat> inpvec;
std::vector<Mat> outputs;
std::vector<Mat> internals;
@ -3265,10 +3292,8 @@ void Layer::run(const std::vector<Mat> &inputs, std::vector<Mat> &outputs, std::
{
CV_TRACE_FUNCTION();
std::vector<Mat*> inputsp;
vecToPVec(inputs, inputsp);
this->finalize(inputsp, outputs);
this->forward(inputsp, outputs, internals);
this->finalize(inputs, outputs);
this->forward(inputs, outputs, internals);
}
Layer::~Layer() {}

View File

@ -234,18 +234,20 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(blobs.size() >= 2);
CV_Assert(inputs.size() == 1);
Mat &inpBlob = *inputs[0];
Mat &inpBlob = inputs[0];
CV_Assert(inpBlob.dims == 2 || inpBlob.dims == 4);
int rows = inpBlob.dims > 2 ? inpBlob.size[2] : 1;
int cols = inpBlob.dims > 2 ? inpBlob.size[3] : 1;

View File

@ -99,17 +99,19 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (int i = 0, n = outputs.size(); i < n; ++i)
if (outputs[i].data != inputs[i]->data)
inputs[i]->copyTo(outputs[i]);
if (outputs[i].data != inputs[i].data)
inputs[i].copyTo(outputs[i]);
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE

View File

@ -111,12 +111,12 @@ public:
class ChannelConcatInvoker : public ParallelLoopBody
{
public:
std::vector<Mat*>* inputs;
std::vector<Mat>* inputs;
Mat* output;
int nstripes;
std::vector<const float*> chptrs;
static void run(std::vector<Mat*>& inputs, Mat& output, int nstripes)
static void run(std::vector<Mat>& inputs, Mat& output, int nstripes)
{
ChannelConcatInvoker cc;
cc.inputs = &inputs;
@ -127,7 +127,7 @@ public:
int nchannels = 0, batchsz = output.size[0];
for( i = 0; i < ninputs; i++ )
{
Mat& inp = *inputs[i];
Mat& inp = inputs[i];
CV_Assert( inp.isContinuous() && (inp.type() == CV_32F || inp.type() == CV_16S) &&
inp.dims == 4 && inp.size[0] == output.size[0] &&
inp.size[2] == output.size[2] &&
@ -142,7 +142,7 @@ public:
int ofs = 0;
for( i = 0; i < ninputs; i++)
{
Mat& inp = *inputs[i];
Mat& inp = inputs[i];
for( int j = 0; j < batchsz; j++ )
for( int k = 0; k < inp.size[1]; k++ )
{
@ -241,15 +241,17 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
int cAxis = clamp(axis, inputs[0]->dims);
int cAxis = clamp(axis, inputs[0].dims);
Mat& outMat = outputs[0];
if (padding)
@ -267,14 +269,14 @@ public:
ranges[cAxis].start = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
ranges[cAxis].end = ranges[cAxis].start + inputs[i]->size[cAxis];
ranges[cAxis].end = ranges[cAxis].start + inputs[i].size[cAxis];
for (int j = 0; j < outMat.dims; ++j)
{
if (j == cAxis) continue;
ranges[j].start = (outMat.size[j] - inputs[i]->size[j]) / 2;
ranges[j].end = ranges[j].start + inputs[i]->size[j];
ranges[j].start = (outMat.size[j] - inputs[i].size[j]) / 2;
ranges[j].end = ranges[j].start + inputs[i].size[j];
}
inputs[i]->copyTo(outMat(&ranges[0]));
inputs[i].copyTo(outMat(&ranges[0]));
ranges[cAxis].start = ranges[cAxis].end;
}
}

View File

@ -79,49 +79,24 @@ public:
adjustPad.height < stride.height);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{
if (type == "Convolution")
return preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height;
else
{
CV_Assert(type == "Deconvolution");
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;
if (group != 1)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R3)
return preferableTarget == DNN_TARGET_CPU;
#endif
return false;
}
if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
return dilation.width == 1 && dilation.height == 1;
return true;
}
}
else
#endif // HAVE_INF_ENGINE
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
CV_Assert(blobs.size() >= 1 && blobs.size() <= 2);
CV_Assert(blobs[0].dims == 4 && blobs[0].size[3] == kernel.width && blobs[0].size[2] == kernel.height);
const Mat &input = *inputs[0];
const Mat &input = inputs[0];
CV_Assert(input.dims == 4 && (input.type() == CV_32F || input.type() == CV_64F || input.type() == CV_16S));
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->type() == input.type());
CV_Assert(inputs[i]->dims == 4 && inputs[i]->size[1] == input.size[1]);
CV_Assert(inputs[i]->size[2] == input.size[2] && inputs[i]->size[3] == input.size[3]);
CV_Assert(inputs[i].type() == input.type());
CV_Assert(inputs[i].dims == 4 && inputs[i].size[1] == input.size[1]);
CV_Assert(inputs[i].size[2] == input.size[2] && inputs[i].size[3] == input.size[3]);
}
Size outSize = Size(outputs[0].size[3], outputs[0].size[2]);
@ -225,6 +200,14 @@ public:
return shape(out.area(), ksize);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
return preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height;
else
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@ -262,9 +245,9 @@ public:
return false;
}
virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
BaseConvolutionLayerImpl::finalize(inputs, outputs);
BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
CV_Assert(!blobs.empty());
const int outCn = blobs[0].size[0];
@ -1007,22 +990,24 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
/*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
name.c_str(), inputs[0]->size[0], inputs[0]->size[1], inputs[0]->size[2], inputs[0]->size[3],
name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
kernel.width, kernel.height, pad.width, pad.height,
stride.width, stride.height, dilation.width, dilation.height);*/
CV_Assert_N(inputs.size() == (size_t)1, inputs[0]->size[1] % blobs[0].size[1] == 0,
outputs.size() == 1, inputs[0]->data != outputs[0].data);
CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
outputs.size() == 1, inputs[0].data != outputs[0].data);
int ngroups = inputs[0]->size[1]/blobs[0].size[1];
int ngroups = inputs[0].size[1]/blobs[0].size[1];
CV_Assert(outputs[0].size[1] % ngroups == 0);
int outCn = blobs[0].size[0];
@ -1049,7 +1034,7 @@ public:
int nstripes = std::max(getNumThreads(), 1);
ParallelConv::run(*inputs[0], outputs[0], weightsMat, biasvec, reluslope,
ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
kernel, pad, stride, dilation, activ.get(), ngroups, nstripes);
}
@ -1089,6 +1074,29 @@ public:
return shape(ksize, inpH * inpW);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;
if (group != 1)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R3)
return preferableTarget == DNN_TARGET_CPU;
#endif
return false;
}
if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
return dilation.width == 1 && dilation.height == 1;
return true;
}
else
#endif // HAVE_INF_ENGINE
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@ -1141,11 +1149,15 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
BaseConvolutionLayerImpl::finalize(inputs, outputs);
BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
getConvPoolPaddings(Size(outputs[0].size[3], outputs[0].size[2]),
Size(inputs[0]->size[3], inputs[0]->size[2]),
Size(inputs[0].size[3], inputs[0].size[2]),
kernel, stride, padMode, dilation, pad);
}
@ -1494,18 +1506,21 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget) &&
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
forward_ocl(inputs_arr, outputs_arr, internals_arr));
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
int outCn = numOutput;
int inpCn = inputs[0]->size[1];
int inpCn = inputs[0].size[1];
bool is1x1flag = is1x1();
int nstripes = getNumThreads();
@ -1520,13 +1535,13 @@ public:
int ngroups = outCn / blobs[0].size[1];
int inpGroupCn = inpCn / ngroups;
int outGroupCn = blobs[0].size[1];
const Mat& inp = *inputs[ii];
const Mat& inp = inputs[ii];
Mat& out = outputs[ii];
int numImg = inp.size[0];
int inpH = inp.size[2], inpW = inp.size[3];
int outH = out.size[2], outW = out.size[3];
Mat convBlob = inputs[ii]->reshape(1, numImg*inpCn);
Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
Mat decnBlob = out.reshape(1, numImg*outCn);
for (int n = 0; n < numImg; n++)

View File

@ -40,17 +40,19 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat& inp = *inputs[0];
Mat& inp = inputs[0];
Mat& out = outputs[0];
Mat boxes = inputs[1]->reshape(1, inputs[1]->total() / 7);
Mat boxes = inputs[1].reshape(1, inputs[1].total() / 7);
const int numChannels = inp.size[1];
const int inpHeight = inp.size[2];
const int inpWidth = inp.size[3];

View File

@ -90,12 +90,14 @@ public:
return false;
}
void finalize(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
CV_Assert(2 == inputs.size());
const Mat &inpBlob = *inputs[0];
const Mat &inpSzBlob = *inputs[1];
const Mat &inpBlob = inputs[0];
const Mat &inpSzBlob = inputs[1];
int dims = inpBlob.dims;
int start_axis = clamp(startAxis, dims);
@ -135,18 +137,18 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat &input = *inputs[0];
Mat &output = outputs[0];
input(&crop_ranges[0]).copyTo(output);
Mat &input = inputs[0];
input(&crop_ranges[0]).copyTo(outputs[0]);
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE

View File

@ -419,27 +419,28 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
std::vector<LabelBBox> allDecodedBBoxes;
std::vector<Mat> allConfidenceScores;
int num = inputs[0]->size[0];
int num = inputs[0].size[0];
// extract predictions from input layers
{
int numPriors = inputs[2]->size[2] / 4;
int numPriors = inputs[2].size[2] / 4;
const float* locationData = inputs[0]->ptr<float>();
const float* confidenceData = inputs[1]->ptr<float>();
const float* priorData = inputs[2]->ptr<float>();
const float* locationData = inputs[0].ptr<float>();
const float* confidenceData = inputs[1].ptr<float>();
const float* priorData = inputs[2].ptr<float>();
// Retrieve all location predictions
std::vector<LabelBBox> allLocationPredictions;
@ -465,9 +466,9 @@ public:
else
{
// Input image sizes;
CV_Assert(inputs[3]->dims == 4);
clipBounds.xmax = inputs[3]->size[3] - 1;
clipBounds.ymax = inputs[3]->size[2] - 1;
CV_Assert(inputs[3].dims == 4);
clipBounds.xmax = inputs[3].size[3] - 1;
clipBounds.ymax = inputs[3].size[2] - 1;
}
}
DecodeBBoxesAll(allLocationPredictions, priorBBoxes, priorVariances, num,
@ -502,6 +503,8 @@ public:
allIndices[i], _groupByClasses);
}
CV_Assert(count == numKept);
// Sync results back due changed output shape.
outputs_arr.assign(outputs);
}
size_t outputDetections_(

View File

@ -187,16 +187,19 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(this->preferableTarget),
func.applyOCL(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
const Mat &src = *inputs[i];
const Mat &src = inputs[i];
Mat &dst = outputs[i];
CV_Assert(src.size == dst.size && src.type() == dst.type() &&
src.isContinuous() && dst.isContinuous() && src.type() == CV_32F);

View File

@ -123,7 +123,7 @@ public:
class EltwiseInvoker : public ParallelLoopBody
{
public:
const Mat** srcs;
const Mat* srcs;
int nsrcs;
Mat* dst;
const std::vector<float>* coeffs;
@ -135,7 +135,7 @@ public:
EltwiseInvoker() : srcs(0), nsrcs(0), dst(0), coeffs(0), op(PROD), nstripes(0), activ(0), channels(0), planeSize(0) {}
static void run(const Mat** srcs, int nsrcs, Mat& dst,
static void run(const Mat* srcs, int nsrcs, Mat& dst,
const std::vector<float>& coeffs, EltwiseOp op,
const ActivationLayer* activ, int nstripes)
{
@ -144,9 +144,9 @@ public:
for( int i = 0; i > nsrcs; i++ )
{
CV_Assert(srcs[i]->size == dst.size &&
srcs[i]->type() == dst.type() &&
srcs[i]->isContinuous());
CV_Assert(srcs[i].size == dst.size &&
srcs[i].type() == dst.type() &&
srcs[i].isContinuous());
}
EltwiseInvoker p;
@ -200,14 +200,14 @@ public:
for( c = 0; c < channels; c++ )
{
size_t globalDelta = delta + (sampleIdx*channels + c)*planeSize;
const float* srcptr0 = srcs[0]->ptr<float>() + globalDelta;
const float* srcptr0 = srcs[0].ptr<float>() + globalDelta;
float* dstptr = dstptr0 + globalDelta;
if( op == PROD )
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = srcptr0[j]*srcptr1[j];
@ -219,7 +219,7 @@ public:
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = std::max(srcptr0[j], srcptr1[j]);
@ -231,7 +231,7 @@ public:
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = srcptr0[j] + srcptr1[j];
@ -244,7 +244,7 @@ public:
float c0 = coeffsptr[0];
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
float c1 = coeffsptr[k];
for( j = 0; j < blockSize; j++ )
{
@ -358,17 +358,19 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(outputs.size() == 1);
const int nstripes = getNumThreads();
EltwiseInvoker::run((const Mat**)&inputs[0], (int)inputs.size(), outputs[0],
EltwiseInvoker::run(&inputs[0], (int)inputs.size(), outputs[0],
coeffs, op, activ.get(), nstripes);
}

View File

@ -139,18 +139,23 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
MatShape outShape = shape(outputs[i]);
outputs[i] = inputs[i]->reshape(1, (int)outShape.size(), &outShape[0]);
if (inputs[i].data != outputs[i].data)
{
inputs[i].reshape(1, (int)outShape.size(), &outShape[0]).copyTo(outputs[i]);
}
}
}

View File

@ -273,7 +273,7 @@ public:
};
#ifdef HAVE_OPENCL
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
innerProductOp.release();
}
@ -393,20 +393,22 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> input, output;
inputs_arr.getMatVector(input);
outputs_arr.getMatVector(output);
int axisCan = clamp(axis, input[0]->dims);
int outerSize = input[0]->total(0, axisCan);
int axisCan = clamp(axis, input[0].dims);
int outerSize = input[0].total(0, axisCan);
for (size_t i = 0; i < input.size(); i++)
{
Mat srcMat = input[i]->reshape(1, outerSize);
Mat srcMat = input[i].reshape(1, outerSize);
Mat dstMat = output[i].reshape(1, outerSize);
const int nstripes = getNumThreads();

View File

@ -96,7 +96,7 @@ public:
}
#ifdef HAVE_OPENCL
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
lrnOp.release();
}
@ -152,21 +152,23 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == outputs.size());
for (int i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->dims == 4);
CV_Assert(inputs[i].dims == 4);
Mat &src = *inputs[i];
Mat &src = inputs[i];
Mat &dst = outputs[i];
switch (type)

View File

@ -62,17 +62,19 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 2);
Mat& input = *inputs[0];
Mat& indices = *inputs[1];
Mat& input = inputs[0];
Mat& indices = inputs[1];
CV_Assert(input.total() == indices.total());
CV_Assert(input.size[0] == 1);

View File

@ -96,13 +96,15 @@ public:
return fuse_relu;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
int splitDim = (acrossChannels) ? 1 : 2;
int i, newRows = 1;
for( i = 0; i < splitDim; i++ )
newRows *= inputs[0]->size[i];
zeroDev = inputs[0]->total() == newRows;
newRows *= inputs[0].size[i];
zeroDev = inputs[0].total() == newRows;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
@ -271,17 +273,20 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
{
Mat &inpBlob = *inputs[inpIdx];
Mat &inpBlob = inputs[inpIdx];
Mat &outBlob = outputs[inpIdx];
int splitDim = (acrossChannels) ? 1 : 2;

View File

@ -89,12 +89,14 @@ public:
return true;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
CV_Assert(inputs.size() == 1);
endAxis = endAxis == -1 ? (inputs[0]->dims - 1) : endAxis;
startAxis = startAxis == -1 ? (inputs[0]->dims - 1) : startAxis;
acrossSpatial = (startAxis == 1 && endAxis == inputs[0]->dims - 1);
endAxis = endAxis == -1 ? (inputs[0].dims - 1) : endAxis;
startAxis = startAxis == -1 ? (inputs[0].dims - 1) : startAxis;
acrossSpatial = (startAxis == 1 && endAxis == inputs[0].dims - 1);
}
#ifdef HAVE_OPENCL
@ -186,18 +188,21 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
CV_Assert(inputs.size() == 1 && outputs.size() == 1);
CV_Assert(inputs[0]->total() == outputs[0].total());
CV_Assert(inputs[0].total() == outputs[0].total());
const Mat& inp0 = *inputs[0];
const Mat& inp0 = inputs[0];
Mat& buffer = internals[0];
startAxis = clamp(startAxis, inp0.dims);
endAxis = clamp(endAxis, inp0.dims);

View File

@ -61,14 +61,17 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
// Compute dstRanges.
const MatSize& inpShape = inputs[0]->size;
const MatSize& inpShape = inputs[0].size;
dstRanges.resize(paddings.size());
int offset = 0;
if (inputDims != -1 && inputs[0]->dims != inputDims)
if (inputDims != -1 && inputs[0].dims != inputDims)
{
dstRanges.insert(dstRanges.begin(), Range::all());
offset = 1;
@ -81,7 +84,7 @@ public:
}
// Add the rest of dimensions.
for (int i = dstRanges.size(); i < inputs[0]->dims; ++i)
for (int i = dstRanges.size(); i < inputs[0].dims; ++i)
dstRanges.push_back(Range::all());
}
@ -96,31 +99,33 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
if (paddingType == "constant")
{
outputs[0].setTo(paddingValue);
inputs[0]->copyTo(outputs[0](dstRanges));
inputs[0].copyTo(outputs[0](dstRanges));
}
else if (paddingType == "reflect")
{
CV_Assert(inputs.size() == 1);
CV_Assert(outputs.size() == 1);
CV_Assert(inputs[0]->dims == 4);
CV_Assert(inputs[0].dims == 4);
CV_Assert(outputs[0].dims == 4);
if (inputs[0]->size[0] != outputs[0].size[0] || inputs[0]->size[1] != outputs[0].size[1])
if (inputs[0].size[0] != outputs[0].size[0] || inputs[0].size[1] != outputs[0].size[1])
CV_Error(Error::StsNotImplemented, "Only spatial reflection padding is supported.");
const int inpHeight = inputs[0]->size[2];
const int inpWidth = inputs[0]->size[3];
const int inpHeight = inputs[0].size[2];
const int inpWidth = inputs[0].size[3];
const int outHeight = outputs[0].size[2];
const int outWidth = outputs[0].size[3];
const int padTop = dstRanges[2].start;
@ -130,11 +135,11 @@ public:
CV_CheckLT(padTop, inpHeight, ""); CV_CheckLT(padBottom, inpHeight, "");
CV_CheckLT(padLeft, inpWidth, ""); CV_CheckLT(padRight, inpWidth, "");
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
for (size_t n = 0; n < inputs[0].size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
for (size_t ch = 0; ch < inputs[0].size[1]; ++ch)
{
copyMakeBorder(getPlane(*inputs[0], n, ch),
copyMakeBorder(getPlane(inputs[0], n, ch),
getPlane(outputs[0], n, ch),
padTop, padBottom, padLeft, padRight,
BORDER_REFLECT_101);

View File

@ -172,18 +172,21 @@ public:
_count = _oldStride[0] * shapeBefore[0];
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
if(!_needsPermute)
{
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() > 0);
const Mat& inp0 = *inputs[0];
const Mat& inp0 = inputs[0];
CV_Assert((int)_numAxes == inp0.dims);
computeStrides(shape(*inputs[0]), shape(outputs[0]));
computeStrides(shape(inputs[0]), shape(outputs[0]));
#ifdef HAVE_OPENCL
if (uorder.empty())
@ -319,22 +322,24 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
size_t k, ninputs = inputs.size();
if(!_needsPermute)
{
for (k = 0; k < ninputs; k++)
{
CV_Assert(outputs[k].total() == inputs[k]->total());
if (outputs[k].data != inputs[k]->data)
inputs[k]->copyTo(outputs[k]);
CV_Assert(outputs[k].total() == inputs[k].total());
if (outputs[k].data != inputs[k].data)
inputs[k].copyTo(outputs[k]);
}
}
else
@ -346,10 +351,10 @@ public:
for (k = 0; k < ninputs; k++)
{
const Mat& inp = *inputs[k];
const Mat& inp = inputs[k];
Mat& out = outputs[k];
CV_Assert(inp.dims == numAxes && inp.size == inputs[0]->size);
CV_Assert(inp.dims == numAxes && inp.size == inputs[0].size);
CV_Assert(out.dims == numAxes && out.size == outputs[0].size);
CV_Assert(inp.isContinuous() && out.isContinuous());

View File

@ -114,11 +114,15 @@ public:
Ptr<OCL4DNNPool<float> > poolOp;
#endif
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(!inputs.empty());
cv::Size inp(inputs[0]->size[3], inputs[0]->size[2]),
cv::Size inp(inputs[0].size[3], inputs[0].size[2]),
out(outputs[0].size[3], outputs[0].size[2]);
if(globalPooling)
@ -204,28 +208,29 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
switch (type)
{
case MAX:
CV_Assert_N(inputs.size() == 1, outputs.size() == 2);
maxPooling(*inputs[0], outputs[0], outputs[1]);
maxPooling(inputs[0], outputs[0], outputs[1]);
break;
case AVE:
CV_Assert_N(inputs.size() == 1, outputs.size() == 1);
avePooling(*inputs[0], outputs[0]);
avePooling(inputs[0], outputs[0]);
break;
case ROI: case PSROI:
CV_Assert_N(inputs.size() == 2, outputs.size() == 1);
roiPooling(*inputs[0], *inputs[1], outputs[0]);
roiPooling(inputs[0], inputs[1], outputs[0]);
break;
default:
CV_Error(Error::StsNotImplemented, "Not implemented");

View File

@ -297,15 +297,18 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
CV_CheckGT(inputs.size(), (size_t)1, "");
CV_CheckEQ(inputs[0]->dims, 4, ""); CV_CheckEQ(inputs[1]->dims, 4, "");
int layerWidth = inputs[0]->size[3];
int layerHeight = inputs[0]->size[2];
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
int imageWidth = inputs[1]->size[3];
int imageHeight = inputs[1]->size[2];
CV_CheckGT(inputs.size(), (size_t)1, "");
CV_CheckEQ(inputs[0].dims, 4, ""); CV_CheckEQ(inputs[1].dims, 4, "");
int layerWidth = inputs[0].size[3];
int layerHeight = inputs[0].size[2];
int imageWidth = inputs[1].size[3];
int imageHeight = inputs[1].size[2];
_stepY = _stepY == 0 ? (static_cast<float>(imageHeight) / layerHeight) : _stepY;
_stepX = _stepX == 0 ? (static_cast<float>(imageWidth) / layerWidth) : _stepX;
@ -403,21 +406,23 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 2);
int _layerWidth = inputs[0]->size[3];
int _layerHeight = inputs[0]->size[2];
int _layerWidth = inputs[0].size[3];
int _layerHeight = inputs[0].size[2];
int _imageWidth = inputs[1]->size[3];
int _imageHeight = inputs[1]->size[2];
int _imageWidth = inputs[1].size[3];
int _imageHeight = inputs[1].size[2];
float* outputPtr = outputs[0].ptr<float>();
float _boxWidth, _boxHeight;

View File

@ -137,24 +137,27 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat*> layerInputs;
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
std::vector<Mat> layerInputs;
std::vector<Mat> layerOutputs;
// Scores permute layer.
Mat scores = getObjectScores(*inputs[0]);
layerInputs.assign(1, &scores);
Mat scores = getObjectScores(inputs[0]);
layerInputs.assign(1, scores);
layerOutputs.assign(1, Mat(shape(scores.size[0], scores.size[2],
scores.size[3], scores.size[1]), CV_32FC1));
scoresPermute->finalize(layerInputs, layerOutputs);
// BBox predictions permute layer.
Mat* bboxDeltas = inputs[1];
CV_Assert(bboxDeltas->dims == 4);
const Mat& bboxDeltas = inputs[1];
CV_Assert(bboxDeltas.dims == 4);
layerInputs.assign(1, bboxDeltas);
layerOutputs.assign(1, Mat(shape(bboxDeltas->size[0], bboxDeltas->size[2],
bboxDeltas->size[3], bboxDeltas->size[1]), CV_32FC1));
layerOutputs.assign(1, Mat(shape(bboxDeltas.size[0], bboxDeltas.size[2],
bboxDeltas.size[3], bboxDeltas.size[1]), CV_32FC1));
deltasPermute->finalize(layerInputs, layerOutputs);
}
@ -251,19 +254,22 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
CV_Assert(inputs.size() == 3);
CV_Assert(internals.size() == 3);
const Mat& scores = *inputs[0];
const Mat& bboxDeltas = *inputs[1];
const Mat& imInfo = *inputs[2];
const Mat& scores = inputs[0];
const Mat& bboxDeltas = inputs[1];
const Mat& imInfo = inputs[2];
Mat& priorBoxes = internals[0];
Mat& permuttedScores = internals[1];
Mat& permuttedDeltas = internals[2];

View File

@ -216,11 +216,14 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> input;
inputs_arr.getMatVector(input);
CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6);
CV_Assert(input.size() == 1);
const Mat& inp0 = *input[0];
const Mat& inp0 = input[0];
Mat &Wh = blobs[0], &Wx = blobs[1];
int numOut = Wh.size[1];
@ -256,13 +259,16 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> input, output, internals;
inputs_arr.getMatVector(input);
outputs_arr.getMatVector(output);
internals_arr.getMatVector(internals);
const Mat &Wh = blobs[0];
const Mat &Wx = blobs[1];
@ -277,7 +283,7 @@ public:
dummyOnes.setTo(1.);
int numSamplesTotal = numTimeStamps*numSamples;
Mat xTs = input[0]->reshape(1, numSamplesTotal);
Mat xTs = input[0].reshape(1, numSamplesTotal);
Mat hOutTs = output[0].reshape(1, numSamplesTotal);
Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
@ -432,8 +438,11 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> input, outputs;
inputs_arr.getMatVector(input);
CV_Assert(input.size() >= 1 && input.size() <= 2);
Wxh = blobs[0];
@ -446,7 +455,7 @@ public:
numX = Wxh.cols;
numO = Who.rows;
const Mat& inp0 = *input[0];
const Mat& inp0 = input[0];
CV_Assert(inp0.dims >= 2);
CV_Assert(inp0.total(2) == numX);
@ -477,15 +486,18 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> input, output, internals;
inputs_arr.getMatVector(input);
outputs_arr.getMatVector(output);
internals_arr.getMatVector(internals);
Mat xTs = input[0]->reshape(1, numSamplesTotal);
Mat xTs = input[0].reshape(1, numSamplesTotal);
Mat oTs = output[0].reshape(1, numSamplesTotal);
Mat hTs = produceH ? output[1].reshape(1, numSamplesTotal) : Mat();
Mat hCurr = internals[0];

View File

@ -190,13 +190,16 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
CV_Assert(inputs.size() >= 1);
CV_Assert(outputs.size() == 1);
@ -206,14 +209,14 @@ public:
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &inpBlob = inputs[ii];
Mat &outBlob = outputs[ii];
int rows = inpBlob.size[1];
int cols = inpBlob.size[2];
CV_Assert(inputs.size() < 2 || inputs[1]->dims == 4);
int hNorm = inputs.size() > 1 ? inputs[1]->size[2] : rows;
int wNorm = inputs.size() > 1 ? inputs[1]->size[3] : cols;
CV_Assert(inputs.size() < 2 || inputs[1].dims == 4);
int hNorm = inputs.size() > 1 ? inputs[1].size[2] : rows;
int wNorm = inputs.size() > 1 ? inputs[1].size[3] : cols;
const float *srcData = inpBlob.ptr<float>();
float *dstData = outBlob.ptr<float>();

View File

@ -139,17 +139,19 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
Mat srcBlob = *inputs[i];
Mat srcBlob = inputs[i];
MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);
float *dstData = outputs[0].ptr<float>();
const float *srcData = srcBlob.ptr<float>();

View File

@ -237,17 +237,18 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < outputs.size(); i++)
{
Mat srcBlob = *inputs[i];
Mat srcBlob = inputs[i];
if (outputs[i].data != srcBlob.data)
srcBlob.reshape(1, shape(outputs[i])).copyTo(outputs[i]);
}

View File

@ -57,22 +57,26 @@ public:
return backendId == DNN_BACKEND_OPENCV;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
outWidth = outputs[0].size[3];
}
if (alignCorners && outHeight > 1)
scaleHeight = static_cast<float>(inputs[0]->size[2] - 1) / (outHeight - 1);
scaleHeight = static_cast<float>(inputs[0].size[2] - 1) / (outHeight - 1);
else
scaleHeight = static_cast<float>(inputs[0]->size[2]) / outHeight;
scaleHeight = static_cast<float>(inputs[0].size[2]) / outHeight;
if (alignCorners && outWidth > 1)
scaleWidth = static_cast<float>(inputs[0]->size[3] - 1) / (outWidth - 1);
scaleWidth = static_cast<float>(inputs[0].size[3] - 1) / (outWidth - 1);
else
scaleWidth = static_cast<float>(inputs[0]->size[3]) / outWidth;
scaleWidth = static_cast<float>(inputs[0].size[3]) / outWidth;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@ -80,24 +84,27 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
if (outHeight == inputs[0]->size[2] && outWidth == inputs[0]->size[3])
if (outHeight == inputs[0].size[2] && outWidth == inputs[0].size[3])
return;
Mat& inp = *inputs[0];
Mat& inp = inputs[0];
Mat& out = outputs[0];
if (interpolation == "nearest")
{
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
for (size_t n = 0; n < inputs[0].size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
for (size_t ch = 0; ch < inputs[0].size[1]; ++ch)
{
resize(getPlane(inp, n, ch), getPlane(out, n, ch),
Size(outWidth, outHeight), 0, 0, INTER_NEAREST);
@ -203,15 +210,19 @@ public:
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
outWidth = outputs[0].size[3];
}
int inpHeight = inputs[0]->size[2];
int inpWidth = inputs[0]->size[3];
int inpHeight = inputs[0].size[2];
int inpWidth = inputs[0].size[3];
scaleHeight = (outHeight > 1) ? (static_cast<float>(inpHeight - 1) / (outHeight - 1)) : 0.f;
scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
}

View File

@ -40,8 +40,10 @@ public:
return true;
}
virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
}
@ -57,20 +59,23 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);
Mat &inpBlob = *inputs[0];
Mat &inpBlob = inputs[0];
Mat &outBlob = outputs[0];
// There is a mode when we multiply a first blob by a second one
// instead of trainable weights.
Mat weights = blobs.empty() ? *inputs[1] : (hasWeights ? blobs[0] : Mat());
Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
if (!weights.empty())
weights = weights.reshape(1, 1);

View File

@ -28,17 +28,21 @@ public:
return group == 1;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
if (group != 1)
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
LayerParams lp;
float order[] = {0, 2, 1, 3};
lp.set("order", DictValue::arrayInt(&order[0], 4));
permute = PermuteLayer::create(lp);
Mat inp = *inputs[0];
Mat out = outputs[0];
const Mat& inp = inputs[0];
const Mat& out = outputs[0];
permuteInpShape.resize(4);
permuteInpShape[0] = inp.size[0];
@ -52,11 +56,8 @@ public:
permuteOutShape[2] = permuteInpShape[1];
permuteOutShape[3] = permuteInpShape[3];
inp = inp.reshape(1, permuteInpShape);
out = out.reshape(1, permuteOutShape);
std::vector<Mat*> permuteInputs(1, &inp);
std::vector<Mat> permuteOutputs(1, out);
std::vector<Mat> permuteInputs(1, inp.reshape(1, permuteInpShape));
std::vector<Mat> permuteOutputs(1, out.reshape(1, permuteOutShape));
permute->finalize(permuteInputs, permuteOutputs);
}
}
@ -66,15 +67,18 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
Mat inp = *inputs[0];
Mat inp = inputs[0];
Mat out = outputs[0];
if (inp.data != out.data)
{
@ -82,7 +86,7 @@ public:
{
inp = inp.reshape(1, permuteInpShape);
out = out.reshape(1, permuteOutShape);
std::vector<Mat*> permuteInputs(1, &inp);
std::vector<Mat> permuteInputs(1, inp);
std::vector<Mat> permuteOutputs(1, out);
permute->forward(permuteInputs, permuteOutputs, internals);
}

View File

@ -144,10 +144,14 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 1);
const MatSize& inpShape = inputs[0]->size;
const MatSize& inpShape = inputs[0].size;
if (sliceRanges.empty())
{
@ -239,15 +243,17 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
const Mat& inpMat = *inputs[0];
const Mat& inpMat = inputs[0];
CV_Assert(outputs.size() == sliceRanges.size());
for (size_t i = 0; i < outputs.size(); i++)
{

View File

@ -191,15 +191,18 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
const Mat &src = *inputs[0];
const Mat &src = inputs[0];
Mat &dst = outputs[0];
int axis = clamp(axisRaw, src.dims);

View File

@ -83,18 +83,19 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < outputs.size(); i++)
{
CV_Assert(inputs[0]->total() == outputs[i].total());
inputs[0]->copyTo(outputs[i]);
CV_Assert(inputs[0].total() == outputs[i].total());
inputs[0].copyTo(outputs[i]);
}
}
};

View File

@ -551,12 +551,6 @@ bool InfEngineBackendLayer::supportBackend(int backendId)
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
void InfEngineBackendLayer::forward(std::vector<Mat*> &input, std::vector<Mat> &output,
std::vector<Mat> &internals)
{
CV_Error(Error::StsError, "Choose Inference Engine as a preferable backend.");
}
void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals)
{

View File

@ -196,9 +196,6 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE;
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
std::vector<Mat> &internals) CV_OVERRIDE;
virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals) CV_OVERRIDE;

View File

@ -391,7 +391,7 @@ TEST_P(Test_Caffe_nets, Colorization)
Mat out = net.forward();
// Reference output values are in range [-29.1, 69.5]
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.21 : 4e-4;
const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.25 : 4e-4;
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5.3 : 3e-3;
normAssert(out, ref, "", l1, lInf);
}

View File

@ -61,16 +61,13 @@ static String _tf(TString filename)
void runLayer(Ptr<Layer> layer, std::vector<Mat> &inpBlobs, std::vector<Mat> &outBlobs)
{
size_t ninputs = inpBlobs.size();
std::vector<Mat> inp_(ninputs);
std::vector<Mat*> inp(ninputs);
std::vector<Mat> outp, intp;
std::vector<Mat> inp(ninputs), outp, intp;
std::vector<MatShape> inputs, outputs, internals;
for (size_t i = 0; i < ninputs; i++)
{
inp_[i] = inpBlobs[i].clone();
inp[i] = &inp_[i];
inputs.push_back(shape(inp_[i]));
inp[i] = inpBlobs[i].clone();
inputs.push_back(shape(inp[i]));
}
layer->getMemoryShapes(inputs, 0, outputs, internals);
@ -1052,8 +1049,6 @@ public:
return backendId == DNN_BACKEND_OPENCV;
}
virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE {}
virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs, cv::OutputArrayOfArrays internals) CV_OVERRIDE {}
};
@ -1151,8 +1146,11 @@ public:
return false;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
@ -1161,9 +1159,22 @@ public:
}
// Implementation of this custom layer is based on https://github.com/cdmh/deeplab-public/blob/master/src/caffe/layers/interp_layer.cpp
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
Mat& inp = *inputs[0];
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat& inp = inputs[0];
Mat& out = outputs[0];
const float* inpData = (float*)inp.data;
float* outData = (float*)out.data;

View File

@ -6,7 +6,8 @@
// Third party copyrights are property of their respective owners.
#include "test_precomp.hpp"
#include <opencv2/core/ocl.hpp>
#include <opencv2/core/opencl/ocl_defs.hpp>
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
namespace opencv_test { namespace {
@ -87,9 +88,13 @@ public:
return Ptr<Layer>(new FirstCustomLayer(params));
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
void forward(InputArrayOfArrays, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
outputs[0].setTo(1);
}
};
@ -104,9 +109,13 @@ public:
return Ptr<Layer>(new SecondCustomLayer(params));
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
void forward(InputArrayOfArrays, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
outputs[0].setTo(2);
}
};
@ -178,4 +187,125 @@ INSTANTIATE_TEST_CASE_P(/**/, setInput, Combine(
dnnBackendsAndTargets()
));
class CustomLayerWithDeprecatedForward CV_FINAL : public Layer
{
public:
CustomLayerWithDeprecatedForward(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new CustomLayerWithDeprecatedForward(params));
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_Assert_N(inputs[0]->depth() == CV_32F, outputs[0].depth() == CV_32F);
cv::add(*inputs[0], 0.5f, outputs[0]);
}
};
class CustomLayerWithDeprecatedForwardAndFallback CV_FINAL : public Layer
{
public:
CustomLayerWithDeprecatedForwardAndFallback(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new CustomLayerWithDeprecatedForwardAndFallback(params));
}
void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_OCL_RUN(preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16,
forward_ocl(inputs, outputs, internals));
Layer::forward_fallback(inputs, outputs, internals);
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_Assert_N(inputs[0]->depth() == CV_32F, outputs[0].depth() == CV_32F);
cv::add(*inputs[0], 0.5f, outputs[0]);
}
#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
if (inputs_arr.depth() != CV_32F)
return false;
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inputs_arr.getUMatVector(inputs);
outputs_arr.getUMatVector(outputs);
cv::add(inputs[0], 0.5f, outputs[0]);
return true;
}
#endif
};
typedef testing::TestWithParam<tuple<Backend, Target> > DeprecatedForward;
TEST_P(DeprecatedForward, CustomLayer)
{
const int backend = get<0>(GetParam());
const int target = get<1>(GetParam());
Mat inp(5, 5, CV_32FC1);
randu(inp, -1.0f, 1.0f);
inp = blobFromImage(inp);
CV_DNN_REGISTER_LAYER_CLASS(CustomType, CustomLayerWithDeprecatedForward);
try
{
LayerParams lp;
Net net;
net.addLayerToPrev("testLayer", "CustomType", lp);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(inp);
Mat out = net.forward();
normAssert(out, inp + 0.5f, "", 2e-4, 7e-4);
}
catch (...)
{
LayerFactory::unregisterLayer("CustomType");
throw;
}
LayerFactory::unregisterLayer("CustomType");
}
TEST_P(DeprecatedForward, CustomLayerWithFallback)
{
const int backend = get<0>(GetParam());
const int target = get<1>(GetParam());
Mat inp(5, 5, CV_32FC1);
randu(inp, -1.0f, 1.0f);
inp = blobFromImage(inp);
CV_DNN_REGISTER_LAYER_CLASS(CustomType, CustomLayerWithDeprecatedForwardAndFallback);
try
{
LayerParams lp;
Net net;
net.addLayerToPrev("testLayer", "CustomType", lp);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(inp);
Mat out = net.forward();
normAssert(out, inp + 0.5f, "", 2e-4, 7e-4);
}
catch (...)
{
LayerFactory::unregisterLayer("CustomType");
throw;
}
LayerFactory::unregisterLayer("CustomType");
}
INSTANTIATE_TEST_CASE_P(/**/, DeprecatedForward, dnnBackendsAndTargets());
}} // namespace

View File

@ -313,14 +313,14 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
// Due to numerical instability in Pooling-Unpooling layers (indexes jittering)
// thresholds for ENet must be changed. Accuracy of results was checked on
// Cityscapes dataset and difference in mIOU with Torch is 10E-4%
normAssert(ref, out, "", 0.00044, target == DNN_TARGET_CPU ? 0.453 : 0.44);
normAssert(ref, out, "", 0.00044, /*target == DNN_TARGET_CPU ? 0.453 : */0.5);
const int N = 3;
for (int i = 0; i < N; i++)
{
net.setInput(inputBlob, "");
Mat out = net.forward();
normAssert(ref, out, "", 0.00044, target == DNN_TARGET_CPU ? 0.453 : 0.44);
normAssert(ref, out, "", 0.00044, /*target == DNN_TARGET_CPU ? 0.453 : */0.5);
}
}
@ -411,15 +411,22 @@ public:
return false;
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
Mat& inp = *inputs[0];
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat& inp = inputs[0];
Mat& out = outputs[0];
const int outHeight = out.size[2];
const int outWidth = out.size[3];
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
for (size_t n = 0; n < inp.size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
for (size_t ch = 0; ch < inp.size[1]; ++ch)
{
resize(getPlane(inp, n, ch), getPlane(out, n, ch),
Size(outWidth, outHeight), 0, 0, INTER_NEAREST);

View File

@ -356,7 +356,7 @@ void cv::imshow( const String& winname, InputArray _img )
CV_Assert(size.width>0 && size.height>0);
{
Mat img = _img.getMat();
CvMat c_img = img;
CvMat c_img = cvMat(img);
cvShowImage(winname.c_str(), &c_img);
}
#else

View File

@ -1755,8 +1755,8 @@ static gboolean icvOnMouse( GtkWidget *widget, GdkEvent *event, gpointer user_da
{
// TODO move this logic to CvImageWidget
CvWindow* window = (CvWindow*)user_data;
CvPoint2D32f pt32f(-1., -1.);
CvPoint pt(-1,-1);
CvPoint2D32f pt32f = {-1., -1.};
CvPoint pt = {-1,-1};
int cv_event = -1, state = 0, flags = 0;
CvImageWidget * image_widget = CV_IMAGE_WIDGET( widget );

View File

@ -491,7 +491,7 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 )
}
else
{
image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
image = cvCreateImage(cvSize(size), cvIplDepth(type), CV_MAT_CN(type));
temp = cvarrToMat( image );
}
@ -838,7 +838,7 @@ imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 )
}
else
{
image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
image = cvCreateImage(cvSize(size), cvIplDepth(type), CV_MAT_CN(type));
temp = cvarrToMat(image);
}

View File

@ -652,7 +652,7 @@ cvConvertImage( const CvArr* srcarr, CvArr* dstarr, int flags )
uchar *s = src->data.ptr, *d = dst->data.ptr;
int s_step = src->step, d_step = dst->step;
int code = src_cn*10 + dst_cn;
CvSize size(src->cols, src->rows);
CvSize size = {src->cols, src->rows};
if( CV_IS_MAT_CONT(src->type & dst->type) )
{

View File

@ -1036,9 +1036,10 @@ CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color,
int thickness CV_DEFAULT(1),
int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) )
{
CvSize axes;
axes.width = cvRound(box.size.width*0.5);
axes.height = cvRound(box.size.height*0.5);
CvSize axes = cvSize(
cvRound(box.size.width*0.5),
cvRound(box.size.height*0.5)
);
cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle,
0, 360, color, thickness, line_type, shift );

View File

@ -410,7 +410,7 @@ typedef struct CvMoments
double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /**< central moments */
double inv_sqrt_m00; /**< m00 != 0 ? 1/sqrt(m00) : 0 */
#ifdef __cplusplus
#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)
CvMoments(){}
CvMoments(const cv::Moments& m)
{
@ -430,6 +430,36 @@ typedef struct CvMoments
}
CvMoments;
#ifdef __cplusplus
} // extern "C"
CV_INLINE CvMoments cvMoments()
{
#if !defined(CV__ENABLE_C_API_CTORS)
CvMoments self = CV_STRUCT_INITIALIZER; return self;
#else
return CvMoments();
#endif
}
CV_INLINE CvMoments cvMoments(const cv::Moments& m)
{
#if !defined(CV__ENABLE_C_API_CTORS)
double am00 = std::abs(m.m00);
CvMoments self = {
m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03,
m.mu20, m.mu11, m.mu02, m.mu30, m.mu21, m.mu12, m.mu03,
am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0
};
return self;
#else
return CvMoments(m);
#endif
}
extern "C" {
#endif // __cplusplus
/** Hu invariants */
typedef struct CvHuMoments
{

View File

@ -135,7 +135,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
Determines support region for all the remained points */
do
{
CvPoint pt0;
cv::Point2i pt0;
int k, l = 0, d_num = 0;
i = (int)(current - array);

View File

@ -49,7 +49,7 @@
(deltas)[6] = (step), (deltas)[7] = (step) + (nch))
static const CvPoint icvCodeDeltas[8] =
{ CvPoint(1, 0), CvPoint(1, -1), CvPoint(0, -1), CvPoint(-1, -1), CvPoint(-1, 0), CvPoint(-1, 1), CvPoint(0, 1), CvPoint(1, 1) };
{ {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} };
CV_IMPL void
cvStartReadChainPoints( CvChain * chain, CvChainPtReader * reader )
@ -77,19 +77,15 @@ cvStartReadChainPoints( CvChain * chain, CvChainPtReader * reader )
CV_IMPL CvPoint
cvReadChainPoint( CvChainPtReader * reader )
{
schar *ptr;
int code;
CvPoint pt;
if( !reader )
CV_Error( CV_StsNullPtr, "" );
pt = reader->pt;
cv::Point2i pt = reader->pt;
ptr = reader->ptr;
if( ptr )
schar *ptr = reader->ptr;
if (ptr)
{
code = *ptr++;
int code = *ptr++;
if( ptr >= reader->block_max )
{
@ -104,7 +100,7 @@ cvReadChainPoint( CvChainPtReader * reader )
reader->pt.y = pt.y + icvCodeDeltas[code].y;
}
return pt;
return cvPoint(pt);
}
@ -209,14 +205,7 @@ cvStartFindContours_Impl( void* _img, CvMemStorage* storage,
CV_Error( CV_StsBadSize, "" );
CvContourScanner scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
memset( scanner, 0, sizeof(*scanner) );
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop
#endif
scanner->storage1 = scanner->storage2 = storage;
scanner->img0 = (schar *) img;
@ -700,7 +689,7 @@ icvFetchContourEx( schar* ptr,
int deltas[MAX_SIZE];
CvSeqWriter writer;
schar *i0 = ptr, *i1, *i3, *i4 = NULL;
CvRect rect;
cv::Rect rect;
int prev_s = -1, s, s_end;
int method = _method - 1;
@ -810,14 +799,14 @@ icvFetchContourEx( schar* ptr,
cvEndWriteSeq( &writer );
if( _method != CV_CHAIN_CODE )
((CvContour*)contour)->rect = rect;
((CvContour*)contour)->rect = cvRect(rect);
CV_DbgAssert( (writer.seq->total == 0 && writer.seq->first == 0) ||
writer.seq->total > writer.seq->first->count ||
(writer.seq->first->prev == writer.seq->first &&
writer.seq->first->next == writer.seq->first) );
if( _rect ) *_rect = rect;
if( _rect ) *_rect = cvRect(rect);
}
@ -888,7 +877,7 @@ icvFetchContourEx_32s( int* ptr,
int deltas[MAX_SIZE];
CvSeqWriter writer;
int *i0 = ptr, *i1, *i3, *i4;
CvRect rect;
cv::Rect rect;
int prev_s = -1, s, s_end;
int method = _method - 1;
const int right_flag = INT_MIN;
@ -1000,14 +989,14 @@ icvFetchContourEx_32s( int* ptr,
cvEndWriteSeq( &writer );
if( _method != CV_CHAIN_CODE )
((CvContour*)contour)->rect = rect;
((CvContour*)contour)->rect = cvRect(rect);
CV_DbgAssert( (writer.seq->total == 0 && writer.seq->first == 0) ||
writer.seq->total > writer.seq->first->count ||
(writer.seq->first->prev == writer.seq->first &&
writer.seq->first->next == writer.seq->first) );
if( _rect ) *_rect = rect;
if (_rect) *_rect = cvRect(rect);
}
@ -1035,7 +1024,7 @@ cvFindNextContour( CvContourScanner scanner )
int width = scanner->img_size.width;
int height = scanner->img_size.height;
int mode = scanner->mode;
CvPoint lnbd = scanner->lnbd;
cv::Point2i lnbd = scanner->lnbd;
int nbd = scanner->nbd;
int prev = img[x - 1];
int new_mask = -2;
@ -1125,7 +1114,7 @@ cvFindNextContour( CvContourScanner scanner )
_CvContourInfo *par_info = 0;
CvSeq *seq = 0;
int is_hole = 0;
CvPoint origin;
cv::Point2i origin;
/* if not external contour */
if( (!img_i && !(prev == 0 && p == 1)) ||
@ -1259,7 +1248,7 @@ cvFindNextContour( CvContourScanner scanner )
l_cinfo->is_hole = is_hole;
l_cinfo->contour = seq;
l_cinfo->origin = origin;
l_cinfo->origin = cvPoint(origin);
l_cinfo->parent = par_info;
if( scanner->approx_method1 != scanner->approx_method2 )
@ -1292,7 +1281,7 @@ cvFindNextContour( CvContourScanner scanner )
scanner->l_cinfo = l_cinfo;
scanner->pt.x = !img_i ? x + 1 : x + 1 - is_hole;
scanner->pt.y = y;
scanner->lnbd = lnbd;
scanner->lnbd = cvPoint(lnbd);
scanner->img = (schar *) img;
scanner->nbd = nbd;
return l_cinfo->contour;
@ -1480,7 +1469,7 @@ icvFindContoursInInterval( const CvArr* src,
uchar* src_data = 0;
int img_step = 0;
CvSize img_size;
cv::Size img_size;
int connect_flag;
int lower_total;
@ -1529,7 +1518,7 @@ icvFindContoursInInterval( const CvArr* src,
CV_Error( CV_StsBadArg, "Input array must be 8uC1 or 8sC1" );
src_data = mat->data.ptr;
img_step = mat->step;
img_size = cvGetMatSize( mat );
img_size = cvGetMatSize(mat);
// Create temporary sequences
runs = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvLinkedRunPoint), storage00 );
@ -1550,7 +1539,7 @@ icvFindContoursInInterval( const CvArr* src,
tmp_prev = upper_line;
for( j = 0; j < img_size.width; )
{
j = findStartContourPoint(src_data, img_size, j, haveSIMD);
j = findStartContourPoint(src_data, cvSize(img_size), j, haveSIMD);
if( j == img_size.width )
break;
@ -1560,7 +1549,7 @@ icvFindContoursInInterval( const CvArr* src,
tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
tmp_prev = tmp_prev->next;
j = findEndContourPoint(src_data, img_size, j + 1, haveSIMD);
j = findEndContourPoint(src_data, cvSize(img_size), j + 1, haveSIMD);
tmp.pt.x = j - 1;
CV_WRITE_SEQ_ELEM( tmp, writer );
@ -1584,7 +1573,7 @@ icvFindContoursInInterval( const CvArr* src,
all_total = runs->total;
for( j = 0; j < img_size.width; )
{
j = findStartContourPoint(src_data, img_size, j, haveSIMD);
j = findStartContourPoint(src_data, cvSize(img_size), j, haveSIMD);
if( j == img_size.width ) break;
@ -1593,7 +1582,7 @@ icvFindContoursInInterval( const CvArr* src,
tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer );
tmp_prev = tmp_prev->next;
j = findEndContourPoint(src_data, img_size, j + 1, haveSIMD);
j = findEndContourPoint(src_data, cvSize(img_size), j + 1, haveSIMD);
tmp.pt.x = j - 1;
CV_WRITE_SEQ_ELEM( tmp, writer );
@ -1908,11 +1897,11 @@ void cv::findContours( InputOutputArray _image, OutputArrayOfArrays _contours,
image = image0;
}
MemStorage storage(cvCreateMemStorage());
CvMat _cimage = image;
CvMat _cimage = cvMat(image);
CvSeq* _ccontours = 0;
if( _hierarchy.needed() )
_hierarchy.clear();
cvFindContours_Impl(&_cimage, storage, &_ccontours, sizeof(CvContour), mode, method, offset + offset0, 0);
cvFindContours_Impl(&_cimage, storage, &_ccontours, sizeof(CvContour), mode, method, cvPoint(offset0 + offset), 0);
if( !_ccontours )
{
_contours.clear();

View File

@ -2478,7 +2478,7 @@ void cv::drawContours( InputOutputArray _image, InputArrayOfArrays _contours,
CV_INSTRUMENT_REGION()
Mat image = _image.getMat(), hierarchy = _hierarchy.getMat();
CvMat _cimage = image;
CvMat _cimage = cvMat(image);
size_t ncontours = _contours.total();
size_t i = 0, first = 0, last = ncontours;
@ -2547,8 +2547,8 @@ void cv::drawContours( InputOutputArray _image, InputArrayOfArrays _contours,
}
}
cvDrawContours( &_cimage, &seq[first], color, color, contourIdx >= 0 ?
-maxLevel : maxLevel, thickness, lineType, offset );
cvDrawContours( &_cimage, &seq[first], cvScalar(color), cvScalar(color), contourIdx >= 0 ?
-maxLevel : maxLevel, thickness, lineType, cvPoint(offset) );
}
@ -2559,11 +2559,6 @@ static const int CodeDeltas[8][2] =
#define CV_ADJUST_EDGE_COUNT( count, seq ) \
((count) -= ((count) == (seq)->total && !CV_IS_SEQ_CLOSED(seq)))
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
CV_IMPL void
cvDrawContours( void* _img, CvSeq* contour,
CvScalar _externalColor, CvScalar _holeColor,
@ -2657,14 +2652,14 @@ cvDrawContours( void* _img, CvSeq* contour,
int shift = 0;
count -= !CV_IS_SEQ_CLOSED(contour);
CV_READ_SEQ_ELEM( pt1, reader );
{ CvPoint pt_ = CV_STRUCT_INITIALIZER; CV_READ_SEQ_ELEM(pt_, reader); pt1 = pt_; }
pt1 += offset;
if( thickness < 0 )
pts.push_back(pt1);
for( i = 0; i < count; i++ )
{
CV_READ_SEQ_ELEM( pt2, reader );
{ CvPoint pt_ = CV_STRUCT_INITIALIZER; CV_READ_SEQ_ELEM(pt_, reader); pt2 = pt_; }
pt2 += offset;
if( thickness >= 0 )
cv::ThickLine( img, pt1, pt2, clr, thickness, line_type, 2, shift );
@ -2706,7 +2701,7 @@ cvEllipse2Poly( CvPoint center, CvSize axes, int angle,
CV_IMPL CvScalar
cvColorToScalar( double packed_color, int type )
{
CvScalar scalar;
cv::Scalar scalar;
if( CV_MAT_DEPTH( type ) == CV_8U )
{
@ -2764,7 +2759,7 @@ cvColorToScalar( double packed_color, int type )
}
}
return scalar;
return cvScalar(scalar);
}
CV_IMPL int
@ -2892,11 +2887,7 @@ cvGetTextSize( const char *text, const CvFont *_font, CvSize *_size, int *_base_
cv::Size size = cv::getTextSize( text, _font->font_face, (_font->hscale + _font->vscale)*0.5,
_font->thickness, _base_line );
if( _size )
*_size = size;
*_size = cvSize(size);
}
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop // "-Wclass-memaccess"
#endif
/* End of file. */

Some files were not shown because too many files have changed in this diff Show More