Merge pull request #22150 from savuor:warpFrameTests

warpFrame() fixed & covered by tests
This commit is contained in:
Alexander Smorkalov 2022-08-03 12:27:19 +03:00 committed by GitHub
commit 99d216c8d4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 758 additions and 397 deletions

View File

@ -124,21 +124,20 @@ CV_EXPORTS_W void depthTo3d(InputArray depth, InputArray K, OutputArray points3d
*/
CV_EXPORTS_W void rescaleDepth(InputArray in, int type, OutputArray out, double depth_factor = 1000.0);
/** Warp the image: compute 3d points from the depth, transform them using given transformation,
* then project color point cloud to an image plane.
* This function can be used to visualize results of the Odometry algorithm.
* @param image The image (of CV_8UC1 or CV_8UC3 type)
* @param depth The depth (of type used in depthTo3d function)
* @param mask The mask of used pixels (of CV_8UC1), it can be empty
* @param Rt The transformation that will be applied to the 3d points computed from the depth
* @param cameraMatrix Camera matrix
* @param distCoeff Distortion coefficients
* @param warpedImage The warped image.
* @param warpedDepth The warped depth.
* @param warpedMask The warped mask.
/** Warps depth or RGB-D image by reprojecting it in 3d, applying Rt transformation
* and then projecting it back onto the image plane.
* This function can be used to visualize the results of the Odometry algorithm.
* @param depth Depth data, should be 1-channel CV_16U, CV_16S, CV_32F or CV_64F
* @param image RGB image (optional), should be 1-, 3- or 4-channel CV_8U
* @param mask Mask of used pixels (optional), should be CV_8UC1
* @param Rt Rotation+translation matrix (3x4 or 4x4) to be applied to depth points
* @param cameraMatrix Camera intrinsics matrix (3x3)
* @param warpedDepth The warped depth data (optional)
* @param warpedImage The warped RGB image (optional)
* @param warpedMask The mask of valid pixels in warped image (optional)
*/
CV_EXPORTS_W void warpFrame(InputArray image, InputArray depth, InputArray mask, InputArray Rt, InputArray cameraMatrix, InputArray distCoeff,
OutputArray warpedImage, OutputArray warpedDepth = noArray(), OutputArray warpedMask = noArray());
CV_EXPORTS_W void warpFrame(InputArray depth, InputArray image, InputArray mask, InputArray Rt, InputArray cameraMatrix,
OutputArray warpedDepth = noArray(), OutputArray warpedImage = noArray(), OutputArray warpedMask = noArray());
enum RgbdPlaneMethod
{

View File

@ -16,7 +16,7 @@ namespace cv
/** These constants are used to set a type of data which odometry will use
* @param DEPTH only depth data
* @param RGB only rgb image
* @param RGB_DEPTH only depth and rgb data simultaneously
* @param RGB_DEPTH depth and rgb data simultaneously
*/
enum OdometryType
{
@ -26,8 +26,8 @@ enum OdometryType
};
/** These constants are used to set the speed and accuracy of odometry
* @param COMMON only accurate but not so fast
* @param FAST only less accurate but faster
* @param COMMON accurate but not so fast
* @param FAST less accurate but faster
*/
enum class OdometryAlgoType
{
@ -62,9 +62,9 @@ public:
*/
void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame);
/** Prepare frame for odometry calculation
/** Compute Rigid Transformation between two frames so that Rt * src = dst
* @param srcFrame src frame ("original" image)
* @param dstFrame dsr frame ("rotated" image)
* @param dstFrame dst frame ("rotated" image)
* @param Rt Rigid transformation, which will be calculated, in form:
* { R_11 R_12 R_13 t_1
* R_21 R_22 R_23 t_2

View File

@ -85,10 +85,10 @@ bool OdometryICP::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
for (int i = 0; i < miterCounts.size().height; i++)
iterCounts.push_back(miterCounts.at<int>(i));
bool isCorrect = RGBDICPOdometryImpl(Rt, Mat(), srcFrame, dstFrame, cameraMatrix,
this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
iterCounts, this->settings.getMaxTranslation(),
this->settings.getMaxRotation(), settings.getSobelScale(),
OdometryType::DEPTH, OdometryTransformType::RIGID_TRANSFORMATION, this->algtype);
this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
iterCounts, this->settings.getMaxTranslation(),
this->settings.getMaxRotation(), settings.getSobelScale(),
OdometryType::DEPTH, OdometryTransformType::RIGID_TRANSFORMATION, this->algtype);
return isCorrect;
}
@ -107,7 +107,7 @@ bool OdometryICP::compute(InputArray _srcFrame, InputArray _dstFrame, OutputArra
bool OdometryICP::compute(InputArray, InputArray, InputArray, InputArray, OutputArray) const
{
CV_Error(cv::Error::StsBadFunc, "This volume does not work with depth and rgb data simultaneously");
CV_Error(cv::Error::StsBadFunc, "This odometry does not work with depth and rgb data simultaneously");
}
class OdometryRGB : public Odometry::Impl
@ -165,12 +165,13 @@ bool OdometryRGB::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
for (int i = 0; i < miterCounts.size().height; i++)
iterCounts.push_back(miterCounts.at<int>(i));
bool isCorrect = RGBDICPOdometryImpl(Rt, Mat(), srcFrame, dstFrame, cameraMatrix,
this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
iterCounts, this->settings.getMaxTranslation(),
this->settings.getMaxRotation(), settings.getSobelScale(),
OdometryType::RGB, OdometryTransformType::RIGID_TRANSFORMATION, this->algtype);
this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
iterCounts, this->settings.getMaxTranslation(),
this->settings.getMaxRotation(), settings.getSobelScale(),
OdometryType::RGB, OdometryTransformType::RIGID_TRANSFORMATION, this->algtype);
return isCorrect;
}
bool OdometryRGB::compute(InputArray _srcFrame, InputArray _dstFrame, OutputArray Rt) const
{
OdometryFrame srcFrame = this->createOdometryFrame();
@ -186,7 +187,7 @@ bool OdometryRGB::compute(InputArray _srcFrame, InputArray _dstFrame, OutputArra
bool OdometryRGB::compute(InputArray, InputArray, InputArray, InputArray, OutputArray) const
{
CV_Error(cv::Error::StsBadFunc, "This volume does not work with depth and rgb data simultaneously");
CV_Error(cv::Error::StsBadFunc, "This odometry does not work with depth and rgb data simultaneously");
}
class OdometryRGBD : public Odometry::Impl
@ -243,10 +244,10 @@ bool OdometryRGBD::compute(const OdometryFrame& srcFrame, const OdometryFrame& d
for (int i = 0; i < miterCounts.size().height; i++)
iterCounts.push_back(miterCounts.at<int>(i));
bool isCorrect = RGBDICPOdometryImpl(Rt, Mat(), srcFrame, dstFrame, cameraMatrix,
this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
iterCounts, this->settings.getMaxTranslation(),
this->settings.getMaxRotation(), settings.getSobelScale(),
OdometryType::RGB_DEPTH, OdometryTransformType::RIGID_TRANSFORMATION, this->algtype);
this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
iterCounts, this->settings.getMaxTranslation(),
this->settings.getMaxRotation(), settings.getSobelScale(),
OdometryType::RGB_DEPTH, OdometryTransformType::RIGID_TRANSFORMATION, this->algtype);
return isCorrect;
}
@ -256,7 +257,7 @@ bool OdometryRGBD::compute(InputArray, InputArray, OutputArray) const
}
bool OdometryRGBD::compute(InputArray _srcDepthFrame, InputArray _srcRGBFrame,
InputArray _dstDepthFrame, InputArray _dstRGBFrame, OutputArray Rt) const
InputArray _dstDepthFrame, InputArray _dstRGBFrame, OutputArray Rt) const
{
OdometryFrame srcFrame = this->createOdometryFrame();
OdometryFrame dstFrame = this->createOdometryFrame();
@ -266,7 +267,6 @@ bool OdometryRGBD::compute(InputArray _srcDepthFrame, InputArray _srcRGBFrame,
dstFrame.setImage(_dstRGBFrame);
prepareRGBDFrame(srcFrame, dstFrame, this->settings, this->algtype);
bool isCorrect = compute(srcFrame, dstFrame, Rt);
return isCorrect;
}
@ -345,15 +345,16 @@ void Odometry::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame)
bool Odometry::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt)
{
//this->prepareFrames(srcFrame, dstFrame);
return this->impl->compute(srcFrame, dstFrame, Rt);
}
bool Odometry::compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const
{
return this->impl->compute(srcFrame, dstFrame, Rt);
}
bool Odometry::compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
{
@ -361,82 +362,219 @@ bool Odometry::compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
}
template<class ImageElemType>
static void
warpFrameImpl(InputArray _image, InputArray depth, InputArray _mask,
const Mat& Rt, const Mat& cameraMatrix, const Mat& distCoeff,
OutputArray _warpedImage, OutputArray warpedDepth, OutputArray warpedMask)
void warpFrame(InputArray depth, InputArray image, InputArray mask,
InputArray Rt, InputArray cameraMatrix,
OutputArray warpedDepth, OutputArray warpedImage, OutputArray warpedMask)
{
CV_Assert(_image.size() == depth.size());
Mat cloud;
depthTo3d(depth, cameraMatrix, cloud);
Mat cloud3;
cloud3.create(cloud.size(), CV_32FC3);
for (int y = 0; y < cloud3.rows; y++)
CV_Assert(cameraMatrix.size() == Size(3, 3));
CV_Assert(cameraMatrix.depth() == CV_32F || cameraMatrix.depth() == CV_64F);
Matx33d K, Kinv;
cameraMatrix.getMat().convertTo(K, CV_64F);
std::vector<bool> camPlaces { /* fx */ true, false, /* cx */ true, false, /* fy */ true, /* cy */ true, false, false, /* 1 */ true};
for (int i = 0; i < 9; i++)
{
for (int x = 0; x < cloud3.cols; x++)
CV_Assert(camPlaces[i] == (K.val[i] > DBL_EPSILON));
}
Kinv = K.inv();
CV_Assert((Rt.cols() == 4) && (Rt.rows() == 3 || Rt.rows() == 4));
CV_Assert(Rt.depth() == CV_32F || Rt.depth() == CV_64F);
Mat rtmat;
Rt.getMat().convertTo(rtmat, CV_64F);
Affine3d rt(rtmat);
CV_Assert(!depth.empty());
CV_Assert(depth.channels() == 1);
double maxDepth = 0;
int depthDepth = depth.depth();
switch (depthDepth)
{
case CV_16U:
maxDepth = std::numeric_limits<unsigned short>::max();
break;
case CV_32F:
maxDepth = std::numeric_limits<float>::max();
break;
case CV_64F:
maxDepth = std::numeric_limits<double>::max();
break;
default:
CV_Error(Error::StsBadArg, "Unsupported depth data type");
}
Mat_<double> depthDbl;
depth.getMat().convertTo(depthDbl, CV_64F);
Size sz = depth.size();
Mat_<uchar> maskMat;
if (!mask.empty())
{
CV_Assert(mask.type() == CV_8UC1);
CV_Assert(mask.size() == sz);
maskMat = mask.getMat();
}
int imageType = -1;
Mat imageMat;
if (!image.empty())
{
imageType = image.type();
CV_Assert(imageType == CV_8UC1 || imageType == CV_8UC3 || imageType == CV_8UC4);
CV_Assert(image.size() == sz);
CV_Assert(warpedImage.needed());
imageMat = image.getMat();
}
CV_Assert(warpedDepth.needed() || warpedImage.needed() || warpedMask.needed());
// Getting new coords for depth point
// see the explanation in the loop below
Matx33d krki = K * rt.rotation() * Kinv;
Matx32d krki_cols01 = krki.get_minor<3, 2>(0, 0);
Vec3d krki_col2(krki.col(2).val);
Vec3d ktmat = K * rt.translation();
Mat_<Vec3d> reprojBack(depth.size());
for (int y = 0; y < sz.height; y++)
{
const uchar* maskRow = maskMat.empty() ? nullptr : maskMat[y];
const double* depthRow = depthDbl[y];
Vec3d* reprojRow = reprojBack[y];
for (int x = 0; x < sz.width; x++)
{
Vec4f p = cloud.at<Vec4f>(y, x);
cloud3.at<Vec3f>(y, x) = Vec3f(p[0], p[1], p[2]);
double z = depthRow[x];
bool badz = cvIsNaN(z) || cvIsInf(z) || z <= 0 || z >= maxDepth || (maskRow && !maskRow[x]);
Vec3d v;
if (!badz)
{
// Reproject pixel (x, y) using known z, rotate+translate and project back
// getting new pixel in projective coordinates:
// v = K * Rt * K^-1 * ([x, y, 1] * z) = [new_x*new_z, new_y*new_z, new_z]
// v = K * (R * K^-1 * ([x, y, 1] * z) + t) =
// v = krki * [x, y, 1] * z + ktmat =
// v = (krki_cols01 * [x, y] + krki_col2) * z + K * t
v = (krki_cols01 * Vec2d(x, y) + krki_col2) * z + ktmat;
}
else
{
v = Vec3d();
}
reprojRow[x] = v;
}
}
std::vector<Point2f> points2d;
Mat transformedCloud;
perspectiveTransform(cloud3, transformedCloud, Rt);
projectPoints(transformedCloud.reshape(3, 1), Mat::eye(3, 3, CV_64FC1), Mat::zeros(3, 1, CV_64FC1), cameraMatrix,
distCoeff, points2d);
// Draw new depth in z-buffer manner
Mat image = _image.getMat();
Size sz = _image.size();
Mat mask = _mask.getMat();
_warpedImage.create(sz, image.type());
Mat warpedImage = _warpedImage.getMat();
Mat warpedImageMat;
if (warpedImage.needed())
{
warpedImage.create(sz, imageType);
warpedImage.setZero();
warpedImageMat = warpedImage.getMat();
}
const double infinity = std::numeric_limits<double>::max();
Mat zBuffer(sz, CV_32FC1, infinity);
Mat zBuffer(sz, CV_32FC1, std::numeric_limits<float>::max());
const Rect rect = Rect(Point(), sz);
for (int y = 0; y < sz.height; y++)
{
//const Point3f* cloud_row = cloud.ptr<Point3f>(y);
const Point3f* transformedCloud_row = transformedCloud.ptr<Point3f>(y);
const Point2f* points2d_row = &points2d[y * sz.width];
const ImageElemType* image_row = image.ptr<ImageElemType>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<uchar>(y);
uchar* imageRow1ch = nullptr;
Vec3b* imageRow3ch = nullptr;
Vec4b* imageRow4ch = nullptr;
switch (imageType)
{
case -1:
break;
case CV_8UC1:
imageRow1ch = imageMat.ptr<uchar>(y);
break;
case CV_8UC3:
imageRow3ch = imageMat.ptr<Vec3b>(y);
break;
case CV_8UC4:
imageRow4ch = imageMat.ptr<Vec4b>(y);
break;
default:
break;
}
const Vec3d* reprojRow = reprojBack[y];
for (int x = 0; x < sz.width; x++)
{
const float transformed_z = transformedCloud_row[x].z;
const Point2i p2d = points2d_row[x];
if ((!mask_row || mask_row[x]) && transformed_z > 0 && rect.contains(p2d) && /*!cvIsNaN(cloud_row[x].z) && */zBuffer.at<float>(p2d) > transformed_z)
Vec3d v = reprojRow[x];
double z = v[2];
if (z > 0)
{
warpedImage.at<ImageElemType>(p2d) = image_row[x];
zBuffer.at<float>(p2d) = transformed_z;
Point uv(cvFloor(v[0] / z), cvFloor(v[1] / z));
if (rect.contains(uv))
{
float oldz = zBuffer.at<float>(uv);
if (z < oldz)
{
zBuffer.at<float>(uv) = z;
switch (imageType)
{
case -1:
break;
case CV_8UC1:
warpedImageMat.at<uchar>(uv) = imageRow1ch[x];
break;
case CV_8UC3:
warpedImageMat.at<Vec3b>(uv) = imageRow3ch[x];
break;
case CV_8UC4:
warpedImageMat.at<Vec4b>(uv) = imageRow4ch[x];
break;
default:
break;
}
}
}
}
}
}
if (warpedMask.needed())
Mat(zBuffer != std::numeric_limits<float>::max()).copyTo(warpedMask);
if (warpedDepth.needed())
if (warpedDepth.needed() || warpedMask.needed())
{
zBuffer.setTo(std::numeric_limits<float>::quiet_NaN(), zBuffer == std::numeric_limits<float>::max());
zBuffer.copyTo(warpedDepth);
Mat goodMask = (zBuffer < infinity);
if (warpedDepth.needed())
{
warpedDepth.create(sz, depthDepth);
double badVal;
switch (depthDepth)
{
case CV_16U:
badVal = 0;
break;
case CV_32F:
badVal = std::numeric_limits<float>::quiet_NaN();
break;
case CV_64F:
badVal = std::numeric_limits<double>::quiet_NaN();
break;
default:
break;
}
zBuffer.convertTo(warpedDepth, depthDepth);
warpedDepth.setTo(badVal, ~goodMask);
}
if (warpedMask.needed())
{
warpedMask.create(sz, CV_8UC1);
goodMask.copyTo(warpedMask);
}
}
}
void warpFrame(InputArray image, InputArray depth, InputArray mask,
InputArray Rt, InputArray cameraMatrix, InputArray distCoeff,
OutputArray warpedImage, OutputArray warpedDepth, OutputArray warpedMask)
{
if (image.type() == CV_8UC1)
warpFrameImpl<uchar>(image, depth, mask, Rt.getMat(), cameraMatrix.getMat(), distCoeff.getMat(), warpedImage, warpedDepth, warpedMask);
else if (image.type() == CV_8UC3)
warpFrameImpl<Point3_<uchar> >(image, depth, mask, Rt.getMat(), cameraMatrix.getMat(), distCoeff.getMat(), warpedImage, warpedDepth, warpedMask);
else
CV_Error(Error::StsBadArg, "Image has to be type of CV_8UC1 or CV_8UC3");
}
}

View File

@ -359,8 +359,8 @@ void preparePyramidImage(InputArray image, InputOutputArrayOfArrays pyramidImage
template<typename TMat>
void preparePyramidMask(InputArray mask, InputArrayOfArrays pyramidDepth, float minDepth, float maxDepth,
InputArrayOfArrays pyramidNormal,
InputOutputArrayOfArrays pyramidMask)
InputArrayOfArrays pyramidNormal,
InputOutputArrayOfArrays pyramidMask)
{
minDepth = std::max(0.f, minDepth);
@ -492,8 +492,8 @@ void preparePyramidSobel(InputArrayOfArrays pyramidImage, int dx, int dy, InputO
}
void preparePyramidTexturedMask(InputArrayOfArrays pyramid_dI_dx, InputArrayOfArrays pyramid_dI_dy,
InputArray minGradMagnitudes, InputArrayOfArrays pyramidMask, double maxPointsPart,
InputOutputArrayOfArrays pyramidTexturedMask, double sobelScale)
InputArray minGradMagnitudes, InputArrayOfArrays pyramidMask, double maxPointsPart,
InputOutputArrayOfArrays pyramidTexturedMask, double sobelScale)
{
size_t didxLevels = pyramid_dI_dx.size(-1).width;
size_t texLevels = pyramidTexturedMask.size(-1).width;
@ -606,7 +606,7 @@ void preparePyramidNormals(InputArray normals, InputArrayOfArrays pyramidDepth,
}
void preparePyramidNormalsMask(InputArray pyramidNormals, InputArray pyramidMask, double maxPointsPart,
InputOutputArrayOfArrays /*std::vector<Mat>&*/ pyramidNormalsMask)
InputOutputArrayOfArrays /*std::vector<Mat>&*/ pyramidNormalsMask)
{
size_t maskLevels = pyramidMask.size(-1).width;
size_t norMaskLevels = pyramidNormalsMask.size(-1).width;
@ -648,38 +648,15 @@ void preparePyramidNormalsMask(InputArray pyramidNormals, InputArray pyramidMask
}
}
bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
const OdometryFrame srcFrame,
const OdometryFrame dstFrame,
const Matx33f& cameraMatrix,
float maxDepthDiff, float angleThreshold, const std::vector<int>& iterCounts,
double maxTranslation, double maxRotation, double sobelScale,
OdometryType method, OdometryTransformType transfromType, OdometryAlgoType algtype)
OdometryType method, OdometryTransformType transformType, OdometryAlgoType algtype)
{
int transformDim = -1;
CalcRgbdEquationCoeffsPtr rgbdEquationFuncPtr = 0;
CalcICPEquationCoeffsPtr icpEquationFuncPtr = 0;
switch(transfromType)
{
case OdometryTransformType::RIGID_TRANSFORMATION:
transformDim = 6;
rgbdEquationFuncPtr = calcRgbdEquationCoeffs;
icpEquationFuncPtr = calcICPEquationCoeffs;
break;
case OdometryTransformType::ROTATION:
transformDim = 3;
rgbdEquationFuncPtr = calcRgbdEquationCoeffsRotation;
icpEquationFuncPtr = calcICPEquationCoeffsRotation;
break;
case OdometryTransformType::TRANSLATION:
transformDim = 3;
rgbdEquationFuncPtr = calcRgbdEquationCoeffsTranslation;
icpEquationFuncPtr = calcICPEquationCoeffsTranslation;
break;
default:
CV_Error(Error::StsBadArg, "Incorrect transformation type");
}
int transformDim = getTransformDim(transformType);
const int minOverdetermScale = 20;
const int minCorrespsCount = minOverdetermScale * transformDim;
@ -695,7 +672,6 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
for(int level = (int)iterCounts.size() - 1; level >= 0; level--)
{
const Matx33f& levelCameraMatrix = pyramidCameraMatrix[level];
const Matx33f& levelCameraMatrix_inv = levelCameraMatrix.inv(DECOMP_SVD);
const Mat srcLevelDepth, dstLevelDepth;
const Mat srcLevelImage, dstLevelImage;
srcFrame.getPyramidAt(srcLevelDepth, OdometryFramePyramidType::PYR_DEPTH, level);
@ -717,32 +693,33 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
for(int iter = 0; iter < iterCounts[level]; iter ++)
{
Mat resultRt_inv = resultRt.inv(DECOMP_SVD);
Mat corresps_rgbd, corresps_icp, diffs_rgbd, diffs_icp;
double sigma_rgbd = 0, sigma_icp = 0;
Mat corresps_rgbd, corresps_icp, diffs_rgbd;
Mat dummy;
double sigma_rgbd = 0, dummyFloat = 0;
const Mat pyramidMask;
srcFrame.getPyramidAt(pyramidMask, OdometryFramePyramidType::PYR_MASK, level);
if(method != OdometryType::DEPTH)// RGB
if(method != OdometryType::DEPTH) // RGB
{
const Mat pyramidTexturedMask;
dstFrame.getPyramidAt(pyramidTexturedMask, OdometryFramePyramidType::PYR_TEXMASK, level);
computeCorresps(levelCameraMatrix, levelCameraMatrix_inv, resultRt_inv,
computeCorresps(levelCameraMatrix, resultRt,
srcLevelImage, srcLevelDepth, pyramidMask,
dstLevelImage, dstLevelDepth, pyramidTexturedMask, maxDepthDiff,
corresps_rgbd, diffs_rgbd, sigma_rgbd, OdometryType::RGB);
}
if(method != OdometryType::RGB)// ICP
if(method != OdometryType::RGB) // ICP
{
if (algtype == OdometryAlgoType::COMMON)
{
const Mat pyramidNormalsMask;
dstFrame.getPyramidAt(pyramidNormalsMask, OdometryFramePyramidType::PYR_NORMMASK, level);
computeCorresps(levelCameraMatrix, levelCameraMatrix_inv, resultRt_inv,
Mat(), srcLevelDepth, pyramidMask,
Mat(), dstLevelDepth, pyramidNormalsMask, maxDepthDiff,
corresps_icp, diffs_icp, sigma_icp, OdometryType::DEPTH);
computeCorresps(levelCameraMatrix, resultRt,
Mat(), srcLevelDepth, pyramidMask,
Mat(), dstLevelDepth, pyramidNormalsMask, maxDepthDiff,
corresps_icp, dummy, dummyFloat, OdometryType::DEPTH);
}
}
@ -763,7 +740,7 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
dstFrame.getPyramidAt(dstPyrIdy, OdometryFramePyramidType::PYR_DIY, level);
calcRgbdLsmMatrices(srcPyrCloud, resultRt, dstPyrIdx, dstPyrIdy,
corresps_rgbd, diffs_rgbd, sigma_rgbd, fx, fy, sobelScale,
AtA_rgbd, AtB_rgbd, rgbdEquationFuncPtr, transformDim);
AtA_rgbd, AtB_rgbd, transformType);
AtA += AtA_rgbd;
AtB += AtB_rgbd;
}
@ -776,7 +753,7 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
if (algtype == OdometryAlgoType::COMMON)
{
calcICPLsmMatrices(srcPyrCloud, resultRt, dstPyrCloud, dstPyrNormals,
corresps_icp, AtA_icp, AtB_icp, icpEquationFuncPtr, transformDim);
corresps_icp, AtA_icp, AtB_icp, transformType);
}
else
{
@ -798,27 +775,28 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
{
break;
}
if(transfromType == OdometryTransformType::ROTATION)
Mat tmp61(6, 1, CV_64FC1, Scalar(0));
if(transformType == OdometryTransformType::ROTATION)
{
Mat tmp(6, 1, CV_64FC1, Scalar(0));
ksi.copyTo(tmp.rowRange(0,3));
ksi = tmp;
ksi.copyTo(tmp61.rowRange(0,3));
ksi = tmp61;
}
else if(transfromType == OdometryTransformType::TRANSLATION)
else if(transformType == OdometryTransformType::TRANSLATION)
{
Mat tmp(6, 1, CV_64FC1, Scalar(0));
ksi.copyTo(tmp.rowRange(3,6));
ksi = tmp;
ksi.copyTo(tmp61.rowRange(3,6));
ksi = tmp61;
}
computeProjectiveMatrix(ksi, currRt);
resultRt = currRt * resultRt;
//TODO: fixit, transform is used for Fast ICP only
Vec6f x(ksi);
Affine3f tinc(Vec3f(x.val), Vec3f(x.val + 3));
transform = tinc * transform;
isOk = true;
}
}
@ -826,7 +804,6 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
_Rt.create(resultRt.size(), resultRt.type());
Mat Rt = _Rt.getMat();
resultRt.copyTo(Rt);
if(isOk)
{
Mat deltaRt;
@ -841,105 +818,112 @@ bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
return isOk;
}
// Rotate dst by RtInv to get corresponding src pixels
// In RGB case compute sigma and diffs too
void computeCorresps(const Matx33f& _K, const Matx33f& _K_inv, const Mat& Rt,
const Mat& image0, const Mat& depth0, const Mat& validMask0,
const Mat& image1, const Mat& depth1, const Mat& selectMask1, float maxDepthDiff,
Mat& _corresps, Mat& _diffs, double& _sigma, OdometryType method)
void computeCorresps(const Matx33f& _K, const Mat& rt,
const Mat& imageSrc, const Mat& depthSrc, const Mat& validMaskSrc,
const Mat& imageDst, const Mat& depthDst, const Mat& selectMaskDst, float maxDepthDiff,
Mat& _corresps, Mat& _diffs, double& _sigma, OdometryType method)
{
CV_Assert(Rt.type() == CV_64FC1);
Mat mrtInv = rt.inv(DECOMP_SVD);
Matx44d rtInv = mrtInv;
Mat corresps(depth1.size(), CV_16SC2, Scalar::all(-1));
Mat diffs(depth1.size(), CV_32F, Scalar::all(-1));
Mat corresps(depthDst.size(), CV_16SC2, Scalar::all(-1));
Mat diffs;
if (method == OdometryType::RGB)
diffs = Mat(depthDst.size(), CV_32F, Scalar::all(-1));
Matx33d K(_K), K_inv(_K_inv);
Rect r(0, 0, depth1.cols, depth1.rows);
Mat Kt = Rt(Rect(3, 0, 1, 3)).clone();
Kt = K * Kt;
const double* Kt_ptr = Kt.ptr<const double>();
// src_2d = K * src_3d, src_3d = K_inv * [src_2d | z]
//
AutoBuffer<float> buf(3 * (depth1.cols + depth1.rows));
Matx33d K(_K);
Matx33d K_inv = K.inv(DECOMP_SVD);
Rect r(0, 0, depthDst.cols, depthDst.rows);
Matx31d tinv = rtInv.get_minor<3, 1>(0, 3);
Matx31d ktinvm = K * tinv;
//const double* Kt_ptr = Kt.ptr<const double>();
Point3d ktinv(ktinvm(0, 0), ktinvm(1, 0), ktinvm(2, 0));
AutoBuffer<float> buf(3 * (depthDst.cols + depthDst.rows));
float* KRK_inv0_u1 = buf.data();
float* KRK_inv1_v1_plus_KRK_inv2 = KRK_inv0_u1 + depth1.cols;
float* KRK_inv3_u1 = KRK_inv1_v1_plus_KRK_inv2 + depth1.rows;
float* KRK_inv4_v1_plus_KRK_inv5 = KRK_inv3_u1 + depth1.cols;
float* KRK_inv6_u1 = KRK_inv4_v1_plus_KRK_inv5 + depth1.rows;
float* KRK_inv7_v1_plus_KRK_inv8 = KRK_inv6_u1 + depth1.cols;
float* KRK_inv1_v1_plus_KRK_inv2 = buf.data() + depthDst.cols;
float* KRK_inv3_u1 = buf.data() + depthDst.cols + depthDst.rows;
float* KRK_inv4_v1_plus_KRK_inv5 = buf.data() + depthDst.cols * 2 + depthDst.rows;
float* KRK_inv6_u1 = buf.data() + depthDst.cols * 2 + depthDst.rows * 2;
float* KRK_inv7_v1_plus_KRK_inv8 = buf.data() + depthDst.cols * 3 + depthDst.rows * 2;
{
Mat R = Rt(Rect(0, 0, 3, 3)).clone();
Matx33d rinv = rtInv.get_minor<3, 3>(0, 0);
Mat KRK_inv = K * R * K_inv;
const double* KRK_inv_ptr = KRK_inv.ptr<const double>();
for (int u1 = 0; u1 < depth1.cols; u1++)
Matx33d kriki = K * rinv * K_inv;
for (int udst = 0; udst < depthDst.cols; udst++)
{
KRK_inv0_u1[u1] = (float)(KRK_inv_ptr[0] * u1);
KRK_inv3_u1[u1] = (float)(KRK_inv_ptr[3] * u1);
KRK_inv6_u1[u1] = (float)(KRK_inv_ptr[6] * u1);
KRK_inv0_u1[udst] = (float)(kriki(0, 0) * udst);
KRK_inv3_u1[udst] = (float)(kriki(1, 0) * udst);
KRK_inv6_u1[udst] = (float)(kriki(2, 0) * udst);
}
for (int v1 = 0; v1 < depth1.rows; v1++)
for (int vdst = 0; vdst < depthDst.rows; vdst++)
{
KRK_inv1_v1_plus_KRK_inv2[v1] = (float)(KRK_inv_ptr[1] * v1 + KRK_inv_ptr[2]);
KRK_inv4_v1_plus_KRK_inv5[v1] = (float)(KRK_inv_ptr[4] * v1 + KRK_inv_ptr[5]);
KRK_inv7_v1_plus_KRK_inv8[v1] = (float)(KRK_inv_ptr[7] * v1 + KRK_inv_ptr[8]);
KRK_inv1_v1_plus_KRK_inv2[vdst] = (float)(kriki(0, 1) * vdst + kriki(0, 2));
KRK_inv4_v1_plus_KRK_inv5[vdst] = (float)(kriki(1, 1) * vdst + kriki(1, 2));
KRK_inv7_v1_plus_KRK_inv8[vdst] = (float)(kriki(2, 1) * vdst + kriki(2, 2));
}
}
double sigma = 0;
int correspCount = 0;
for (int v1 = 0; v1 < depth1.rows; v1++)
for (int vdst = 0; vdst < depthDst.rows; vdst++)
{
const float* depth1_row = depth1.ptr<float>(v1);
const uchar* mask1_row = selectMask1.ptr<uchar>(v1);
for (int u1 = 0; u1 < depth1.cols; u1++)
const float* depthDst_row = depthDst.ptr<float>(vdst);
const uchar* maskDst_row = selectMaskDst.ptr<uchar>(vdst);
for (int udst = 0; udst < depthDst.cols; udst++)
{
float d1 = depth1_row[u1];
if (mask1_row[u1] && !cvIsNaN(d1))
{
//CV_DbgAssert(!cvIsNaN(d1));
float transformed_d1 = static_cast<float>(d1 * (KRK_inv6_u1[u1] + KRK_inv7_v1_plus_KRK_inv8[v1]) +
Kt_ptr[2]);
float ddst = depthDst_row[udst];
if (transformed_d1 > 0)
if (maskDst_row[udst] && !cvIsNaN(ddst))
{
float transformed_ddst = static_cast<float>(ddst * (KRK_inv6_u1[udst] + KRK_inv7_v1_plus_KRK_inv8[vdst]) + ktinv.z);
if (transformed_ddst > 0)
{
float transformed_d1_inv = 1.f / transformed_d1;
int u0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv0_u1[u1] + KRK_inv1_v1_plus_KRK_inv2[v1]) +
Kt_ptr[0]));
int v0 = cvRound(transformed_d1_inv * (d1 * (KRK_inv3_u1[u1] + KRK_inv4_v1_plus_KRK_inv5[v1]) +
Kt_ptr[1]));
if (r.contains(Point(u0, v0)))
float transformed_ddst_inv = 1.f / transformed_ddst;
int usrc = cvRound(transformed_ddst_inv * (ddst * (KRK_inv0_u1[udst] + KRK_inv1_v1_plus_KRK_inv2[vdst]) + ktinv.x));
int vsrc = cvRound(transformed_ddst_inv * (ddst * (KRK_inv3_u1[udst] + KRK_inv4_v1_plus_KRK_inv5[vdst]) + ktinv.y));
if (r.contains(Point(usrc, vsrc)))
{
float d0 = depth0.at<float>(v0, u0);
if (validMask0.at<uchar>(v0, u0) && std::abs(transformed_d1 - d0) <= maxDepthDiff)
float dsrc = depthSrc.at<float>(vsrc, usrc);
if (validMaskSrc.at<uchar>(vsrc, usrc) && std::abs(transformed_ddst - dsrc) <= maxDepthDiff)
{
CV_DbgAssert(!cvIsNaN(d0));
Vec2s& c = corresps.at<Vec2s>(v0, u0);
float& d = diffs.at<float>(v0, u0);
CV_DbgAssert(!cvIsNaN(dsrc));
Vec2s& c = corresps.at<Vec2s>(vsrc, usrc);
float diff = 0;
if (c[0] != -1)
{
diff = 0;
int exist_u1 = c[0], exist_v1 = c[1];
float exist_d1 = (float)(depth1.at<float>(exist_v1, exist_u1) *
(KRK_inv6_u1[exist_u1] + KRK_inv7_v1_plus_KRK_inv8[exist_v1]) + Kt_ptr[2]);
float exist_d1 = (float)(depthDst.at<float>(exist_v1, exist_u1) *
(KRK_inv6_u1[exist_u1] + KRK_inv7_v1_plus_KRK_inv8[exist_v1]) + ktinv.z);
if (transformed_d1 > exist_d1)
if (transformed_ddst > exist_d1)
continue;
if (method == OdometryType::RGB)
diff = static_cast<float>(static_cast<int>(image0.at<uchar>(v0, u0)) -
static_cast<int>(image1.at<uchar>(v1, u1)));
diff = static_cast<float>(static_cast<int>(imageSrc.at<uchar>(vsrc, usrc)) -
static_cast<int>(imageDst.at<uchar>(vdst, udst)));
}
else
{
if (method == OdometryType::RGB)
diff = static_cast<float>(static_cast<int>(image0.at<uchar>(v0, u0)) -
static_cast<int>(image1.at<uchar>(v1, u1)));
diff = static_cast<float>(static_cast<int>(imageSrc.at<uchar>(vsrc, usrc)) -
static_cast<int>(imageDst.at<uchar>(vdst, udst)));
correspCount++;
}
c = Vec2s((short)u1, (short)v1);
d = diff;
sigma += diff * diff;
c = Vec2s((short)udst, (short)vdst);
if (method == OdometryType::RGB)
{
diffs.at<float>(vsrc, usrc) = diff;
sigma += diff * diff;
}
}
}
}
@ -950,20 +934,26 @@ void computeCorresps(const Matx33f& _K, const Matx33f& _K_inv, const Mat& Rt,
_sigma = std::sqrt(sigma / double(correspCount));
_corresps.create(correspCount, 1, CV_32SC4);
_diffs.create(correspCount, 1, CV_32F);
Vec4i* corresps_ptr = _corresps.ptr<Vec4i>();
float* diffs_ptr = _diffs.ptr<float>();
for (int v0 = 0, i = 0; v0 < corresps.rows; v0++)
float* diffs_ptr;
if (method == OdometryType::RGB)
{
const Vec2s* corresps_row = corresps.ptr<Vec2s>(v0);
const float* diffs_row = diffs.ptr<float>(v0);
for (int u0 = 0; u0 < corresps.cols; u0++)
_diffs.create(correspCount, 1, CV_32F);
diffs_ptr = _diffs.ptr<float>();
}
for (int vsrc = 0, i = 0; vsrc < corresps.rows; vsrc++)
{
const Vec2s* corresps_row = corresps.ptr<Vec2s>(vsrc);
const float* diffs_row;
if (method == OdometryType::RGB)
diffs_row = diffs.ptr<float>(vsrc);
for (int usrc = 0; usrc < corresps.cols; usrc++)
{
const Vec2s& c = corresps_row[u0];
const float& d = diffs_row[u0];
const Vec2s& c = corresps_row[usrc];
const float& d = diffs_row[usrc];
if (c[0] != -1)
{
corresps_ptr[i] = Vec4i(u0, v0, c[0], c[1]);
corresps_ptr[i] = Vec4i(usrc, vsrc, c[0], c[1]);
if (method == OdometryType::RGB)
diffs_ptr[i] = d;
i++;
@ -973,18 +963,18 @@ void computeCorresps(const Matx33f& _K, const Matx33f& _K_inv, const Mat& Rt,
}
void calcRgbdLsmMatrices(const Mat& cloud0, const Mat& Rt,
const Mat& dI_dx1, const Mat& dI_dy1,
const Mat& corresps, const Mat& _diffs, const double _sigma,
double fx, double fy, double sobelScaleIn,
Mat& AtA, Mat& AtB, CalcRgbdEquationCoeffsPtr func, int transformDim)
const Mat& dI_dx1, const Mat& dI_dy1,
const Mat& corresps, const Mat& _diffs, const double _sigma,
double fx, double fy, double sobelScaleIn,
Mat& AtA, Mat& AtB, OdometryTransformType transformType)
{
int transformDim = getTransformDim(transformType);
AtA = Mat(transformDim, transformDim, CV_64FC1, Scalar(0));
AtB = Mat(transformDim, 1, CV_64FC1, Scalar(0));
double* AtB_ptr = AtB.ptr<double>();
CV_Assert(Rt.type() == CV_64FC1);
const double* Rt_ptr = Rt.ptr<const double>();
Affine3d rtmat(Rt);
const float* diffs_ptr = _diffs.ptr<float>();
const Vec4i* corresps_ptr = corresps.ptr<Vec4i>();
@ -1005,15 +995,13 @@ void calcRgbdLsmMatrices(const Mat& cloud0, const Mat& Rt,
double w_sobelScale = w * sobelScaleIn;
const Vec4f& p0 = cloud0.at<Vec4f>(v0, u0);
Point3f tp0;
tp0.x = (float)(p0[0] * Rt_ptr[0] + p0[1] * Rt_ptr[1] + p0[2] * Rt_ptr[2] + Rt_ptr[3]);
tp0.y = (float)(p0[0] * Rt_ptr[4] + p0[1] * Rt_ptr[5] + p0[2] * Rt_ptr[6] + Rt_ptr[7]);
tp0.z = (float)(p0[0] * Rt_ptr[8] + p0[1] * Rt_ptr[9] + p0[2] * Rt_ptr[10] + Rt_ptr[11]);
Point3f tp0 = rtmat * Point3f(p0[0], p0[1], p0[2]);
func(A_ptr,
w_sobelScale * dI_dx1.at<short int>(v1, u1),
w_sobelScale * dI_dy1.at<short int>(v1, u1),
tp0, fx, fy);
rgbdCoeffsFunc(transformType,
A_ptr,
w_sobelScale * dI_dx1.at<short int>(v1, u1),
w_sobelScale * dI_dy1.at<short int>(v1, u1),
tp0, fx, fy);
for (int y = 0; y < transformDim; y++)
{
@ -1031,11 +1019,13 @@ void calcRgbdLsmMatrices(const Mat& cloud0, const Mat& Rt,
AtA.at<double>(x, y) = AtA.at<double>(y, x);
}
void calcICPLsmMatrices(const Mat& cloud0, const Mat& Rt,
const Mat& cloud1, const Mat& normals1,
const Mat& corresps,
Mat& AtA, Mat& AtB, CalcICPEquationCoeffsPtr func, int transformDim)
const Mat& cloud1, const Mat& normals1,
const Mat& corresps,
Mat& AtA, Mat& AtB, OdometryTransformType transformType)
{
int transformDim = getTransformDim(transformType);
AtA = Mat(transformDim, transformDim, CV_64FC1, Scalar(0));
AtB = Mat(transformDim, 1, CV_64FC1, Scalar(0));
double* AtB_ptr = AtB.ptr<double>();
@ -1084,18 +1074,21 @@ void calcICPLsmMatrices(const Mat& cloud0, const Mat& Rt,
const Vec4i& c = corresps_ptr[correspIndex];
int u1 = c[2], v1 = c[3];
double w = sigma + std::abs(diffs_ptr[correspIndex]);
double w = sigma +std::abs(diffs_ptr[correspIndex]);
w = w > DBL_EPSILON ? 1. / w : 1.;
Vec4f n4 = normals1.at<Vec4f>(v1, u1);
func(A_ptr, tps0_ptr[correspIndex], Vec3f(n4[0], n4[1], n4[2]) * w);
Vec4f p1 = cloud1.at<Vec4f>(v1, u1);
icpCoeffsFunc(transformType,
A_ptr, tps0_ptr[correspIndex], Point3f(p1[0], p1[1], p1[2]), Vec3f(n4[0], n4[1], n4[2]) * w);
for (int y = 0; y < transformDim; y++)
{
double* AtA_ptr = AtA.ptr<double>(y);
for (int x = y; x < transformDim; x++)
{
AtA_ptr[x] += A_ptr[y] * A_ptr[x];
}
AtB_ptr[y] += A_ptr[y] * w * diffs_ptr[correspIndex];
}
}
@ -1112,7 +1105,7 @@ void computeProjectiveMatrix(const Mat& ksi, Mat& Rt)
const double* ksi_ptr = ksi.ptr<const double>();
// 0.5 multiplication is here because (dual) quaternions keep half an angle/twist inside
Matx44d matdq = (DualQuatd(0, ksi_ptr[0], ksi_ptr[1], ksi_ptr[2],
0, ksi_ptr[3], ksi_ptr[4], ksi_ptr[5]) * 0.5).exp().toMat(QUAT_ASSUME_UNIT);
0, ksi_ptr[3], ksi_ptr[4], ksi_ptr[5]) * 0.5).exp().toMat(QUAT_ASSUME_UNIT);
Mat(matdq).copyTo(Rt);
}

View File

@ -12,9 +12,24 @@ namespace cv
{
enum class OdometryTransformType
{
// rotation, translation, rotation+translation
ROTATION = 1, TRANSLATION = 2, RIGID_TRANSFORMATION = 4
};
static inline int getTransformDim(OdometryTransformType transformType)
{
switch(transformType)
{
case OdometryTransformType::RIGID_TRANSFORMATION:
return 6;
case OdometryTransformType::ROTATION:
case OdometryTransformType::TRANSLATION:
return 3;
default:
CV_Error(Error::StsBadArg, "Incorrect transformation type");
}
}
static inline
void checkImage(InputArray image)
{
@ -23,6 +38,7 @@ void checkImage(InputArray image)
if (image.type() != CV_8UC1)
CV_Error(Error::StsBadSize, "Image type has to be CV_8UC1.");
}
static inline
void checkDepth(InputArray depth, const Size& imageSize)
{
@ -57,77 +73,127 @@ void checkNormals(InputArray normals, const Size& depthSize)
static inline
void calcRgbdEquationCoeffs(double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
Vec6d calcRgbdEquationCoeffs(double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
{
double invz = 1. / p3d.z,
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
Point3d v(v0, v1, v2);
Point3d pxv = p3d.cross(v);
C[0] = -p3d.z * v1 + p3d.y * v2;
C[1] = p3d.z * v0 - p3d.x * v2;
C[2] = -p3d.y * v0 + p3d.x * v1;
C[3] = v0;
C[4] = v1;
C[5] = v2;
return Vec6d(pxv.x, pxv.y, pxv.z, v0, v1, v2);
}
static inline
void calcRgbdEquationCoeffsRotation(double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
Vec3d calcRgbdEquationCoeffsRotation(double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
{
double invz = 1. / p3d.z,
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
C[0] = -p3d.z * v1 + p3d.y * v2;
C[1] = p3d.z * v0 - p3d.x * v2;
C[2] = -p3d.y * v0 + p3d.x * v1;
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
Point3d v(v0, v1, v2);
Point3d pxv = p3d.cross(v);
return Vec3d(pxv);
}
static inline
void calcRgbdEquationCoeffsTranslation(double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
Vec3d calcRgbdEquationCoeffsTranslation(double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
{
double invz = 1. / p3d.z,
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
C[0] = v0;
C[1] = v1;
C[2] = v2;
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
return Vec3d(v0, v1, v2);
}
static inline void rgbdCoeffsFunc(OdometryTransformType transformType,
double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
{
int dim = getTransformDim(transformType);
Vec6d ret;
switch(transformType)
{
case OdometryTransformType::RIGID_TRANSFORMATION:
{
ret = calcRgbdEquationCoeffs(dIdx, dIdy, p3d, fx, fy);
break;
}
case OdometryTransformType::ROTATION:
{
Vec3d r = calcRgbdEquationCoeffsRotation(dIdx, dIdy, p3d, fx, fy);
ret = Vec6d(r[0], r[1], r[2], 0, 0, 0);
break;
}
case OdometryTransformType::TRANSLATION:
{
Vec3d r = calcRgbdEquationCoeffsTranslation(dIdx, dIdy, p3d, fx, fy);
ret = Vec6d(r[0], r[1], r[2], 0, 0, 0);
break;
}
default:
CV_Error(Error::StsBadArg, "Incorrect transformation type");
}
for (int i = 0; i < dim; i++)
C[i] = ret[i];
}
typedef
void (*CalcRgbdEquationCoeffsPtr)(double*, double, double, const Point3f&, double, double);
static inline
void calcICPEquationCoeffs(double* C, const Point3f& p0, const Vec3f& n1)
Vec6d calcICPEquationCoeffs(const Point3f& psrc, const Vec3f& ndst)
{
C[0] = -p0.z * n1[1] + p0.y * n1[2];
C[1] = p0.z * n1[0] - p0.x * n1[2];
C[2] = -p0.y * n1[0] + p0.x * n1[1];
C[3] = n1[0];
C[4] = n1[1];
C[5] = n1[2];
Point3d pxv = psrc.cross(Point3d(ndst));
return Vec6d(pxv.x, pxv.y, pxv.z, ndst[0], ndst[1], ndst[2]);
}
static inline
void calcICPEquationCoeffsRotation(double* C, const Point3f& p0, const Vec3f& n1)
Vec3d calcICPEquationCoeffsRotation(const Point3f& psrc, const Vec3f& ndst)
{
C[0] = -p0.z * n1[1] + p0.y * n1[2];
C[1] = p0.z * n1[0] - p0.x * n1[2];
C[2] = -p0.y * n1[0] + p0.x * n1[1];
Point3d pxv = psrc.cross(Point3d(ndst));
return Vec3d(pxv);
}
static inline
void calcICPEquationCoeffsTranslation(double* C, const Point3f& /*p0*/, const Vec3f& n1)
Vec3d calcICPEquationCoeffsTranslation( const Point3f& /*p0*/, const Vec3f& ndst)
{
C[0] = n1[0];
C[1] = n1[1];
C[2] = n1[2];
return Vec3d(ndst);
}
typedef
void (*CalcICPEquationCoeffsPtr)(double*, const Point3f&, const Vec3f&);
static inline
void icpCoeffsFunc(OdometryTransformType transformType, double* C, const Point3f& p0, const Point3f& /*p1*/, const Vec3f& n1)
{
int dim = getTransformDim(transformType);
Vec6d ret;
switch(transformType)
{
case OdometryTransformType::RIGID_TRANSFORMATION:
{
ret = calcICPEquationCoeffs(p0, n1);
break;
}
case OdometryTransformType::ROTATION:
{
Vec3d r = calcICPEquationCoeffsRotation(p0, n1);
ret = Vec6d(r[0], r[1], r[2], 0, 0, 0);
break;
}
case OdometryTransformType::TRANSLATION:
{
Vec3d r = calcICPEquationCoeffsTranslation(p0, n1);
ret = Vec6d(r[0], r[1], r[2], 0, 0, 0);
break;
}
default:
CV_Error(Error::StsBadArg, "Incorrect transformation type");
}
for (int i = 0; i < dim; i++)
C[i] = ret[i];
}
void prepareRGBDFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, OdometryAlgoType algtype);
void prepareRGBFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, bool useDepth);
@ -159,47 +225,47 @@ template<typename TMat>
void preparePyramidSobel(InputArrayOfArrays pyramidImage, int dx, int dy, InputOutputArrayOfArrays pyramidSobel, int sobelSize);
void preparePyramidTexturedMask(InputArrayOfArrays pyramid_dI_dx, InputArrayOfArrays pyramid_dI_dy,
InputArray minGradMagnitudes, InputArrayOfArrays pyramidMask, double maxPointsPart,
InputOutputArrayOfArrays pyramidTexturedMask, double sobelScale);
InputArray minGradMagnitudes, InputArrayOfArrays pyramidMask, double maxPointsPart,
InputOutputArrayOfArrays pyramidTexturedMask, double sobelScale);
void randomSubsetOfMask(InputOutputArray _mask, float part);
void preparePyramidNormals(InputArray normals, InputArrayOfArrays pyramidDepth, InputOutputArrayOfArrays pyramidNormals);
void preparePyramidNormalsMask(InputArray pyramidNormals, InputArray pyramidMask, double maxPointsPart,
InputOutputArrayOfArrays /*std::vector<Mat>&*/ pyramidNormalsMask);
InputOutputArrayOfArrays /*std::vector<Mat>&*/ pyramidNormalsMask);
bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
const OdometryFrame srcFrame,
const OdometryFrame dstFrame,
const Matx33f& cameraMatrix,
float maxDepthDiff, float angleThreshold, const std::vector<int>& iterCounts,
double maxTranslation, double maxRotation, double sobelScale,
OdometryType method, OdometryTransformType transfromType, OdometryAlgoType algtype);
const OdometryFrame srcFrame,
const OdometryFrame dstFrame,
const Matx33f& cameraMatrix,
float maxDepthDiff, float angleThreshold, const std::vector<int>& iterCounts,
double maxTranslation, double maxRotation, double sobelScale,
OdometryType method, OdometryTransformType transfromType, OdometryAlgoType algtype);
void computeCorresps(const Matx33f& _K, const Matx33f& _K_inv, const Mat& Rt,
const Mat& image0, const Mat& depth0, const Mat& validMask0,
const Mat& image1, const Mat& depth1, const Mat& selectMask1, float maxDepthDiff,
Mat& _corresps, Mat& _diffs, double& _sigma, OdometryType method);
void computeCorresps(const Matx33f& _K, const Mat& Rt,
const Mat& image0, const Mat& depth0, const Mat& validMask0,
const Mat& image1, const Mat& depth1, const Mat& selectMask1, float maxDepthDiff,
Mat& _corresps, Mat& _diffs, double& _sigma, OdometryType method);
void calcRgbdLsmMatrices(const Mat& cloud0, const Mat& Rt,
const Mat& dI_dx1, const Mat& dI_dy1,
const Mat& corresps, const Mat& diffs, const double sigma,
double fx, double fy, double sobelScaleIn,
Mat& AtA, Mat& AtB, CalcRgbdEquationCoeffsPtr func, int transformDim);
const Mat& dI_dx1, const Mat& dI_dy1,
const Mat& corresps, const Mat& diffs, const double sigma,
double fx, double fy, double sobelScaleIn,
Mat& AtA, Mat& AtB, OdometryTransformType transformType);
void calcICPLsmMatrices(const Mat& cloud0, const Mat& Rt,
const Mat& cloud1, const Mat& normals1,
const Mat& corresps,
Mat& AtA, Mat& AtB, CalcICPEquationCoeffsPtr func, int transformDim);
const Mat& cloud1, const Mat& normals1,
const Mat& corresps,
Mat& AtA, Mat& AtB, OdometryTransformType transformType);
void calcICPLsmMatricesFast(Matx33f cameraMatrix, const Mat& oldPts, const Mat& oldNrm, const Mat& newPts, const Mat& newNrm,
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
#ifdef HAVE_OPENCL
bool ocl_calcICPLsmMatricesFast(Matx33f cameraMatrix, const UMat& oldPts, const UMat& oldNrm, const UMat& newPts, const UMat& newNrm,
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
#endif
void computeProjectiveMatrix(const Mat& ksi, Mat& Rt);

View File

@ -6,73 +6,6 @@
namespace opencv_test { namespace {
static
void warpFrame(const Mat& image, const Mat& depth, const Mat& rvec, const Mat& tvec, const Mat& K,
Mat& warpedImage, Mat& warpedDepth)
{
CV_Assert(!image.empty());
CV_Assert(image.type() == CV_8UC1);
CV_Assert(depth.size() == image.size());
CV_Assert(depth.type() == CV_32FC1);
CV_Assert(!rvec.empty());
CV_Assert(rvec.total() == 3);
CV_Assert(rvec.type() == CV_64FC1);
CV_Assert(!tvec.empty());
CV_Assert(tvec.size() == Size(1, 3));
CV_Assert(tvec.type() == CV_64FC1);
warpedImage.create(image.size(), CV_8UC1);
warpedImage = Scalar(0);
warpedDepth.create(image.size(), CV_32FC1);
warpedDepth = Scalar(FLT_MAX);
Mat cloud;
depthTo3d(depth, K, cloud);
Mat cloud3, channels[4];
cv::split(cloud, channels);
std::vector<Mat> merged = { channels[0], channels[1], channels[2] };
cv::merge(merged, cloud3);
Mat Rt = Mat::eye(4, 4, CV_64FC1);
{
Mat R, dst;
cv::Rodrigues(rvec, R);
dst = Rt(Rect(0,0,3,3));
R.copyTo(dst);
dst = Rt(Rect(3,0,1,3));
tvec.copyTo(dst);
}
Mat warpedCloud, warpedImagePoints;
perspectiveTransform(cloud3, warpedCloud, Rt);
projectPoints(warpedCloud.reshape(3, 1), Mat(3,1,CV_32FC1, Scalar(0)), Mat(3,1,CV_32FC1, Scalar(0)), K, Mat(1,5,CV_32FC1, Scalar(0)), warpedImagePoints);
warpedImagePoints = warpedImagePoints.reshape(2, cloud.rows);
Rect r(0, 0, image.cols, image.rows);
for(int y = 0; y < cloud.rows; y++)
{
for(int x = 0; x < cloud.cols; x++)
{
Point p = warpedImagePoints.at<Point2f>(y,x);
if(r.contains(p))
{
float curDepth = warpedDepth.at<float>(p.y, p.x);
float newDepth = warpedCloud.at<Point3f>(y, x).z;
if(newDepth < curDepth && newDepth > 0)
{
warpedImage.at<uchar>(p.y, p.x) = image.at<uchar>(y,x);
warpedDepth.at<float>(p.y, p.x) = newDepth;
}
}
}
}
warpedDepth.setTo(std::numeric_limits<float>::quiet_NaN(), warpedDepth > 100);
}
static
void dilateFrame(Mat& image, Mat& depth)
{
@ -167,14 +100,8 @@ void OdometryTest::readData(Mat& image, Mat& depth) const
image = imread(imageFilename, 0);
depth = imread(depthFilename, -1);
if(image.empty())
{
FAIL() << "Image " << imageFilename.c_str() << " can not be read" << std::endl;
}
if(depth.empty())
{
FAIL() << "Depth" << depthFilename.c_str() << "can not be read" << std::endl;
}
ASSERT_FALSE(image.empty()) << "Image " << imageFilename.c_str() << " can not be read" << std::endl;
ASSERT_FALSE(depth.empty()) << "Depth " << depthFilename.c_str() << "can not be read" << std::endl;
CV_DbgAssert(image.type() == CV_8UC1);
CV_DbgAssert(depth.type() == CV_16UC1);
@ -228,11 +155,7 @@ void OdometryTest::checkUMats()
bool isComputed = odometry.compute(odf, odf, calcRt);
ASSERT_TRUE(isComputed);
double diff = cv::norm(calcRt, Mat::eye(4, 4, CV_64FC1));
if (diff > idError)
{
FAIL() << "Incorrect transformation between the same frame (not the identity matrix), diff = " << diff << std::endl;
}
ASSERT_FALSE(diff > idError) << "Incorrect transformation between the same frame (not the identity matrix), diff = " << diff << std::endl;
}
void OdometryTest::run()
@ -255,15 +178,9 @@ void OdometryTest::run()
odometry.prepareFrame(odf);
bool isComputed = odometry.compute(odf, odf, calcRt);
if(!isComputed)
{
FAIL() << "Can not find Rt between the same frame" << std::endl;
}
ASSERT_TRUE(isComputed) << "Can not find Rt between the same frame" << std::endl;
double ndiff = cv::norm(calcRt, Mat::eye(4,4,CV_64FC1));
if(ndiff > idError)
{
FAIL() << "Incorrect transformation between the same frame (not the identity matrix), diff = " << ndiff << std::endl;
}
ASSERT_FALSE(ndiff > idError) << "Incorrect transformation between the same frame (not the identity matrix), diff = " << ndiff << std::endl;
// 2. Generate random rigid body motion in some ranges several times (iterCount).
// On each iteration an input frame is warped using generated transformation.
@ -278,13 +195,16 @@ void OdometryTest::run()
{
Mat rvec, tvec;
generateRandomTransformation(rvec, tvec);
Affine3d rt(rvec, tvec);
Mat warpedImage, warpedDepth;
warpFrame(image, depth, rvec, tvec, K, warpedImage, warpedDepth);
warpFrame(depth, image, noArray(), rt.matrix, K, warpedDepth, warpedImage);
dilateFrame(warpedImage, warpedDepth); // due to inaccuracy after warping
OdometryFrame odfSrc = odometry.createOdometryFrame();
OdometryFrame odfDst = odometry.createOdometryFrame();
odfSrc.setImage(image);
odfSrc.setDepth(depth);
odfDst.setImage(warpedImage);
@ -294,8 +214,11 @@ void OdometryTest::run()
isComputed = odometry.compute(odfSrc, odfDst, calcRt);
if (!isComputed)
{
CV_LOG_INFO(NULL, "Iter " << iter << "; Odometry compute returned false");
continue;
Mat calcR = calcRt(Rect(0,0,3,3)), calcRvec;
}
Mat calcR = calcRt(Rect(0, 0, 3, 3)), calcRvec;
cv::Rodrigues(calcR, calcRvec);
calcRvec = calcRvec.reshape(rvec.channels(), rvec.rows);
Mat calcTvec = calcRt(Rect(3,0,1,3));
@ -305,7 +228,8 @@ void OdometryTest::run()
imshow("image", image);
imshow("warpedImage", warpedImage);
Mat resultImage, resultDepth;
warpFrame(image, depth, calcRvec, calcTvec, K, resultImage, resultDepth);
warpFrame(depth, image, noArray(), calcRt, K, resultDepth, resultImage);
imshow("resultImage", resultImage);
waitKey(100);
}
@ -321,9 +245,7 @@ void OdometryTest::run()
double tdiffnorm = cv::norm(diff.translation());
if (rdiffnorm < possibleError && tdiffnorm < possibleError)
{
better_1time_count++;
}
if (5. * rdiffnorm < possibleError && 5 * tdiffnorm < possibleError)
better_5times_count++;
@ -462,4 +384,247 @@ TEST(RGBD_Odometry_FastICP, prepareFrame)
test.prepareFrameCheck();
}
struct WarpFrameTest
{
WarpFrameTest() :
srcDepth(), srcRgb(), srcMask(),
dstDepth(), dstRgb(), dstMask(),
warpedDepth(), warpedRgb(), warpedMask()
{}
void run(bool needRgb, bool scaleDown, bool checkMask, bool identityTransform, int depthType, int imageType);
Mat srcDepth, srcRgb, srcMask;
Mat dstDepth, dstRgb, dstMask;
Mat warpedDepth, warpedRgb, warpedMask;
};
void WarpFrameTest::run(bool needRgb, bool scaleDown, bool checkMask, bool identityTransform, int depthType, int rgbType)
{
std::string dataPath = cvtest::TS::ptr()->get_data_path();
std::string srcDepthFilename = dataPath + "/cv/rgbd/depth.png";
std::string srcRgbFilename = dataPath + "/cv/rgbd/rgb.png";
// The depth was generated using the script at testdata/cv/rgbd/warped_depth_generator/warp_test.py
std::string warpedDepthFilename = dataPath + "/cv/rgbd/warpedDepth.png";
std::string warpedRgbFilename = dataPath + "/cv/rgbd/warpedRgb.png";
srcDepth = imread(srcDepthFilename, IMREAD_UNCHANGED);
ASSERT_FALSE(srcDepth.empty()) << "Depth " << srcDepthFilename.c_str() << "can not be read" << std::endl;
if (identityTransform)
{
warpedDepth = srcDepth;
}
else
{
warpedDepth = imread(warpedDepthFilename, IMREAD_UNCHANGED);
ASSERT_FALSE(warpedDepth.empty()) << "Depth " << warpedDepthFilename.c_str() << "can not be read" << std::endl;
}
ASSERT_TRUE(srcDepth.type() == CV_16UC1);
ASSERT_TRUE(warpedDepth.type() == CV_16UC1);
Mat epsSrc = srcDepth > 0, epsWarped = warpedDepth > 0;
const double depthFactor = 5000.0;
// scale float types only
double depthScaleCoeff = scaleDown ? ( depthType == CV_16U ? 1. : 1./depthFactor ) : 1.;
double transScaleCoeff = scaleDown ? ( depthType == CV_16U ? depthFactor : 1. ) : depthFactor;
Mat srcDepthCvt, warpedDepthCvt;
srcDepth.convertTo(srcDepthCvt, depthType, depthScaleCoeff);
srcDepth = srcDepthCvt;
warpedDepth.convertTo(warpedDepthCvt, depthType, depthScaleCoeff);
warpedDepth = warpedDepthCvt;
Scalar badVal;
switch (depthType)
{
case CV_16U:
badVal = 0;
break;
case CV_32F:
badVal = std::numeric_limits<float>::quiet_NaN();
break;
case CV_64F:
badVal = std::numeric_limits<double>::quiet_NaN();
break;
default:
CV_Error(Error::StsBadArg, "Unsupported depth data type");
}
srcDepth.setTo(badVal, ~epsSrc);
warpedDepth.setTo(badVal, ~epsWarped);
if (checkMask)
{
srcMask = epsSrc; warpedMask = epsWarped;
}
else
{
srcMask = Mat(); warpedMask = Mat();
}
if (needRgb)
{
srcRgb = imread(srcRgbFilename, rgbType == CV_8UC1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);
ASSERT_FALSE(srcRgb.empty()) << "Image " << srcRgbFilename.c_str() << "can not be read" << std::endl;
if (identityTransform)
{
srcRgb.copyTo(warpedRgb, epsSrc);
}
else
{
warpedRgb = imread(warpedRgbFilename, rgbType == CV_8UC1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);
ASSERT_FALSE (warpedRgb.empty()) << "Image " << warpedRgbFilename.c_str() << "can not be read" << std::endl;
}
if (rgbType == CV_8UC4)
{
Mat newSrcRgb, newWarpedRgb;
cvtColor(srcRgb, newSrcRgb, COLOR_RGB2RGBA);
srcRgb = newSrcRgb;
// let's keep alpha channel
std::vector<Mat> warpedRgbChannels;
split(warpedRgb, warpedRgbChannels);
warpedRgbChannels.push_back(epsWarped);
merge(warpedRgbChannels, newWarpedRgb);
warpedRgb = newWarpedRgb;
}
ASSERT_TRUE(srcRgb.type() == rgbType);
ASSERT_TRUE(warpedRgb.type() == rgbType);
}
else
{
srcRgb = Mat(); warpedRgb = Mat();
}
// test data used to generate warped depth and rgb
// the script used to generate is in opencv_extra repo
// at testdata/cv/rgbd/warped_depth_generator/warp_test.py
double fx = 525.0, fy = 525.0,
cx = 319.5, cy = 239.5;
Matx33d K(fx, 0, cx,
0, fy, cy,
0, 0, 1);
cv::Affine3d rt;
cv::Vec3d tr(-0.04, 0.05, 0.6);
rt = identityTransform ? cv::Affine3d() : cv::Affine3d(cv::Vec3d(0.1, 0.2, 0.3), tr * transScaleCoeff);
warpFrame(srcDepth, srcRgb, srcMask, rt.matrix, K, dstDepth, dstRgb, dstMask);
}
typedef std::pair<int, int> WarpFrameInputTypes;
typedef testing::TestWithParam<WarpFrameInputTypes> WarpFrameInputs;
TEST_P(WarpFrameInputs, checkTypes)
{
const double shortl2diff = 233.983;
const double shortlidiff = 1;
const double floatl2diff = 0.038209;
const double floatlidiff = 0.00020004;
int depthType = GetParam().first;
int rgbType = GetParam().second;
WarpFrameTest w;
// scale down does not happen on CV_16U
// to avoid integer overflow
w.run(/* needRgb */ true, /* scaleDown*/ true,
/* checkMask */ true, /* identityTransform */ false, depthType, rgbType);
double rgbDiff = cv::norm(w.dstRgb, w.warpedRgb, NORM_L2);
double maskDiff = cv::norm(w.dstMask, w.warpedMask, NORM_L2);
EXPECT_EQ(0, maskDiff);
EXPECT_EQ(0, rgbDiff);
double l2diff = cv::norm(w.dstDepth, w.warpedDepth, NORM_L2, w.warpedMask);
double lidiff = cv::norm(w.dstDepth, w.warpedDepth, NORM_INF, w.warpedMask);
double l2threshold = depthType == CV_16U ? shortl2diff : floatl2diff;
double lithreshold = depthType == CV_16U ? shortlidiff : floatlidiff;
EXPECT_LE(l2diff, l2threshold);
EXPECT_LE(lidiff, lithreshold);
}
INSTANTIATE_TEST_CASE_P(RGBD_Odometry, WarpFrameInputs, ::testing::Values(
WarpFrameInputTypes { CV_16U, CV_8UC3 },
WarpFrameInputTypes { CV_32F, CV_8UC3 },
WarpFrameInputTypes { CV_64F, CV_8UC3 },
WarpFrameInputTypes { CV_32F, CV_8UC1 },
WarpFrameInputTypes { CV_32F, CV_8UC4 }));
TEST(RGBD_Odometry_WarpFrame, identity)
{
WarpFrameTest w;
w.run(/* needRgb */ true, /* scaleDown*/ true, /* checkMask */ true, /* identityTransform */ true, CV_32F, CV_8UC3);
double rgbDiff = cv::norm(w.dstRgb, w.warpedRgb, NORM_L2);
double maskDiff = cv::norm(w.dstMask, w.warpedMask, NORM_L2);
ASSERT_EQ(0, rgbDiff);
ASSERT_EQ(0, maskDiff);
double depthDiff = cv::norm(w.dstDepth, w.warpedDepth, NORM_L2, w.dstMask);
ASSERT_LE(depthDiff, DBL_EPSILON);
}
TEST(RGBD_Odometry_WarpFrame, noRgb)
{
WarpFrameTest w;
w.run(/* needRgb */ false, /* scaleDown*/ true, /* checkMask */ true, /* identityTransform */ false, CV_32F, CV_8UC3);
double maskDiff = cv::norm(w.dstMask, w.warpedMask, NORM_L2);
ASSERT_EQ(0, maskDiff);
double l2diff = cv::norm(w.dstDepth, w.warpedDepth, NORM_L2, w.warpedMask);
double lidiff = cv::norm(w.dstDepth, w.warpedDepth, NORM_INF, w.warpedMask);
ASSERT_LE(l2diff, 0.038209);
ASSERT_LE(lidiff, 0.00020004);
}
TEST(RGBD_Odometry_WarpFrame, nansAreMasked)
{
WarpFrameTest w;
w.run(/* needRgb */ true, /* scaleDown*/ true, /* checkMask */ false, /* identityTransform */ false, CV_32F, CV_8UC3);
double rgbDiff = cv::norm(w.dstRgb, w.warpedRgb, NORM_L2);
ASSERT_EQ(0, rgbDiff);
Mat goodVals = (w.warpedDepth == w.warpedDepth);
double l2diff = cv::norm(w.dstDepth, w.warpedDepth, NORM_L2, goodVals);
double lidiff = cv::norm(w.dstDepth, w.warpedDepth, NORM_INF, goodVals);
ASSERT_LE(l2diff, 0.038209);
ASSERT_LE(lidiff, 0.00020004);
}
TEST(RGBD_Odometry_WarpFrame, bigScale)
{
WarpFrameTest w;
w.run(/* needRgb */ true, /* scaleDown*/ false, /* checkMask */ true, /* identityTransform */ false, CV_32F, CV_8UC3);
double rgbDiff = cv::norm(w.dstRgb, w.warpedRgb, NORM_L2);
double maskDiff = cv::norm(w.dstMask, w.warpedMask, NORM_L2);
ASSERT_EQ(0, maskDiff);
ASSERT_EQ(0, rgbDiff);
double l2diff = cv::norm(w.dstDepth, w.warpedDepth, NORM_L2, w.warpedMask);
double lidiff = cv::norm(w.dstDepth, w.warpedDepth, NORM_INF, w.warpedMask);
ASSERT_LE(l2diff, 191.026565);
ASSERT_LE(lidiff, 0.99951172);
}
}} // namespace

View File

@ -13,6 +13,7 @@ def main():
help="""DEPTH - works with depth,
RGB - works with images,
RGB_DEPTH - works with all,
SCALE - works with depth and calculate Rt with scale,
default - runs all algos""",
default="")
parser.add_argument(
@ -50,18 +51,17 @@ def main():
odometry = cv.Odometry(cv.DEPTH)
Rt = np.zeros((4, 4))
odometry.compute(depth1, depth2, Rt)
print(Rt)
print("Rt:\n {}".format(Rt))
if args.algo == "RGB" or args.algo == "":
odometry = cv.Odometry(cv.RGB)
Rt = np.zeros((4, 4))
odometry.compute(rgb1, rgb2, Rt)
print(Rt)
print("Rt:\n {}".format(Rt))
if args.algo == "RGB_DEPTH" or args.algo == "":
odometry = cv.Odometry(cv.RGB_DEPTH)
Rt = np.zeros((4, 4))
odometry.compute(depth1, rgb1, depth2, rgb2, Rt)
print(Rt)
print("Rt:\n {}".format(Rt))
if __name__ == '__main__':