Mirror of https://github.com/opencv/opencv.git, synced 2024-12-12 07:09:12 +08:00
Merge pull request #22598 from savuor:icp_oframe_readonly
Complement PR: #3366@contrib

Changes:
- OdometryFrame loses its getters: a user can provide data at the construction stage only; pyramids and other generated data are read-only now
- OdometryFrame is based on UMats: no TMat templates inside; CPU operations are done with the UMat::getMat() method; chaining issues are solved ad hoc
- No more Odometry::createOdometryFrame() method; frames are compatible with all odometry algorithms
- The normals computer is cached inside Odometry and exposed in the API, as well as its settings
- Volume::raycast() won't return the result in an OdometryFrame anymore
- Added a test for Odometry::prepareFrame*() and other test fixes
- Minor code improvements

TODOs:
- fix TODOs in code
- lower acceptable accuracy errors
parent 8358efaca1
commit 8b7e586faa
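For context, the workflow implied by these changes passes input data to OdometryFrame at construction instead of setting it afterwards, and drops Odometry::createOdometryFrame(). The snippet below is a minimal, unverified sketch of that usage based only on the signatures visible in the diff below; the include path and the surrounding setup are assumptions, not part of this PR.

```cpp
#include <opencv2/core.hpp>
#include <opencv2/3d.hpp>   // assumed header for the Odometry / OdometryFrame API touched by this PR

using namespace cv;

int main()
{
    // Assume these are filled elsewhere: CV_8UC1 grayscale images and CV_32FC1 depth maps.
    Mat gray0, depth0, gray1, depth1;

    // Data is provided at construction only; pyramids and other generated data are read-only afterwards.
    OdometryFrame src(gray0, depth0);
    OdometryFrame dst(gray1, depth1);

    // No createOdometryFrame() anymore: the same frames are compatible with all odometry algorithms.
    Odometry odom;                  // default-constructed odometry, as in the diff
    odom.prepareFrames(src, dst);   // const now; caches the normals computer internally

    Mat Rt;
    bool ok = odom.compute(src, dst, Rt);   // rigid transform such that Rt * src = dst

    // The cached normals computer (and its settings) is now exposed through the API.
    Ptr<RgbdNormals> normalsComputer = odom.getNormalsComputer();

    return ok ? 0 : 1;
}
```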
@ -104,13 +104,14 @@ CV_EXPORTS_W void registerDepth(InputArray unregisteredCameraMatrix, InputArray
*/
CV_EXPORTS_W void depthTo3dSparse(InputArray depth, InputArray in_K, InputArray in_points, OutputArray points3d);

/** Converts a depth image to an organized set of 3d points.
/** Converts a depth image to 3d points. If the mask is empty then the resulting array has the same dimensions as `depth`,
* otherwise it is 1d vector containing mask-enabled values only.
* The coordinate system is x pointing left, y down and z away from the camera
* @param depth the depth image (if given as short int CV_U, it is assumed to be the depth in millimeters
* (as done with the Microsoft Kinect), otherwise, if given as CV_32F or CV_64F, it is assumed in meters)
* @param K The calibration matrix
* @param points3d the resulting 3d points (point is represented by 4 chanels value [x, y, z, 0]). They are of depth the same as `depth` if it is CV_32F or CV_64F, and the
* depth of `K` if `depth` is of depth CV_U
* @param points3d the resulting 3d points (point is represented by 4 channels value [x, y, z, 0]). They are of the same depth as `depth` if it is CV_32F or CV_64F, and the
* depth of `K` if `depth` is of depth CV_16U or CV_16S
* @param mask the mask of the points to consider (can be empty)
*/
CV_EXPORTS_W void depthTo3d(InputArray depth, InputArray K, OutputArray points3d, InputArray mask = noArray());
@ -52,7 +52,7 @@ public:
virtual ~Submap() = default;

virtual void integrate(InputArray _depth, const int currframeId);
virtual void raycast(const Odometry& icp, const cv::Affine3f& cameraPose, cv::Size frameSize,
virtual void raycast(const cv::Affine3f& cameraPose, cv::Size frameSize,
OutputArray points = noArray(), OutputArray normals = noArray());

virtual int getTotalAllocatedBlocks() const { return int(volume.getTotalVolumeUnits()); };

@ -112,25 +112,21 @@ void Submap<MatType>::integrate(InputArray _depth, const int currFrameId)
}

template<typename MatType>
void Submap<MatType>::raycast(const Odometry& icp, const cv::Affine3f& _cameraPose, cv::Size frameSize,
void Submap<MatType>::raycast(const cv::Affine3f& _cameraPose, cv::Size frameSize,
OutputArray points, OutputArray normals)
{
if (!points.needed() && !normals.needed())
{
MatType pts, nrm;

frame.getPyramidAt(pts, OdometryFramePyramidType::PYR_CLOUD, 0);
frame.getPyramidAt(nrm, OdometryFramePyramidType::PYR_NORM, 0);
//TODO: get depth instead of pts from raycast
volume.raycast(_cameraPose.matrix, frameSize.height, frameSize.height, pts, nrm);
frame.setPyramidAt(pts, OdometryFramePyramidType::PYR_CLOUD, 0);
frame.setPyramidAt(nrm, OdometryFramePyramidType::PYR_NORM, 0);

std::vector<MatType> pch(3);
split(pts, pch);

renderFrame = frame;

Mat depth;
frame.getScaledDepth(depth);
frame = icp.createOdometryFrame();
frame.setDepth(depth);
frame = OdometryFrame(noArray(), pch[2]);
}
else
{
@ -43,24 +43,16 @@ public:
Odometry(OdometryType otype, const OdometrySettings settings, OdometryAlgoType algtype);
~Odometry();

/** Create new odometry frame
* The Type (Mat or UMat) depends on odometry type
*/
OdometryFrame createOdometryFrame() const;

// Deprecated
OdometryFrame createOdometryFrame(OdometryFrameStoreType matType) const;

/** Prepare frame for odometry calculation
* @param frame odometry prepare this frame as src frame and dst frame simultaneously
*/
void prepareFrame(OdometryFrame& frame);
void prepareFrame(OdometryFrame& frame) const;

/** Prepare frame for odometry calculation
* @param srcFrame frame will be prepared as src frame ("original" image)
* @param dstFrame frame will be prepared as dsr frame ("rotated" image)
*/
void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame);
void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const;

/** Compute Rigid Transformation between two frames so that Rt * src = dst
* @param srcFrame src frame ("original" image)

@ -71,11 +63,15 @@ public:
* R_31 R_32 R_33 t_3
* 0 0 0 1 }
*/
bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt);
bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const;

CV_WRAP bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const;
CV_WRAP bool compute(InputArray srcDepthFrame, InputArray dstDepthFrame, OutputArray Rt) const;
CV_WRAP bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame, InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const;

//TODO: document it
//requires frame size, initialized at prepareFrame stage()
Ptr<RgbdNormals> getNormalsComputer() const;

class Impl;
private:
Ptr<Impl> impl;
@ -9,62 +9,45 @@

namespace cv
{
/** Indicates what pyramid is to access using get/setPyramid... methods:
* @param PYR_IMAGE The pyramid of RGB images
* @param PYR_DEPTH The pyramid of depth images
* @param PYR_MASK The pyramid of masks
* @param PYR_CLOUD The pyramid of point clouds, produced from the pyramid of depths
* @param PYR_DIX The pyramid of dI/dx derivative images
* @param PYR_DIY The pyramid of dI/dy derivative images
* @param PYR_TEXMASK The pyramid of textured masks
* @param PYR_NORM The pyramid of normals
* @param PYR_NORMMASK The pyramid of normals masks
/** Indicates what pyramid is to access using getPyramidAt() method:
*/

enum OdometryFramePyramidType
{
PYR_IMAGE = 0,
PYR_DEPTH = 1,
PYR_MASK = 2,
PYR_CLOUD = 3,
PYR_DIX = 4,
PYR_DIY = 5,
PYR_TEXMASK = 6,
PYR_NORM = 7,
PYR_NORMMASK = 8,
PYR_IMAGE = 0, //!< The pyramid of grayscale images
PYR_DEPTH = 1, //!< The pyramid of depth images
PYR_MASK = 2, //!< The pyramid of masks
PYR_CLOUD = 3, //!< The pyramid of point clouds, produced from the pyramid of depths
PYR_DIX = 4, //!< The pyramid of dI/dx derivative images
PYR_DIY = 5, //!< The pyramid of dI/dy derivative images
PYR_TEXMASK = 6, //!< The pyramid of "textured" masks (i.e. additional masks for normals or grayscale images)
PYR_NORM = 7, //!< The pyramid of normals
PYR_NORMMASK = 8, //!< The pyramid of normals masks
N_PYRAMIDS
};

enum class OdometryFrameStoreType
{
MAT = 0,
UMAT = 1
};

class CV_EXPORTS_W OdometryFrame
{
public:
OdometryFrame();
OdometryFrame(OdometryFrameStoreType matType);
//TODO: add to docs: check image channels, if 3 or 4 then do cvtColor(BGR(A)2GRAY)
OdometryFrame(InputArray image = noArray(), InputArray depth = noArray(), InputArray mask = noArray(), InputArray normals = noArray());
~OdometryFrame() {};
void setImage(InputArray image);

void getImage(OutputArray image) const;
void getGrayImage(OutputArray image) const;
void setDepth(InputArray depth);
void getDepth(OutputArray depth) const;
void getScaledDepth(OutputArray depth) const;
void setMask(InputArray mask);
void getMask(OutputArray mask) const;
void setNormals(InputArray normals);
void getNormals(OutputArray normals) const;
void setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype);
void setPyramidLevels(size_t _nLevels);
size_t getPyramidLevels(OdometryFramePyramidType oftype) const;
void setPyramidAt(InputArray img, OdometryFramePyramidType pyrType, size_t level);

//TODO: add docs
// returns amt of levels in pyramids (all of them should have the same amt of levels) or 0 if no pyramids were prepared yet
size_t getPyramidLevels() const;
//TODO: add docs
// returns empty img if no data in the pyramid or in the pyramid's level
void getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const;

class Impl;
private:
Ptr<Impl> impl;
};
}
@ -33,8 +33,13 @@ public:
int getSobelSize() const;
void setSobelScale(double val);
double getSobelScale() const;

void setNormalWinSize(int val);
int getNormalWinSize() const;
void setNormalDiffThreshold(float val);
float getNormalDiffThreshold() const;
void setNormalMethod(RgbdNormals::RgbdNormalsMethod nm);
RgbdNormals::RgbdNormalsMethod getNormalMethod() const;

void setAngleThreshold(float val);
float getAngleThreshold() const;
@ -61,15 +61,6 @@ public:

Rendered image size and camera intrinsics are taken from volume settings structure.

* @param cameraPose the pose of camera in global coordinates.
* @param outFrame the object where to store rendered points and normals.
*/
void raycast(InputArray cameraPose, OdometryFrame& outFrame) const;

/** @brief Renders the volume contents into an image. The resulting points and normals are in camera's coordinate system.

Rendered image size and camera intrinsics are taken from volume settings structure.

* @param cameraPose the pose of camera in global coordinates.
* @param points image to store rendered points.
* @param normals image to store rendered normals corresponding to points.

@ -81,17 +72,6 @@ public:

Rendered image size and camera intrinsics are taken from volume settings structure.

* @param cameraPose the pose of camera in global coordinates.
* @param height the height of result image.
* @param width the width of result image.
* @param outFrame the object where to store rendered points and normals.
*/
void raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const;

/** @brief Renders the volume contents into an image. The resulting points and normals are in camera's coordinate system.

Rendered image size and camera intrinsics are taken from volume settings structure.

* @param cameraPose the pose of camera in global coordinates.
* @param height the height of result image.
* @param width the width of result image.
@ -6,8 +6,10 @@ import cv2 as cv
from tests_common import NewOpenCVTests

class odometry_test(NewOpenCVTests):
def test_OdometryDefault(self):
def commonOdometryTest(self, needRgb, otype):
depth = self.get_sample('cv/rgbd/depth.png', cv.IMREAD_ANYDEPTH).astype(np.float32)
if needRgb:
rgb = self.get_sample('cv/rgbd/rgb.png', cv.IMREAD_ANYCOLOR)
radian = np.radians(1)
Rt_warp = np.array(
[[np.cos(radian), -np.sin(radian), 0],

@ -22,91 +24,33 @@ class odometry_test(NewOpenCVTests):
)
Rt_res = np.zeros((4, 4))

odometry = cv.Odometry()
if otype is not None:
odometry = cv.Odometry(otype)
else:
odometry = cv.Odometry()
warped_depth = cv.warpPerspective(depth, Rt_warp, (640, 480))
isCorrect = odometry.compute(depth, warped_depth, Rt_res)
if needRgb:
warped_rgb = cv.warpPerspective(rgb, Rt_warp, (640, 480))
isCorrect = odometry.compute(depth, rgb, warped_depth, warped_rgb, Rt_res)
else:
isCorrect = odometry.compute(depth, warped_depth, Rt_res)

res = np.absolute(Rt_curr - Rt_res).sum()
eps = 0.15
self.assertLessEqual(res, eps)
self.assertTrue(isCorrect)

def test_OdometryDefault(self):
self.commonOdometryTest(False, None)

def test_OdometryDepth(self):
depth = self.get_sample('cv/rgbd/depth.png', cv.IMREAD_ANYDEPTH).astype(np.float32)
radian = np.radians(1)
Rt_warp = np.array(
[[np.cos(radian), -np.sin(radian), 0],
[np.sin(radian), np.cos(radian), 0],
[0, 0, 1]], dtype=np.float32
)
Rt_curr = np.array(
[[np.cos(radian), -np.sin(radian), 0, 0],
[np.sin(radian), np.cos(radian), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], dtype=np.float32
)
Rt_res = np.zeros((4, 4))

odometry = cv.Odometry(cv.DEPTH)
warped_depth = cv.warpPerspective(depth, Rt_warp, (640, 480))

isCorrect = odometry.compute(depth, warped_depth, Rt_res)
res = np.absolute(Rt_curr - Rt_res).sum()
eps = 0.15
self.assertLessEqual(res, eps)
self.assertTrue(isCorrect)
self.commonOdometryTest(False, cv.DEPTH)

def test_OdometryRGB(self):
rgb = self.get_sample('cv/rgbd/rgb.png', cv.IMREAD_ANYCOLOR)
radian = np.radians(1)
Rt_warp = np.array(
[[np.cos(radian), -np.sin(radian), 0],
[np.sin(radian), np.cos(radian), 0],
[0, 0, 1]], dtype=np.float32
)
Rt_curr = np.array(
[[np.cos(radian), -np.sin(radian), 0, 0],
[np.sin(radian), np.cos(radian), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], dtype=np.float32
)
Rt_res = np.zeros((4, 4))

odometry = cv.Odometry(cv.RGB)
warped_rgb = cv.warpPerspective(rgb, Rt_warp, (640, 480))
isCorrect = odometry.compute(rgb, warped_rgb, Rt_res)

res = np.absolute(Rt_curr - Rt_res).sum()
eps = 0.15
self.assertLessEqual(res, eps)
self.assertTrue(isCorrect)
self.commonOdometryTest(True, cv.RGB)

def test_OdometryRGB_Depth(self):
depth = self.get_sample('cv/rgbd/depth.png', cv.IMREAD_ANYDEPTH).astype(np.float32)
rgb = self.get_sample('cv/rgbd/rgb.png', cv.IMREAD_ANYCOLOR)
radian = np.radians(1)
Rt_warp = np.array(
[[np.cos(radian), -np.sin(radian), 0],
[np.sin(radian), np.cos(radian), 0],
[0, 0, 1]], dtype=np.float32
)
Rt_curr = np.array(
[[np.cos(radian), -np.sin(radian), 0, 0],
[np.sin(radian), np.cos(radian), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], dtype=np.float32
)
Rt_res = np.zeros((4, 4))

odometry = cv.Odometry(cv.RGB_DEPTH)
warped_depth = cv.warpPerspective(depth, Rt_warp, (640, 480))
warped_rgb = cv.warpPerspective(rgb, Rt_warp, (640, 480))
isCorrect = odometry.compute(depth, rgb, warped_depth, warped_rgb, Rt_res)

res = np.absolute(Rt_curr - Rt_res).sum()
eps = 0.15
self.assertLessEqual(res, eps)
self.assertTrue(isCorrect)
self.commonOdometryTest(True, cv.RGB_DEPTH)

if __name__ == '__main__':
NewOpenCVTests.bootstrap()
@ -471,13 +471,11 @@ PERF_TEST(Perf_TSDF, integrate_frame)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
OdometryFrame odf;
odf.setDepth(depth);
OdometryFrame odf(noArray(), depth);

startTimer();
volume.integrate(odf, pose);
stopTimer();

}
SANITY_CHECK_NOTHING();
}
@ -521,55 +519,6 @@ PERF_TEST(Perf_TSDF, raycast_mat)
SANITY_CHECK_NOTHING();
}

// Perf_TSDF_GPU.raycast_frame
#ifdef HAVE_OPENCL
PERF_TEST(Perf_TSDF_GPU, raycast_frame)
#else
PERF_TEST(Perf_TSDF, raycast_frame)
#endif
{
VolumeType volumeType = VolumeType::TSDF;
VolumeSettings vs(volumeType);
Volume volume(volumeType, vs);

Size frameSize(vs.getRaycastWidth(), vs.getRaycastHeight());
Matx33f intr;
vs.getCameraIntegrateIntrinsics(intr);
bool onlySemisphere = false;
float depthFactor = vs.getDepthFactor();
Vec3f lightPose = Vec3f::all(0.f);
Ptr<Scene> scene = Scene::create(frameSize, intr, depthFactor, onlySemisphere);
std::vector<Affine3f> poses = scene->getPoses();

for (size_t i = 0; i < poses.size(); i++)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);

#ifdef HAVE_OPENCL
OdometryFrame odf(OdometryFrameStoreType::UMAT);
#else
OdometryFrame odf;
#endif

odf.setDepth(depth);
volume.integrate(odf, pose);

startTimer();
volume.raycast(pose, frameSize.height, frameSize.width, odf);
stopTimer();

if (display)
{
Mat points, normals;
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
displayImage(depth, points, normals, depthFactor, lightPose);
}
}
SANITY_CHECK_NOTHING();
}


#ifdef HAVE_OPENCL
// Perf_TSDF_CPU.integrate_mat
@ -626,8 +575,7 @@ PERF_TEST(Perf_TSDF_CPU, integrate_frame)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
OdometryFrame odf;
odf.setDepth(depth);
OdometryFrame odf(noArray(), depth);

startTimer();
volume.integrate(odf, pose);
@ -678,53 +626,8 @@ PERF_TEST(Perf_TSDF_CPU, raycast_mat)
cv::ocl::setUseOpenCL(true);
}

// Perf_TSDF_CPU.raycast_frame
PERF_TEST(Perf_TSDF_CPU, raycast_frame)
{
cv::ocl::setUseOpenCL(false);

VolumeType volumeType = VolumeType::TSDF;
VolumeSettings vs(volumeType);
Volume volume(volumeType, vs);

Size frameSize(vs.getRaycastWidth(), vs.getRaycastHeight());
Matx33f intr;
vs.getCameraIntegrateIntrinsics(intr);
bool onlySemisphere = false;
float depthFactor = vs.getDepthFactor();
Vec3f lightPose = Vec3f::all(0.f);
Ptr<Scene> scene = Scene::create(frameSize, intr, depthFactor, onlySemisphere);
std::vector<Affine3f> poses = scene->getPoses();

for (size_t i = 0; i < poses.size(); i++)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
OdometryFrame odf;
odf.setDepth(depth);
volume.integrate(odf, pose);

startTimer();
volume.raycast(pose, frameSize.height, frameSize.width, odf);
stopTimer();

if (display)
{
Mat points, normals;
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
displayImage(depth, points, normals, depthFactor, lightPose);
}
}
SANITY_CHECK_NOTHING();

cv::ocl::setUseOpenCL(true);
}
#endif



// Perf_HashTSDF_GPU.integrate_mat
#ifdef HAVE_OPENCL
PERF_TEST(Perf_HashTSDF_GPU, integrate_mat)
@ -780,8 +683,7 @@ PERF_TEST(Perf_HashTSDF, integrate_frame)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
OdometryFrame odf;
odf.setDepth(depth);
OdometryFrame odf(noArray(), depth);

startTimer();
volume.integrate(odf, pose);
@ -830,56 +732,6 @@ PERF_TEST(Perf_HashTSDF, raycast_mat)
SANITY_CHECK_NOTHING();
}

// Perf_HashTSDF_GPU.raycast_frame
#ifdef HAVE_OPENCL
PERF_TEST(Perf_HashTSDF_GPU, raycast_frame)
#else
PERF_TEST(Perf_HashTSDF, raycast_frame)
#endif
{
VolumeType volumeType = VolumeType::HashTSDF;
VolumeSettings vs(volumeType);
Volume volume(volumeType, vs);

Size frameSize(vs.getRaycastWidth(), vs.getRaycastHeight());
Matx33f intr;
vs.getCameraIntegrateIntrinsics(intr);
bool onlySemisphere = false;
float depthFactor = vs.getDepthFactor();
Vec3f lightPose = Vec3f::all(0.f);
Ptr<Scene> scene = Scene::create(frameSize, intr, depthFactor, onlySemisphere);
std::vector<Affine3f> poses = scene->getPoses();

for (size_t i = 0; i < poses.size(); i++)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);

#ifdef HAVE_OPENCL
OdometryFrame odf(OdometryFrameStoreType::UMAT);
#else
OdometryFrame odf;
#endif

odf.setDepth(depth);
volume.integrate(odf, pose);

startTimer();
volume.raycast(pose, frameSize.height, frameSize.width, odf);
stopTimer();

if (display)
{
Mat points, normals;
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
displayImage(depth, points, normals, depthFactor, lightPose);
}
}
SANITY_CHECK_NOTHING();
}


#ifdef HAVE_OPENCL
// Perf_HashTSDF_CPU.integrate_mat
PERF_TEST(Perf_HashTSDF_CPU, integrate_mat)
@ -935,8 +787,7 @@ PERF_TEST(Perf_HashTSDF_CPU, integrate_frame)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
OdometryFrame odf;
odf.setDepth(depth);
OdometryFrame odf(noArray(), depth);

startTimer();
volume.integrate(odf, pose);
@ -985,53 +836,9 @@ PERF_TEST(Perf_HashTSDF_CPU, raycast_mat)

cv::ocl::setUseOpenCL(true);
}

// Perf_HashTSDF_CPU.raycast_frame
PERF_TEST(Perf_HashTSDF_CPU, raycast_frame)
{
cv::ocl::setUseOpenCL(false);

VolumeType volumeType = VolumeType::HashTSDF;
VolumeSettings vs(volumeType);
Volume volume(volumeType, vs);

Size frameSize(vs.getRaycastWidth(), vs.getRaycastHeight());
Matx33f intr;
vs.getCameraIntegrateIntrinsics(intr);
bool onlySemisphere = false;
float depthFactor = vs.getDepthFactor();
Vec3f lightPose = Vec3f::all(0.f);
Ptr<Scene> scene = Scene::create(frameSize, intr, depthFactor, onlySemisphere);
std::vector<Affine3f> poses = scene->getPoses();

for (size_t i = 0; i < poses.size(); i++)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
OdometryFrame odf;
odf.setDepth(depth);
volume.integrate(odf, pose);

startTimer();
volume.raycast(pose, frameSize.height, frameSize.width, odf);
stopTimer();

if (display)
{
Mat points, normals;
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
displayImage(depth, points, normals, depthFactor, lightPose);
}
}
SANITY_CHECK_NOTHING();

cv::ocl::setUseOpenCL(true);
}
#endif



// Perf_ColorTSDF_CPU.integrate_mat
#ifdef HAVE_OPENCL
PERF_TEST(Perf_ColorTSDF_CPU, integrate_mat)
@ -1089,9 +896,7 @@ PERF_TEST(Perf_ColorTSDF, integrate_frame)
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
Mat rgb = scene->rgb(pose);
OdometryFrame odf;
odf.setDepth(depth);
odf.setImage(rgb);
OdometryFrame odf(rgb, depth);

startTimer();
volume.integrate(odf, pose);
@ -1141,52 +946,4 @@ PERF_TEST(Perf_ColorTSDF, raycast_mat)
SANITY_CHECK_NOTHING();
}

// Perf_ColorTSDF_CPU.raycast_frame
#ifdef HAVE_OPENCL
PERF_TEST(Perf_ColorTSDF_CPU, raycast_frame)
#else
PERF_TEST(Perf_ColorTSDF, raycast_frame)
#endif
{
VolumeType volumeType = VolumeType::ColorTSDF;
VolumeSettings vs(volumeType);
Volume volume(volumeType, vs);

Size frameSize(vs.getRaycastWidth(), vs.getRaycastHeight());
Matx33f intr;
vs.getCameraIntegrateIntrinsics(intr);
bool onlySemisphere = false;
float depthFactor = vs.getDepthFactor();
Vec3f lightPose = Vec3f::all(0.f);
Ptr<Scene> scene = Scene::create(frameSize, intr, depthFactor, onlySemisphere);
std::vector<Affine3f> poses = scene->getPoses();

for (size_t i = 0; i < poses.size(); i++)
{
Matx44f pose = poses[i].matrix;
Mat depth = scene->depth(pose);
Mat rgb = scene->rgb(pose);

OdometryFrame odf;
odf.setDepth(depth);
odf.setImage(rgb);
volume.integrate(odf, pose);

startTimer();
volume.raycast(pose, frameSize.height, frameSize.width, odf);
stopTimer();

if (display)
{
Mat points, normals, colors;
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
displayColorImage(depth, rgb, points, normals, colors, depthFactor, lightPose);
}
}
SANITY_CHECK_NOTHING();
}


}} // namespace
@ -128,8 +128,7 @@ int main(int argc, char** argv)
return -1;
}

OdometryFrame frame_prev = odometry.createOdometryFrame(),
frame_curr = odometry.createOdometryFrame();
OdometryFrame frame_prev, frame_curr;

TickMeter gtm;
int count = 0;

@ -182,8 +181,7 @@ int main(int argc, char** argv)
{
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
frame_curr.setImage(gray);
frame_curr.setDepth(depth);
frame_curr = OdometryFrame(gray, depth);

Mat Rt;
if(!Rts.empty())

@ -216,7 +214,7 @@ int main(int argc, char** argv)
//if (!frame_prev.empty())
// frame_prev.release();
frame_prev = frame_curr;
frame_curr = odometry.createOdometryFrame();
frame_curr = OdometryFrame();
//std::swap(frame_prev, frame_curr);
}
}
@ -8,6 +8,41 @@
namespace cv
{

/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
* Otherwise, the image is simply converted to floats
* @param in_in the depth image (if given as short int CV_U, it is assumed to be the depth in millimeters
* (as done with the Microsoft Kinect), it is assumed in meters)
* @param depth the desired output depth (floats or double)
* @param out_out The rescaled float depth image
*/
void rescaleDepth(InputArray in_in, int type, OutputArray out_out, double depth_factor)
{
cv::Mat in = in_in.getMat();
CV_Assert(in.type() == CV_64FC1 || in.type() == CV_32FC1 || in.type() == CV_16UC1 || in.type() == CV_16SC1);
CV_Assert(type == CV_64FC1 || type == CV_32FC1);

int in_depth = in.depth();

out_out.create(in.size(), type);
cv::Mat out = out_out.getMat();
if (in_depth == CV_16U)
{
in.convertTo(out, type, 1 / depth_factor); //convert to float so that it is in meters
cv::Mat valid_mask = in == std::numeric_limits<ushort>::min(); // Should we do std::numeric_limits<ushort>::max() too ?
out.setTo(std::numeric_limits<float>::quiet_NaN(), valid_mask); //set a$
}
if (in_depth == CV_16S)
{
in.convertTo(out, type, 1 / depth_factor); //convert to float so tha$
cv::Mat valid_mask = (in == std::numeric_limits<short>::min()) | (in == std::numeric_limits<short>::max()); // Should we do std::numeric_limits<ushort>::max() too ?
out.setTo(std::numeric_limits<float>::quiet_NaN(), valid_mask); //set a$
}
if ((in_depth == CV_32F) || (in_depth == CV_64F))
in.convertTo(out, type);
}


/**
* @param K
* @param depth the depth image
@ -11,6 +11,34 @@
namespace cv
{

/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
* Otherwise, the image is simply converted to floats
* @param in the depth image (if given as short int CV_U, it is assumed to be the depth in millimeters
* (as done with the Microsoft Kinect), it is assumed in meters)
* @param the desired output depth (floats or double)
* @param out The rescaled float depth image
*/
/* void rescaleDepth(InputArray in_in, int depth, OutputArray out_out); */

template<typename T>
void
rescaleDepthTemplated(const Mat& in, Mat& out);

template<>
inline void
rescaleDepthTemplated<float>(const Mat& in, Mat& out)
{
rescaleDepth(in, CV_32F, out);
}

template<>
inline void
rescaleDepthTemplated<double>(const Mat& in, Mat& out)
{
rescaleDepth(in, CV_64F, out);
}

/**
* @param depth the depth image, containing depth with the value T
* @param the mask, containing CV_8UC1
@ -12,18 +12,16 @@ namespace cv

class Odometry::Impl
{
private:

public:
Impl() {};
virtual ~Impl() {};
virtual OdometryFrame createOdometryFrame() const = 0;
virtual void prepareFrame(OdometryFrame& frame) = 0;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) = 0;
virtual void prepareFrame(OdometryFrame& frame) const = 0;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const = 0;
virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const = 0;
virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const = 0;
virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const = 0;
virtual Ptr<RgbdNormals> getNormalsComputer() const = 0;
};

@ -32,47 +30,36 @@ class OdometryICP : public Odometry::Impl
private:
OdometrySettings settings;
OdometryAlgoType algtype;
mutable Ptr<RgbdNormals> normalsComputer;

public:
OdometryICP(OdometrySettings settings, OdometryAlgoType algtype);
~OdometryICP();
OdometryICP(OdometrySettings _settings, OdometryAlgoType _algtype) :
settings(_settings), algtype(_algtype), normalsComputer()
{ }
~OdometryICP() { }

virtual OdometryFrame createOdometryFrame() const override;
virtual void prepareFrame(OdometryFrame& frame) override;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) override;
virtual void prepareFrame(OdometryFrame& frame) const override;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const override;
virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const override;
virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const override;
virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const override;
virtual Ptr<RgbdNormals> getNormalsComputer() const override;
};

OdometryICP::OdometryICP(OdometrySettings _settings, OdometryAlgoType _algtype)
Ptr<RgbdNormals> OdometryICP::getNormalsComputer() const
{
this->settings = _settings;
this->algtype = _algtype;
return this->normalsComputer;
}

OdometryICP::~OdometryICP()
void OdometryICP::prepareFrame(OdometryFrame& frame) const
{
prepareICPFrame(frame, frame, this->normalsComputer, this->settings, this->algtype);
}

OdometryFrame OdometryICP::createOdometryFrame() const
void OdometryICP::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const
{
#ifdef HAVE_OPENCL
return OdometryFrame(OdometryFrameStoreType::UMAT);
#else
return OdometryFrame(OdometryFrameStoreType::MAT);
#endif
}

void OdometryICP::prepareFrame(OdometryFrame& frame)
{
prepareICPFrame(frame, frame, this->settings, this->algtype);
}

void OdometryICP::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame)
{
prepareICPFrame(srcFrame, dstFrame, this->settings, this->algtype);
prepareICPFrame(srcFrame, dstFrame, this->normalsComputer, this->settings, this->algtype);
}

bool OdometryICP::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const
@ -92,22 +79,26 @@ bool OdometryICP::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
return isCorrect;
}

bool OdometryICP::compute(InputArray _srcFrame, InputArray _dstFrame, OutputArray Rt) const
bool OdometryICP::compute(InputArray _srcDepth, InputArray _dstDepth, OutputArray Rt) const
{
OdometryFrame srcFrame = this->createOdometryFrame();
OdometryFrame dstFrame = this->createOdometryFrame();
srcFrame.setDepth(_srcFrame);
dstFrame.setDepth(_dstFrame);
OdometryFrame srcFrame(noArray(), _srcDepth);
OdometryFrame dstFrame(noArray(), _dstDepth);

prepareICPFrame(srcFrame, dstFrame, this->settings, this->algtype);
prepareICPFrame(srcFrame, dstFrame, this->normalsComputer, this->settings, this->algtype);

bool isCorrect = compute(srcFrame, dstFrame, Rt);
return isCorrect;
}

bool OdometryICP::compute(InputArray, InputArray, InputArray, InputArray, OutputArray) const
bool OdometryICP::compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
{
CV_Error(cv::Error::StsBadFunc, "This odometry does not work with depth and rgb data simultaneously");
CV_UNUSED(srcDepthFrame);
CV_UNUSED(srcRGBFrame);
CV_UNUSED(dstDepthFrame);
CV_UNUSED(dstRGBFrame);
CV_UNUSED(Rt);
CV_Error(cv::Error::StsBadFunc, "This odometry does not work with rgb data");
}

class OdometryRGB : public Odometry::Impl
@ -117,41 +108,27 @@ private:
OdometryAlgoType algtype;

public:
OdometryRGB(OdometrySettings settings, OdometryAlgoType algtype);
~OdometryRGB();
OdometryRGB(OdometrySettings _settings, OdometryAlgoType _algtype) : settings(_settings), algtype(_algtype) { }
~OdometryRGB() { }

virtual OdometryFrame createOdometryFrame() const override;
virtual void prepareFrame(OdometryFrame& frame) override;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) override;
virtual void prepareFrame(OdometryFrame& frame) const override;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const override;
virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const override;
virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const override;
virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const override;
virtual Ptr<RgbdNormals> getNormalsComputer() const override { return Ptr<RgbdNormals>(); }
};

OdometryRGB::OdometryRGB(OdometrySettings _settings, OdometryAlgoType _algtype)

void OdometryRGB::prepareFrame(OdometryFrame& frame) const
{
this->settings = _settings;
this->algtype = _algtype;
prepareRGBFrame(frame, frame, this->settings);
}

OdometryRGB::~OdometryRGB()
void OdometryRGB::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const
{
}

OdometryFrame OdometryRGB::createOdometryFrame() const
{
return OdometryFrame(OdometryFrameStoreType::MAT);
}

void OdometryRGB::prepareFrame(OdometryFrame& frame)
{
prepareRGBFrame(frame, frame, this->settings, false);
}

void OdometryRGB::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame)
{
prepareRGBFrame(srcFrame, dstFrame, this->settings, false);
prepareRGBFrame(srcFrame, dstFrame, this->settings);
}

bool OdometryRGB::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const
@ -172,22 +149,22 @@ bool OdometryRGB::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
return isCorrect;
}

bool OdometryRGB::compute(InputArray _srcFrame, InputArray _dstFrame, OutputArray Rt) const
bool OdometryRGB::compute(InputArray _srcImage, InputArray _dstImage, OutputArray Rt) const
{
OdometryFrame srcFrame = this->createOdometryFrame();
OdometryFrame dstFrame = this->createOdometryFrame();
srcFrame.setImage(_srcFrame);
dstFrame.setImage(_dstFrame);

prepareRGBFrame(srcFrame, dstFrame, this->settings, false);

bool isCorrect = compute(srcFrame, dstFrame, Rt);
return isCorrect;
CV_UNUSED(_srcImage);
CV_UNUSED(_dstImage);
CV_UNUSED(Rt);
CV_Error(cv::Error::StsBadFunc, "This odometry algorithm requires depth and rgb data simultaneously");
}

bool OdometryRGB::compute(InputArray, InputArray, InputArray, InputArray, OutputArray) const
bool OdometryRGB::compute(InputArray srcDepthFrame, InputArray srcRGBFrame, InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
{
CV_Error(cv::Error::StsBadFunc, "This odometry does not work with depth and rgb data simultaneously");
OdometryFrame srcFrame(srcRGBFrame, srcDepthFrame);
OdometryFrame dstFrame(dstRGBFrame, dstDepthFrame);

prepareRGBFrame(srcFrame, dstFrame, this->settings);

return compute(srcFrame, dstFrame, Rt);
}

class OdometryRGBD : public Odometry::Impl
@ -195,43 +172,34 @@ class OdometryRGBD : public Odometry::Impl
private:
OdometrySettings settings;
OdometryAlgoType algtype;
mutable Ptr<RgbdNormals> normalsComputer;

public:
OdometryRGBD(OdometrySettings settings, OdometryAlgoType algtype);
~OdometryRGBD();
OdometryRGBD(OdometrySettings _settings, OdometryAlgoType _algtype) : settings(_settings), algtype(_algtype), normalsComputer() { }
~OdometryRGBD() { }

virtual OdometryFrame createOdometryFrame() const override;
virtual void prepareFrame(OdometryFrame& frame) override;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) override;
virtual void prepareFrame(OdometryFrame& frame) const override;
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const override;
virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const override;
virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const override;
virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const override;
virtual Ptr<RgbdNormals> getNormalsComputer() const override;
};

OdometryRGBD::OdometryRGBD(OdometrySettings _settings, OdometryAlgoType _algtype)
Ptr<RgbdNormals> OdometryRGBD::getNormalsComputer() const
{
this->settings = _settings;
this->algtype = _algtype;
return normalsComputer;
}

OdometryRGBD::~OdometryRGBD()
void OdometryRGBD::prepareFrame(OdometryFrame& frame) const
{
prepareRGBDFrame(frame, frame, this->normalsComputer, this->settings, this->algtype);
}

OdometryFrame OdometryRGBD::createOdometryFrame() const
void OdometryRGBD::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const
{
return OdometryFrame(OdometryFrameStoreType::MAT);
}

void OdometryRGBD::prepareFrame(OdometryFrame& frame)
{
prepareRGBDFrame(frame, frame, this->settings, this->algtype);
}

void OdometryRGBD::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame)
{
prepareRGBDFrame(srcFrame, dstFrame, this->settings, this->algtype);
prepareRGBDFrame(srcFrame, dstFrame, this->normalsComputer, this->settings, this->algtype);
}

bool OdometryRGBD::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const
@ -251,26 +219,26 @@ bool OdometryRGBD::compute(const OdometryFrame& srcFrame, const OdometryFrame& d
return isCorrect;
}

bool OdometryRGBD::compute(InputArray, InputArray, OutputArray) const
bool OdometryRGBD::compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const
{
CV_Error(cv::Error::StsBadFunc, "This volume needs depth and rgb data simultaneously");
CV_UNUSED(srcFrame);
CV_UNUSED(dstFrame);
CV_UNUSED(Rt);
CV_Error(cv::Error::StsBadFunc, "This odometry algorithm needs depth and rgb data simultaneously");
}

bool OdometryRGBD::compute(InputArray _srcDepthFrame, InputArray _srcRGBFrame,
InputArray _dstDepthFrame, InputArray _dstRGBFrame, OutputArray Rt) const
{
OdometryFrame srcFrame = this->createOdometryFrame();
OdometryFrame dstFrame = this->createOdometryFrame();
srcFrame.setDepth(_srcDepthFrame);
srcFrame.setImage(_srcRGBFrame);
dstFrame.setDepth(_dstDepthFrame);
dstFrame.setImage(_dstRGBFrame);
OdometryFrame srcFrame(_srcRGBFrame, _srcDepthFrame);
OdometryFrame dstFrame(_dstRGBFrame, _dstDepthFrame);

prepareRGBDFrame(srcFrame, dstFrame, this->settings, this->algtype);
prepareRGBDFrame(srcFrame, dstFrame, this->normalsComputer, this->settings, this->algtype);
bool isCorrect = compute(srcFrame, dstFrame, Rt);
return isCorrect;
}


Odometry::Odometry()
{
OdometrySettings settings;
@ -313,54 +281,45 @@ Odometry::Odometry(OdometryType otype, OdometrySettings settings, OdometryAlgoTy
break;
default:
CV_Error(Error::StsInternal,
"Incorrect OdometryType, you are able to use only { ICP, RGB, RGBD }");
"Incorrect OdometryType, you are able to use only { ICP, RGB, RGBD }");
break;
}

}

Odometry::~Odometry()
{
}

OdometryFrame Odometry::createOdometryFrame() const
{
return this->impl->createOdometryFrame();
}

OdometryFrame Odometry::createOdometryFrame(OdometryFrameStoreType matType) const
{
return OdometryFrame(matType);
}

void Odometry::prepareFrame(OdometryFrame& frame)
void Odometry::prepareFrame(OdometryFrame& frame) const
{
this->impl->prepareFrame(frame);
}

void Odometry::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame)
void Odometry::prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const
{
this->impl->prepareFrames(srcFrame, dstFrame);
}

bool Odometry::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt)
bool Odometry::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const
{
return this->impl->compute(srcFrame, dstFrame, Rt);
}


bool Odometry::compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const
{
return this->impl->compute(srcFrame, dstFrame, Rt);
}


bool Odometry::compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
{
return this->impl->compute(srcDepthFrame, srcRGBFrame, dstDepthFrame, dstRGBFrame, Rt);
}

Ptr<RgbdNormals> Odometry::getNormalsComputer() const
{
return this->impl->getNormalsComputer();
}


void warpFrame(InputArray depth, InputArray image, InputArray mask,
@ -11,257 +11,90 @@
namespace cv
{

class OdometryFrame::Impl
OdometryFrame::OdometryFrame(InputArray image, InputArray depth, InputArray mask, InputArray normals)
{
public:
Impl() {}
virtual ~Impl() {}
virtual void setImage(InputArray image) = 0;
virtual void getImage(OutputArray image) const = 0;
virtual void getGrayImage(OutputArray image) const = 0;
virtual void setDepth(InputArray depth) = 0;
virtual void getDepth(OutputArray depth) const = 0;
virtual void getScaledDepth(OutputArray depth) const = 0;
virtual void setMask(InputArray mask) = 0;
virtual void getMask(OutputArray mask) const = 0;
virtual void setNormals(InputArray normals) = 0;
virtual void getNormals(OutputArray normals) const = 0;
virtual void setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype) = 0;
virtual void setPyramidLevels(size_t _nLevels) = 0;
virtual size_t getPyramidLevels(OdometryFramePyramidType oftype) const = 0;
virtual void setPyramidAt(InputArray img,
OdometryFramePyramidType pyrType, size_t level) = 0;
virtual void getPyramidAt(OutputArray img,
OdometryFramePyramidType pyrType, size_t level) const = 0;
};
this->impl = makePtr<OdometryFrame::Impl>();
if (!image.empty())
{
image.copyTo(this->impl->image);
}
if (!depth.empty())
{
depth.copyTo(this->impl->depth);
}
if (!mask.empty())
{
mask.copyTo(this->impl->mask);
}
if (!normals.empty())
{
normals.copyTo(this->impl->normals);
}
}

template<typename TMat>
class OdometryFrameImplTMat : public OdometryFrame::Impl
{
public:
OdometryFrameImplTMat();
~OdometryFrameImplTMat() {};

virtual void setImage(InputArray image) override;
virtual void getImage(OutputArray image) const override;
virtual void getGrayImage(OutputArray image) const override;
virtual void setDepth(InputArray depth) override;
virtual void getDepth(OutputArray depth) const override;
virtual void getScaledDepth(OutputArray depth) const override;
virtual void setMask(InputArray mask) override;
virtual void getMask(OutputArray mask) const override;
virtual void setNormals(InputArray normals) override;
virtual void getNormals(OutputArray normals) const override;
virtual void setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype) override;
virtual void setPyramidLevels(size_t _nLevels) override;
virtual size_t getPyramidLevels(OdometryFramePyramidType oftype) const override;
virtual void setPyramidAt(InputArray img,
OdometryFramePyramidType pyrType, size_t level) override;
virtual void getPyramidAt(OutputArray img,
OdometryFramePyramidType pyrType, size_t level) const override;

private:
void findMask(InputArray image);

TMat image;
TMat imageGray;
TMat depth;
TMat scaledDepth;
TMat mask;
TMat normals;
std::vector< std::vector<TMat> > pyramids;
};

OdometryFrame::OdometryFrame()
{
this->impl = makePtr<OdometryFrameImplTMat<Mat>>();
};

OdometryFrame::OdometryFrame(OdometryFrameStoreType matType)
{
if (matType == OdometryFrameStoreType::UMAT)
this->impl = makePtr<OdometryFrameImplTMat<UMat>>();
else
this->impl = makePtr<OdometryFrameImplTMat<Mat>>();
};

void OdometryFrame::setImage(InputArray image) { this->impl->setImage(image); }
void OdometryFrame::getImage(OutputArray image) const { this->impl->getImage(image); }
void OdometryFrame::getGrayImage(OutputArray image) const { this->impl->getGrayImage(image); }
void OdometryFrame::setDepth(InputArray depth) { this->impl->setDepth(depth); }
void OdometryFrame::getDepth(OutputArray depth) const { this->impl->getDepth(depth); }
void OdometryFrame::getScaledDepth(OutputArray depth) const { this->impl->getScaledDepth(depth); }
void OdometryFrame::setMask(InputArray mask) { this->impl->setMask(mask); }
void OdometryFrame::getMask(OutputArray mask) const { this->impl->getMask(mask); }
void OdometryFrame::setNormals(InputArray normals) { this->impl->setNormals(normals); }
void OdometryFrame::getNormals(OutputArray normals) const { this->impl->getNormals(normals); }
void OdometryFrame::setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype)
{
this->impl->setPyramidLevel(_nLevels, oftype);
}
void OdometryFrame::setPyramidLevels(size_t _nLevels) { this->impl->setPyramidLevels(_nLevels); }
size_t OdometryFrame::getPyramidLevels(OdometryFramePyramidType oftype) const { return this->impl->getPyramidLevels(oftype); }
void OdometryFrame::setPyramidAt(InputArray img, OdometryFramePyramidType pyrType, size_t level)
{
this->impl->setPyramidAt(img, pyrType, level);
}

size_t OdometryFrame::getPyramidLevels() const { return this->impl->getPyramidLevels(); }

void OdometryFrame::getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const
{
this->impl->getPyramidAt(img, pyrType, level);
}

template<typename TMat>
|
||||
OdometryFrameImplTMat<TMat>::OdometryFrameImplTMat()
|
||||
: pyramids(OdometryFramePyramidType::N_PYRAMIDS)
|
||||
{
|
||||
};
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setImage(InputArray _image)
|
||||
{
|
||||
this->image = getTMat<TMat>(_image);
|
||||
Mat gray;
|
||||
if (_image.channels() != 1)
|
||||
cvtColor(_image, gray, COLOR_BGR2GRAY, 1);
|
||||
else
|
||||
gray = getTMat<Mat>(_image);
|
||||
gray.convertTo(gray, CV_8UC1);
|
||||
this->imageGray = getTMat<TMat>(gray);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getImage(OutputArray _image) const
|
||||
void OdometryFrame::Impl::getImage(OutputArray _image) const
|
||||
{
|
||||
_image.assign(this->image);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getGrayImage(OutputArray _image) const
|
||||
void OdometryFrame::Impl::getGrayImage(OutputArray _image) const
|
||||
{
|
||||
_image.assign(this->imageGray);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setDepth(InputArray _depth)
|
||||
{
|
||||
TMat depth_tmp;
|
||||
Mat depth_flt;
|
||||
|
||||
depth_tmp = getTMat<TMat>(_depth);
|
||||
// Odometry works well with depth values in range [0, 10)
|
||||
// If it's bigger, let's scale it down by 5000, a typical depth factor
|
||||
double max;
|
||||
cv::minMaxLoc(depth_tmp, nullptr, &max);
|
||||
if (max > 10)
|
||||
{
|
||||
depth_tmp.convertTo(depth_flt, CV_32FC1, 1.f / 5000.f);
|
||||
// getTMat<Mat>(depth_flt) < FLT_EPSILON dont work with UMat
|
||||
// depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), getTMat<Mat>(depth_flt) < FLT_EPSILON);
|
||||
depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), depth_flt < FLT_EPSILON);
|
||||
depth_tmp = getTMat<TMat>(depth_flt);
|
||||
|
||||
}
|
||||
this->depth = getTMat<TMat>(_depth);
|
||||
this->scaledDepth = depth_tmp;
|
||||
this->findMask(_depth);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getDepth(OutputArray _depth) const
|
||||
void OdometryFrame::Impl::getDepth(OutputArray _depth) const
|
||||
{
|
||||
_depth.assign(this->depth);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getScaledDepth(OutputArray _depth) const
|
||||
void OdometryFrame::Impl::getScaledDepth(OutputArray _depth) const
|
||||
{
|
||||
_depth.assign(this->scaledDepth);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setMask(InputArray _mask)
|
||||
{
|
||||
this->mask = getTMat<TMat>(_mask);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getMask(OutputArray _mask) const
|
||||
void OdometryFrame::Impl::getMask(OutputArray _mask) const
|
||||
{
|
||||
_mask.assign(this->mask);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setNormals(InputArray _normals)
|
||||
{
|
||||
this->normals = getTMat<TMat>(_normals);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getNormals(OutputArray _normals) const
|
||||
void OdometryFrame::Impl::getNormals(OutputArray _normals) const
|
||||
{
|
||||
_normals.assign(this->normals);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype)
|
||||
size_t OdometryFrame::Impl::getPyramidLevels() const
|
||||
{
|
||||
if (oftype < OdometryFramePyramidType::N_PYRAMIDS)
|
||||
pyramids[oftype].resize(_nLevels, TMat());
|
||||
else
|
||||
CV_Error(Error::StsBadArg, "Incorrect type.");
|
||||
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setPyramidLevels(size_t _nLevels)
|
||||
{
|
||||
for (auto& p : pyramids)
|
||||
// all pyramids should have the same size
|
||||
for (const auto& p : this->pyramids)
|
||||
{
|
||||
p.resize(_nLevels, TMat());
|
||||
if (!p.empty())
|
||||
return p.size();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
size_t OdometryFrameImplTMat<TMat>::getPyramidLevels(OdometryFramePyramidType oftype) const
|
||||
{
|
||||
if (oftype < OdometryFramePyramidType::N_PYRAMIDS)
|
||||
return pyramids[oftype].size();
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setPyramidAt(InputArray _img, OdometryFramePyramidType pyrType, size_t level)
|
||||
{
|
||||
CV_Assert(pyrType >= 0);
|
||||
CV_Assert((size_t)pyrType < pyramids.size());
|
||||
CV_Assert(level < pyramids[pyrType].size());
|
||||
TMat img = getTMat<TMat>(_img);
|
||||
pyramids[pyrType][level] = img;
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getPyramidAt(OutputArray _img, OdometryFramePyramidType pyrType, size_t level) const
|
||||
void OdometryFrame::Impl::getPyramidAt(OutputArray _img, OdometryFramePyramidType pyrType, size_t level) const
|
||||
{
|
||||
CV_Assert(pyrType < OdometryFramePyramidType::N_PYRAMIDS);
|
||||
CV_Assert(level < pyramids[pyrType].size());
|
||||
TMat img = pyramids[pyrType][level];
|
||||
_img.assign(img);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::findMask(InputArray _depth)
|
||||
{
|
||||
Mat depth_value = _depth.getMat();
|
||||
CV_Assert(depth_value.type() == DEPTH_TYPE);
|
||||
Mat m(depth_value.size(), CV_8UC1, Scalar(255));
|
||||
for (int y = 0; y < depth_value.rows; y++)
|
||||
for (int x = 0; x < depth_value.cols; x++)
|
||||
{
|
||||
if (cvIsNaN(depth_value.at<float>(y, x)) || depth_value.at<float>(y, x) <= FLT_EPSILON)
|
||||
m.at<uchar>(y, x) = 0;
|
||||
}
|
||||
this->setMask(m);
|
||||
if (level < pyramids[pyrType].size())
|
||||
_img.assign(pyramids[pyrType][level]);
|
||||
else
|
||||
_img.clear();
|
||||
}
|
||||
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -30,37 +30,6 @@ static inline int getTransformDim(OdometryTransformType transformType)
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
void checkImage(InputArray image)
|
||||
{
|
||||
if (image.empty())
|
||||
CV_Error(Error::StsBadSize, "Image is empty.");
|
||||
if (image.type() != CV_8UC1)
|
||||
CV_Error(Error::StsBadSize, "Image type has to be CV_8UC1.");
|
||||
}
|
||||
|
||||
static inline
|
||||
void checkDepth(InputArray depth, const Size& imageSize)
|
||||
{
|
||||
if (depth.empty())
|
||||
CV_Error(Error::StsBadSize, "Depth is empty.");
|
||||
if (depth.size() != imageSize)
|
||||
CV_Error(Error::StsBadSize, "Depth has to have the size equal to the image size.");
|
||||
if (depth.type() != CV_32FC1)
|
||||
CV_Error(Error::StsBadSize, "Depth type has to be CV_32FC1.");
|
||||
}
|
||||
|
||||
static inline
|
||||
void checkMask(InputArray mask, const Size& imageSize)
|
||||
{
|
||||
if (!mask.empty())
|
||||
{
|
||||
if (mask.size() != imageSize)
|
||||
CV_Error(Error::StsBadSize, "Mask has to have the size equal to the image size.");
|
||||
if (mask.type() != CV_8UC1)
|
||||
CV_Error(Error::StsBadSize, "Mask type has to be CV_8UC1.");
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
void checkNormals(InputArray normals, const Size& depthSize)
|
||||
@ -195,46 +164,9 @@ void icpCoeffsFunc(OdometryTransformType transformType, double* C, const Point3f
|
||||
C[i] = ret[i];
|
||||
}
|
||||
|
||||
void prepareRGBDFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, OdometryAlgoType algtype);
|
||||
void prepareRGBFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, bool useDepth);
|
||||
void prepareICPFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, OdometryAlgoType algtype);
|
||||
|
||||
void prepareRGBFrameBase(OdometryFrame& frame, const OdometrySettings settings, bool useDepth);
|
||||
void prepareRGBFrameSrc (OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareRGBFrameDst (OdometryFrame& frame, const OdometrySettings settings);
|
||||
|
||||
void prepareICPFrameBase(OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareICPFrameSrc (OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareICPFrameDst (OdometryFrame& frame, const OdometrySettings settings);
|
||||
|
||||
|
||||
void setPyramids(OdometryFrame& odf, OdometryFramePyramidType oftype, InputArrayOfArrays pyramidImage);
|
||||
void getPyramids(OdometryFrame& odf, OdometryFramePyramidType oftype, OutputArrayOfArrays _pyramid);
|
||||
|
||||
void preparePyramidImage(InputArray image, InputOutputArrayOfArrays pyramidImage, size_t levelCount);
|
||||
|
||||
template<typename TMat>
|
||||
void preparePyramidMask(InputArray mask, InputArrayOfArrays pyramidDepth, float minDepth, float maxDepth, InputArrayOfArrays pyramidNormal, InputOutputArrayOfArrays pyramidMask);
|
||||
|
||||
template<typename TMat>
|
||||
void preparePyramidCloud(InputArrayOfArrays pyramidDepth, const Matx33f& cameraMatrix, InputOutputArrayOfArrays pyramidCloud);
|
||||
|
||||
void buildPyramidCameraMatrix(const Matx33f& cameraMatrix, int levels, std::vector<Matx33f>& pyramidCameraMatrix);
|
||||
|
||||
template<typename TMat>
|
||||
void preparePyramidSobel(InputArrayOfArrays pyramidImage, int dx, int dy, InputOutputArrayOfArrays pyramidSobel, int sobelSize);
|
||||
|
||||
void preparePyramidTexturedMask(InputArrayOfArrays pyramid_dI_dx, InputArrayOfArrays pyramid_dI_dy,
|
||||
InputArray minGradMagnitudes, InputArrayOfArrays pyramidMask, double maxPointsPart,
|
||||
InputOutputArrayOfArrays pyramidTexturedMask, double sobelScale);
|
||||
|
||||
void randomSubsetOfMask(InputOutputArray _mask, float part);
|
||||
|
||||
void preparePyramidNormals(InputArray normals, InputArrayOfArrays pyramidDepth, InputOutputArrayOfArrays pyramidNormals);
|
||||
|
||||
void preparePyramidNormalsMask(InputArray pyramidNormals, InputArray pyramidMask, double maxPointsPart,
|
||||
InputOutputArrayOfArrays /*std::vector<Mat>&*/ pyramidNormalsMask);
|
||||
|
||||
void prepareRGBDFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, Ptr<RgbdNormals>& normalsComputer, const OdometrySettings settings, OdometryAlgoType algtype);
|
||||
void prepareRGBFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, OdometrySettings settings);
|
||||
void prepareICPFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, Ptr<RgbdNormals>& normalsComputer, const OdometrySettings settings, OdometryAlgoType algtype);
|
||||
|
||||
bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
|
||||
const OdometryFrame srcFrame,
|
||||
@ -260,7 +192,7 @@ void calcICPLsmMatrices(const Mat& cloud0, const Mat& Rt,
|
||||
const Mat& corresps,
|
||||
Mat& AtA, Mat& AtB, OdometryTransformType transformType);
|
||||
|
||||
void calcICPLsmMatricesFast(Matx33f cameraMatrix, const Mat& oldPts, const Mat& oldNrm, const Mat& newPts, const Mat& newNrm,
|
||||
void calcICPLsmMatricesFast(Matx33f cameraMatrix, const UMat& oldPts, const UMat& oldNrm, const UMat& newPts, const UMat& newNrm,
|
||||
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
|
@ -31,8 +31,13 @@ public:
|
||||
virtual int getSobelSize() const = 0;
|
||||
virtual void setSobelScale(double val) = 0;
|
||||
virtual double getSobelScale() const = 0;
|
||||
|
||||
virtual void setNormalWinSize(int val) = 0;
|
||||
virtual int getNormalWinSize() const = 0;
|
||||
virtual void setNormalDiffThreshold(float val) = 0;
|
||||
virtual float getNormalDiffThreshold() const = 0;
|
||||
virtual void setNormalMethod(RgbdNormals::RgbdNormalsMethod nm) = 0;
|
||||
virtual RgbdNormals::RgbdNormalsMethod getNormalMethod() const = 0;
|
||||
|
||||
virtual void setAngleThreshold(float val) = 0;
|
||||
virtual float getAngleThreshold() const = 0;
|
||||
@ -70,8 +75,13 @@ public:
|
||||
virtual int getSobelSize() const override;
|
||||
virtual void setSobelScale(double val) override;
|
||||
virtual double getSobelScale() const override;
|
||||
|
||||
virtual void setNormalWinSize(int val) override;
|
||||
virtual int getNormalWinSize() const override;
|
||||
virtual void setNormalDiffThreshold(float val) override;
|
||||
virtual float getNormalDiffThreshold() const override;
|
||||
virtual void setNormalMethod(RgbdNormals::RgbdNormalsMethod nm) override;
|
||||
virtual RgbdNormals::RgbdNormalsMethod getNormalMethod() const override;
|
||||
|
||||
virtual void setAngleThreshold(float val) override;
|
||||
virtual float getAngleThreshold() const override;
|
||||
@ -96,7 +106,10 @@ private:
|
||||
|
||||
int sobelSize;
|
||||
double sobelScale;
|
||||
|
||||
int normalWinSize;
|
||||
float normalDiffThreshold;
|
||||
RgbdNormals::RgbdNormalsMethod normalMethod;
|
||||
|
||||
float angleThreshold;
|
||||
float maxTranslation;
|
||||
@ -124,7 +137,10 @@ public:
|
||||
|
||||
static const int defaultSobelSize = 3;
|
||||
static constexpr double defaultSobelScale = 1. / 8.;
|
||||
|
||||
static const int defaultNormalWinSize = 5;
|
||||
static const RgbdNormals::RgbdNormalsMethod defaultNormalMethod = RgbdNormals::RGBD_NORMALS_METHOD_FALS;
|
||||
static constexpr float defaultNormalDiffThreshold = 50.f;
|
||||
|
||||
static constexpr float defaultAngleThreshold = (float)(30. * CV_PI / 180.);
|
||||
static constexpr float defaultMaxTranslation = 0.15f;
|
||||
@ -159,8 +175,13 @@ void OdometrySettings::setSobelSize(int val) { this->impl->setSobelSize(val); }
|
||||
int OdometrySettings::getSobelSize() const { return this->impl->getSobelSize(); }
|
||||
void OdometrySettings::setSobelScale(double val) { this->impl->setSobelScale(val); }
|
||||
double OdometrySettings::getSobelScale() const { return this->impl->getSobelScale(); }
|
||||
void OdometrySettings::setNormalWinSize(int val) { this->impl->setNormalWinSize(val); }
|
||||
int OdometrySettings::getNormalWinSize() const { return this->impl->getNormalWinSize(); }
|
||||
|
||||
void OdometrySettings::setNormalWinSize(int val) { this->impl->setNormalWinSize(val); }
|
||||
int OdometrySettings::getNormalWinSize() const { return this->impl->getNormalWinSize(); }
|
||||
void OdometrySettings::setNormalDiffThreshold(float val) { this->impl->setNormalDiffThreshold(val); }
|
||||
float OdometrySettings::getNormalDiffThreshold() const { return this->impl->getNormalDiffThreshold(); }
|
||||
void OdometrySettings::setNormalMethod(RgbdNormals::RgbdNormalsMethod nm) { this->impl->setNormalMethod(nm); }
|
||||
RgbdNormals::RgbdNormalsMethod OdometrySettings::getNormalMethod() const { return this->impl->getNormalMethod(); }
|
||||
|
||||
void OdometrySettings::setAngleThreshold(float val) { this->impl->setAngleThreshold(val); }
|
||||
float OdometrySettings::getAngleThreshold() const { return this->impl->getAngleThreshold(); }
|
||||
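A minimal sketch of configuring the newly exposed normals parameters through OdometrySettings; the values used are simply the defaults listed above, and the bare default construction of the settings object is an assumption made for illustration:

cv::OdometrySettings ods;
ods.setNormalWinSize(5);                                        // defaultNormalWinSize
ods.setNormalMethod(cv::RgbdNormals::RGBD_NORMALS_METHOD_FALS); // defaultNormalMethod
ods.setNormalDiffThreshold(50.f);                               // defaultNormalDiffThreshold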
@ -188,7 +209,10 @@ OdometrySettingsImplCommon::OdometrySettingsImplCommon()
|
||||
|
||||
this->sobelSize = ds.defaultSobelSize;
|
||||
this->sobelScale = ds.defaultSobelScale;
|
||||
|
||||
this->normalWinSize = ds.defaultNormalWinSize;
|
||||
this->normalDiffThreshold = ds.defaultNormalDiffThreshold;
|
||||
this->normalMethod = ds.defaultNormalMethod;
|
||||
|
||||
this->angleThreshold = ds.defaultAngleThreshold;
|
||||
this->maxTranslation = ds.defaultMaxTranslation;
|
||||
@ -298,7 +322,22 @@ int OdometrySettingsImplCommon::getNormalWinSize() const
|
||||
{
|
||||
return this->normalWinSize;
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setNormalDiffThreshold(float val)
|
||||
{
|
||||
this->normalDiffThreshold = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getNormalDiffThreshold() const
|
||||
{
|
||||
return this->normalDiffThreshold;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setNormalMethod(RgbdNormals::RgbdNormalsMethod nm)
|
||||
{
|
||||
this->normalMethod = nm;
|
||||
}
|
||||
RgbdNormals::RgbdNormalsMethod OdometrySettingsImplCommon::getNormalMethod() const
|
||||
{
|
||||
return this->normalMethod;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setAngleThreshold(float val)
|
||||
{
|
||||
this->angleThreshold = val;
|
||||
|
@ -1,74 +0,0 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "utils.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat getTMat(InputArray, int)
|
||||
{
|
||||
return TMat();
|
||||
}
|
||||
|
||||
template<>
|
||||
Mat getTMat<Mat>(InputArray a, int i)
|
||||
{
|
||||
return a.getMat(i);
|
||||
}
|
||||
|
||||
template<>
|
||||
UMat getTMat<UMat>(InputArray a, int i)
|
||||
{
|
||||
return a.getUMat(i);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat& getTMatRef(InputOutputArray, int)
|
||||
{
|
||||
return TMat();
|
||||
}
|
||||
|
||||
template<>
|
||||
Mat& getTMatRef<Mat>(InputOutputArray a, int i)
|
||||
{
|
||||
return a.getMatRef(i);
|
||||
}
|
||||
|
||||
/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
|
||||
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
|
||||
* Otherwise, the image is simply converted to floats
|
||||
* @param in_in the depth image (if given as short int CV_U, it is assumed to be the depth in millimeters
|
||||
* (as done with the Microsoft Kinect); otherwise, if given as CV_32F or CV_64F, it is assumed in meters)
|
||||
* @param type the desired output depth type (CV_32FC1 or CV_64FC1)
|
||||
* @param out_out The rescaled float depth image
* @param depth_factor the factor by which integer depth values are divided to obtain meters (e.g. 1000 for millimeter input)
|
||||
*/
|
||||
void rescaleDepth(InputArray in_in, int type, OutputArray out_out, double depth_factor)
|
||||
{
|
||||
cv::Mat in = in_in.getMat();
|
||||
CV_Assert(in.type() == CV_64FC1 || in.type() == CV_32FC1 || in.type() == CV_16UC1 || in.type() == CV_16SC1);
|
||||
CV_Assert(type == CV_64FC1 || type == CV_32FC1);
|
||||
|
||||
int in_depth = in.depth();
|
||||
|
||||
out_out.create(in.size(), type);
|
||||
cv::Mat out = out_out.getMat();
|
||||
if (in_depth == CV_16U)
|
||||
{
|
||||
in.convertTo(out, type, 1 / depth_factor); //convert to float so that it is in meters
|
||||
cv::Mat valid_mask = in == std::numeric_limits<ushort>::min(); // Should we do std::numeric_limits<ushort>::max() too ?
|
||||
out.setTo(std::numeric_limits<float>::quiet_NaN(), valid_mask); // set invalid (zero) depth values to NaN
|
||||
}
|
||||
if (in_depth == CV_16S)
|
||||
{
|
||||
in.convertTo(out, type, 1 / depth_factor); //convert to float so that it is in meters
|
||||
cv::Mat valid_mask = (in == std::numeric_limits<short>::min()) | (in == std::numeric_limits<short>::max()); // Should we do std::numeric_limits<ushort>::max() too ?
|
||||
out.setTo(std::numeric_limits<float>::quiet_NaN(), valid_mask); // set invalid (limit) depth values to NaN
|
||||
}
|
||||
if ((in_depth == CV_32F) || (in_depth == CV_64F))
|
||||
in.convertTo(out, type);
|
||||
}
|
||||
} // namespace cv
|
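A minimal usage sketch for the function above, assuming it is visible to the calling code; the file name and the 1000.0 depth factor (millimeter input) are illustrative assumptions, not part of the patch:

#include <opencv2/imgcodecs.hpp>  // for cv::imread

// Convert a Kinect-style CV_16U depth map (millimeters) to a CV_32F map in meters;
// zero / limit values become quiet_NaN as described in the comment above.
cv::Mat raw = cv::imread("depth.png", cv::IMREAD_ANYDEPTH); // CV_16UC1
cv::Mat depthMeters;
cv::rescaleDepth(raw, CV_32F, depthMeters, 1000.0);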
@ -13,7 +13,6 @@
|
||||
namespace cv
|
||||
{
|
||||
|
||||
|
||||
/** Checks if the value is a valid depth. For CV_16U or CV_16S, the convention is to be invalid if it is
|
||||
* a limit. For a float/double, we just check if it is a NaN
|
||||
* @param depth the depth to check for validity
|
||||
@ -46,41 +45,12 @@ inline bool isValidDepth(const int& depth)
|
||||
(depth != std::numeric_limits<int>::max());
|
||||
}
|
||||
|
||||
|
||||
inline bool isValidDepth(const unsigned int& depth)
|
||||
{
|
||||
return (depth != std::numeric_limits<unsigned int>::min()) &&
|
||||
(depth != std::numeric_limits<unsigned int>::max());
|
||||
}
|
||||
|
||||
/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
|
||||
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
|
||||
* Otherwise, the image is simply converted to floats
|
||||
* @param in the depth image (if given as short int CV_U, it is assumed to be the depth in millimeters
|
||||
* (as done with the Microsoft Kinect); otherwise, if given as CV_32F or CV_64F, it is assumed in meters)
|
||||
* @param depth the desired output depth (floats or double)
|
||||
* @param out The rescaled float depth image
|
||||
*/
|
||||
/* void rescaleDepth(InputArray in_in, int depth, OutputArray out_out); */
|
||||
|
||||
template<typename T>
|
||||
void
|
||||
rescaleDepthTemplated(const Mat& in, Mat& out);
|
||||
|
||||
template<>
|
||||
inline void
|
||||
rescaleDepthTemplated<float>(const Mat& in, Mat& out)
|
||||
{
|
||||
rescaleDepth(in, CV_32F, out);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline void
|
||||
rescaleDepthTemplated<double>(const Mat& in, Mat& out)
|
||||
{
|
||||
rescaleDepth(in, CV_64F, out);
|
||||
}
|
||||
|
||||
|
||||
// One place to turn intrinsics on and off
|
||||
#define USE_INTRINSICS CV_SIMD128
|
||||
@ -105,18 +75,6 @@ static inline bool isNaN(const cv::v_float32x4& p)
|
||||
}
|
||||
#endif
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat getTMat(InputArray, int = -1);
|
||||
template<>
|
||||
Mat getTMat<Mat>(InputArray a, int i);
|
||||
template<>
|
||||
UMat getTMat<UMat>(InputArray a, int i);
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat& getTMatRef(InputOutputArray, int = -1);
|
||||
template<>
|
||||
Mat& getTMatRef<Mat>(InputOutputArray a, int i);
|
||||
|
||||
inline size_t roundDownPow2(size_t x)
|
||||
{
|
||||
size_t shift = 0;
|
||||
@ -280,6 +238,33 @@ struct Intr
|
||||
float fx, fy, cx, cy;
|
||||
};
|
||||
|
||||
class OdometryFrame::Impl
|
||||
{
|
||||
public:
|
||||
Impl() : pyramids(OdometryFramePyramidType::N_PYRAMIDS) { }
|
||||
virtual ~Impl() {}
|
||||
|
||||
virtual void getImage(OutputArray image) const ;
|
||||
virtual void getGrayImage(OutputArray image) const ;
|
||||
virtual void getDepth(OutputArray depth) const ;
|
||||
virtual void getScaledDepth(OutputArray depth) const ;
|
||||
virtual void getMask(OutputArray mask) const ;
|
||||
virtual void getNormals(OutputArray normals) const ;
|
||||
|
||||
virtual size_t getPyramidLevels() const ;
|
||||
|
||||
virtual void getPyramidAt(OutputArray img,
|
||||
OdometryFramePyramidType pyrType, size_t level) const ;
|
||||
|
||||
UMat imageGray;
|
||||
UMat image;
|
||||
UMat depth;
|
||||
UMat scaledDepth;
|
||||
UMat mask;
|
||||
UMat normals;
|
||||
std::vector< std::vector<UMat> > pyramids;
|
||||
};
|
||||
|
||||
} // namespace cv
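A minimal sketch of the read-only flow this Impl backs, mirroring the constructor, prepareFrame() and getter calls used in the tests further below; the UMat inputs and the intrinsics values are assumptions made for illustration:

// Data is provided at construction only; pyramids are generated by
// Odometry::prepareFrame() and read back through the const getters.
cv::UMat uimage, udepth;                  // assumed to be filled elsewhere
cv::OdometryFrame frame(uimage, udepth);
cv::Matx33f K(525.f, 0.f, 319.5f, 0.f, 525.f, 239.5f, 0.f, 0.f, 1.f); // illustrative intrinsics
cv::OdometrySettings ods;
ods.setCameraMatrix(K);
cv::Odometry odom(cv::OdometryType::DEPTH, ods, cv::OdometryAlgoType::COMMON);
odom.prepareFrame(frame);
cv::UMat cloud0;
frame.getPyramidAt(cloud0, cv::OdometryFramePyramidType::PYR_CLOUD, 0); // read-only access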
|
||||
|
||||
|
||||
|
@ -96,47 +96,12 @@ void TsdfVolume::integrate(InputArray, InputArray, InputArray)
|
||||
CV_Error(cv::Error::StsBadFunc, "This volume doesn't support vertex colors");
|
||||
}
|
||||
|
||||
void TsdfVolume::raycast(InputArray cameraPose, OdometryFrame& outFrame) const
|
||||
{
|
||||
raycast(cameraPose, settings.getRaycastHeight(), settings.getRaycastWidth(), outFrame);
|
||||
}
|
||||
|
||||
void TsdfVolume::raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const
|
||||
{
|
||||
raycast(cameraPose, settings.getRaycastHeight(), settings.getRaycastWidth(), points, normals, colors);
|
||||
}
|
||||
|
||||
void TsdfVolume::raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const
|
||||
{
|
||||
#ifndef HAVE_OPENCL
|
||||
Mat points, normals;
|
||||
raycast(cameraPose, height, width, points, normals, noArray());
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
#else
|
||||
if (useGPU)
|
||||
{
|
||||
UMat points, normals;
|
||||
raycast(cameraPose, height, width, points, normals, noArray());
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
Mat points, normals;
|
||||
raycast(cameraPose, height, width, points, normals, noArray());
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void TsdfVolume::raycast(InputArray _cameraPose, int height, int width, OutputArray _points, OutputArray _normals, OutputArray _colors) const
|
||||
{
|
||||
@ -301,47 +266,13 @@ void HashTsdfVolume::integrate(InputArray, InputArray, InputArray)
|
||||
CV_Error(cv::Error::StsBadFunc, "This volume doesn't support vertex colors");
|
||||
}
|
||||
|
||||
void HashTsdfVolume::raycast(InputArray cameraPose, OdometryFrame& outFrame) const
|
||||
{
|
||||
raycast(cameraPose, settings.getRaycastHeight(), settings.getRaycastWidth(), outFrame);
|
||||
}
|
||||
|
||||
void HashTsdfVolume::raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const
|
||||
{
|
||||
raycast(cameraPose, settings.getRaycastHeight(), settings.getRaycastWidth(), points, normals, colors);
|
||||
}
|
||||
|
||||
void HashTsdfVolume::raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const
|
||||
{
|
||||
#ifndef HAVE_OPENCL
|
||||
Mat points, normals;
|
||||
raycast(cameraPose, height, width, points, normals, noArray());
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
#else
|
||||
if (useGPU)
|
||||
{
|
||||
UMat points, normals;
|
||||
raycast(cameraPose, height, width, points, normals, noArray());
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
Mat points, normals;
|
||||
raycast(cameraPose, height, width, points, normals, noArray());
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
|
||||
}
|
||||
#endif
|
||||
}
|
||||
void HashTsdfVolume::raycast(InputArray _cameraPose, int height, int width, OutputArray _points, OutputArray _normals, OutputArray _colors) const
|
||||
{
|
||||
if (_colors.needed())
|
||||
@ -481,27 +412,11 @@ void ColorTsdfVolume::integrate(InputArray _depth, InputArray _image, InputArray
|
||||
integrateColorTsdfVolumeUnit(settings, cameraPose, depth, image, pixNorms, volume);
|
||||
}
|
||||
|
||||
void ColorTsdfVolume::raycast(InputArray cameraPose, OdometryFrame& outFrame) const
|
||||
{
|
||||
raycast(cameraPose, settings.getRaycastHeight(), settings.getRaycastWidth(), outFrame);
|
||||
}
|
||||
void ColorTsdfVolume::raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const
|
||||
{
|
||||
raycast(cameraPose, settings.getRaycastHeight(), settings.getRaycastWidth(), points, normals, colors);
|
||||
}
|
||||
|
||||
void ColorTsdfVolume::raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const
|
||||
{
|
||||
Mat points, normals, colors;
|
||||
raycast(cameraPose, height, width, points, normals, colors);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_NORM);
|
||||
outFrame.setPyramidLevel(1, OdometryFramePyramidType::PYR_IMAGE);
|
||||
outFrame.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
outFrame.setPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
outFrame.setPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
|
||||
void ColorTsdfVolume::raycast(InputArray _cameraPose, int height, int width, OutputArray _points, OutputArray _normals, OutputArray _colors) const
|
||||
{
|
||||
const Matx44f cameraPose = _cameraPose.getMat();
|
||||
|
@ -26,10 +26,7 @@ public:
|
||||
virtual void integrate(InputArray depth, InputArray pose) = 0;
|
||||
virtual void integrate(InputArray depth, InputArray image, InputArray pose) = 0;
|
||||
|
||||
virtual void raycast(InputArray cameraPose, OdometryFrame& outFrame) const = 0;
|
||||
virtual void raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const = 0;
|
||||
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const = 0;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OutputArray points, OutputArray normals, OutputArray colors) const = 0;
|
||||
|
||||
virtual void fetchNormals(InputArray points, OutputArray normals) const = 0;
|
||||
@ -57,9 +54,7 @@ public:
|
||||
virtual void integrate(const OdometryFrame& frame, InputArray pose) override;
|
||||
virtual void integrate(InputArray depth, InputArray pose) override;
|
||||
virtual void integrate(InputArray depth, InputArray image, InputArray pose) override;
|
||||
virtual void raycast(InputArray cameraPose, OdometryFrame& outFrame) const override;
|
||||
virtual void raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const override;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const override;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OutputArray points, OutputArray normals, OutputArray colors) const override;
|
||||
|
||||
virtual void fetchNormals(InputArray points, OutputArray normals) const override;
|
||||
@ -100,9 +95,7 @@ public:
|
||||
virtual void integrate(const OdometryFrame& frame, InputArray pose) override;
|
||||
virtual void integrate(InputArray depth, InputArray pose) override;
|
||||
virtual void integrate(InputArray depth, InputArray image, InputArray pose) override;
|
||||
virtual void raycast(InputArray cameraPose, OdometryFrame& outFrame) const override;
|
||||
virtual void raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const override;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const override;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OutputArray points, OutputArray normals, OutputArray colors) const override;
|
||||
|
||||
virtual void fetchNormals(InputArray points, OutputArray normals) const override;
|
||||
@ -151,9 +144,7 @@ public:
|
||||
virtual void integrate(const OdometryFrame& frame, InputArray pose) override;
|
||||
virtual void integrate(InputArray depth, InputArray pose) override;
|
||||
virtual void integrate(InputArray depth, InputArray image, InputArray pose) override;
|
||||
virtual void raycast(InputArray cameraPose, OdometryFrame& outFrame) const override;
|
||||
virtual void raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const override;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const override;
|
||||
virtual void raycast(InputArray cameraPose, int height, int width, OutputArray points, OutputArray normals, OutputArray colors) const override;
|
||||
|
||||
virtual void fetchNormals(InputArray points, OutputArray normals) const override;
|
||||
@ -202,9 +193,7 @@ Volume::~Volume() {}
|
||||
void Volume::integrate(const OdometryFrame& frame, InputArray pose) { this->impl->integrate(frame, pose); }
|
||||
void Volume::integrate(InputArray depth, InputArray pose) { this->impl->integrate(depth, pose); }
|
||||
void Volume::integrate(InputArray depth, InputArray image, InputArray pose) { this->impl->integrate(depth, image, pose); }
|
||||
void Volume::raycast(InputArray cameraPose, OdometryFrame& outFrame) const { this->impl->raycast(cameraPose, outFrame); }
|
||||
void Volume::raycast(InputArray cameraPose, OutputArray _points, OutputArray _normals, OutputArray _colors) const { this->impl->raycast(cameraPose, _points, _normals, _colors); }
|
||||
void Volume::raycast(InputArray cameraPose, int height, int width, OdometryFrame& outFrame) const { this->impl->raycast(cameraPose, height, width, outFrame); }
|
||||
void Volume::raycast(InputArray cameraPose, int height, int width, OutputArray _points, OutputArray _normals, OutputArray _colors) const { this->impl->raycast(cameraPose, height, width, _points, _normals, _colors); }
|
||||
void Volume::fetchNormals(InputArray points, OutputArray normals) const { this->impl->fetchNormals(points, normals); }
|
||||
void Volume::fetchPointsNormals(OutputArray points, OutputArray normals) const { this->impl->fetchPointsNormals(points, normals); }
|
||||
|
@ -358,8 +358,7 @@ void normal_test_custom_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
Mat points, normals;
|
||||
AccessFlag af = ACCESS_READ;
|
||||
|
||||
OdometryFrame odf(OdometryFrameStoreType::UMAT);
|
||||
odf.setDepth(udepth);
|
||||
OdometryFrame odf(noArray(), udepth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
volume.integrate(depth, poses[0].matrix);
|
||||
@ -368,24 +367,15 @@ void normal_test_custom_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
|
||||
if (testFunction == VolumeTestFunction::RAYCAST)
|
||||
{
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, upoints, unormals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, odf);
|
||||
odf.getPyramidAt(upoints, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(unormals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, upoints, unormals);
|
||||
}
|
||||
else if (testFunction == VolumeTestFunction::FETCH_NORMALS)
|
||||
{
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
// take only points from raycast for checking fetched normals on the display
|
||||
volume.raycast(poses[0].matrix, frameSize.height,frameSize.width, upoints, utmpnormals);
|
||||
//volume.fetchPointsNormals(upoints, utmpnormals);
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, upoints, utmpnormals);
|
||||
// volume.fetchPointsNormals(upoints, utmpnormals);
|
||||
volume.fetchNormals(upoints, unormals);
|
||||
}
|
||||
}
|
||||
@ -427,8 +417,7 @@ void normal_test_common_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
Mat points, normals;
|
||||
AccessFlag af = ACCESS_READ;
|
||||
|
||||
OdometryFrame odf(OdometryFrameStoreType::UMAT);
|
||||
odf.setDepth(udepth);
|
||||
OdometryFrame odf(noArray(), udepth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
volume.integrate(depth, poses[0].matrix);
|
||||
@ -437,16 +426,7 @@ void normal_test_common_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
|
||||
if (testFunction == VolumeTestFunction::RAYCAST)
|
||||
{
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, upoints, unormals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, odf);
|
||||
odf.getPyramidAt(upoints, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(unormals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
}
|
||||
else if (testFunction == VolumeTestFunction::FETCH_NORMALS)
|
||||
{
|
||||
@ -498,25 +478,14 @@ void valid_points_test_custom_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
AccessFlag af = ACCESS_READ;
|
||||
int anfas, profile;
|
||||
|
||||
OdometryFrame odf(OdometryFrameStoreType::UMAT);
|
||||
odf.setDepth(udepth);
|
||||
OdometryFrame odf(noArray(), udepth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
volume.integrate(depth, poses[0].matrix);
|
||||
else
|
||||
volume.integrate(odf, poses[0].matrix);
|
||||
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, upoints, unormals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, odf);
|
||||
odf.getPyramidAt(upoints, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(unormals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, upoints, unormals);
|
||||
|
||||
normals = unormals.getMat(af);
|
||||
points = upoints.getMat(af);
|
||||
@ -526,16 +495,7 @@ void valid_points_test_custom_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
if (cvtest::debugLevel > 0)
|
||||
displayImage(depth, points, normals, depthFactor, lightPose);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, upoints1, unormals1);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, odf);
|
||||
odf.getPyramidAt(upoints1, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(unormals1, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, upoints1, unormals1);
|
||||
|
||||
normals = unormals1.getMat(af);
|
||||
points = upoints1.getMat(af);
|
||||
@ -576,25 +536,14 @@ void valid_points_test_common_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
AccessFlag af = ACCESS_READ;
|
||||
int anfas, profile;
|
||||
|
||||
OdometryFrame odf(OdometryFrameStoreType::UMAT);
|
||||
odf.setDepth(udepth);
|
||||
OdometryFrame odf(noArray(), udepth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
volume.integrate(depth, poses[0].matrix);
|
||||
else
|
||||
volume.integrate(odf, poses[0].matrix);
|
||||
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
volume.raycast(poses[0].matrix, upoints, unormals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, odf);
|
||||
odf.getPyramidAt(upoints, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(unormals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
volume.raycast(poses[0].matrix, upoints, unormals);
|
||||
|
||||
normals = unormals.getMat(af);
|
||||
points = upoints.getMat(af);
|
||||
@ -604,19 +553,10 @@ void valid_points_test_common_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
if (cvtest::debugLevel > 0)
|
||||
displayImage(depth, points, normals, depthFactor, lightPose);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
volume.raycast(poses[17].matrix, upoints1, unormals1);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[17].matrix, odf);
|
||||
odf.getPyramidAt(upoints1, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(unormals1, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
}
|
||||
volume.raycast(poses[17].matrix, upoints1, unormals1);
|
||||
|
||||
normals = unormals1.getMat(af);
|
||||
points = upoints1.getMat(af);
|
||||
patchNaNs(points);
|
||||
profile = counterOfValid(points);
|
||||
|
||||
|
@ -625,7 +625,7 @@ TEST_P(RenderedNormals, check)
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(RGBD_Normals, RenderedNormals, ::testing::Combine(::testing::Values(CV_32F, CV_64F),
|
||||
::testing::Values(
|
||||
NormalComputerThresholds { RgbdNormals::RGBD_NORMALS_METHOD_FALS, { 81.8210, 0}},
|
||||
NormalComputerThresholds { RgbdNormals::RGBD_NORMALS_METHOD_FALS, { 81.8213, 0}},
|
||||
NormalComputerThresholds { RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD, { 107.2710, 29168}},
|
||||
NormalComputerThresholds { RgbdNormals::RGBD_NORMALS_METHOD_SRI, { 73.2027, 17693}},
|
||||
NormalComputerThresholds { RgbdNormals::RGBD_NORMALS_METHOD_CROSS_PRODUCT, { 57.9832, 2531}}),
|
||||
|
@ -136,18 +136,17 @@ void OdometryTest::checkUMats()
|
||||
Mat image, depth;
|
||||
readData(image, depth);
|
||||
|
||||
OdometrySettings ods;
|
||||
ods.setCameraMatrix(K);
|
||||
Odometry odometry = Odometry(otype, ods, algtype);
|
||||
OdometryFrame odf = odometry.createOdometryFrame(OdometryFrameStoreType::UMAT);
|
||||
|
||||
Mat calcRt;
|
||||
|
||||
UMat uimage, udepth;
|
||||
image.copyTo(uimage);
|
||||
depth.copyTo(udepth);
|
||||
odf.setImage(uimage);
|
||||
odf.setDepth(udepth);
|
||||
|
||||
OdometrySettings ods;
|
||||
ods.setCameraMatrix(K);
|
||||
Odometry odometry = Odometry(otype, ods, algtype);
|
||||
OdometryFrame odf(uimage, udepth);
|
||||
|
||||
Mat calcRt;
|
||||
|
||||
uimage.release();
|
||||
udepth.release();
|
||||
|
||||
@ -155,7 +154,7 @@ void OdometryTest::checkUMats()
|
||||
bool isComputed = odometry.compute(odf, odf, calcRt);
|
||||
ASSERT_TRUE(isComputed);
|
||||
double diff = cv::norm(calcRt, Mat::eye(4, 4, CV_64FC1));
|
||||
ASSERT_FALSE(diff > idError) << "Incorrect transformation between the same frame (not the identity matrix), diff = " << diff << std::endl;
|
||||
ASSERT_LE(diff, idError) << "Incorrect transformation between the same frame (not the identity matrix)" << std::endl;
|
||||
}
|
||||
|
||||
void OdometryTest::run()
|
||||
@ -167,9 +166,7 @@ void OdometryTest::run()
|
||||
OdometrySettings ods;
|
||||
ods.setCameraMatrix(K);
|
||||
Odometry odometry = Odometry(otype, ods, algtype);
|
||||
OdometryFrame odf = odometry.createOdometryFrame();
|
||||
odf.setImage(image);
|
||||
odf.setDepth(depth);
|
||||
OdometryFrame odf(image, depth);
|
||||
Mat calcRt;
|
||||
|
||||
// 1. Try to find Rt between the same frame (try masks also).
|
||||
@ -180,7 +177,7 @@ void OdometryTest::run()
|
||||
|
||||
ASSERT_TRUE(isComputed) << "Can not find Rt between the same frame" << std::endl;
|
||||
double ndiff = cv::norm(calcRt, Mat::eye(4,4,CV_64FC1));
|
||||
ASSERT_FALSE(ndiff > idError) << "Incorrect transformation between the same frame (not the identity matrix), diff = " << ndiff << std::endl;
|
||||
ASSERT_LE(ndiff, idError) << "Incorrect transformation between the same frame (not the identity matrix)" << std::endl;
|
||||
|
||||
// 2. Generate random rigid body motion in some ranges several times (iterCount).
|
||||
// On each iteration an input frame is warped using generated transformation.
|
||||
@ -202,13 +199,8 @@ void OdometryTest::run()
|
||||
warpFrame(depth, image, noArray(), rt.matrix, K, warpedDepth, warpedImage);
|
||||
dilateFrame(warpedImage, warpedDepth); // due to inaccuracy after warping
|
||||
|
||||
OdometryFrame odfSrc = odometry.createOdometryFrame();
|
||||
OdometryFrame odfDst = odometry.createOdometryFrame();
|
||||
|
||||
odfSrc.setImage(image);
|
||||
odfSrc.setDepth(depth);
|
||||
odfDst.setImage(warpedImage);
|
||||
odfDst.setDepth(warpedDepth);
|
||||
OdometryFrame odfSrc(image, depth);
|
||||
OdometryFrame odfDst(warpedImage, warpedDepth);
|
||||
|
||||
odometry.prepareFrames(odfSrc, odfDst);
|
||||
isComputed = odometry.compute(odfSrc, odfDst, calcRt);
|
||||
@ -235,7 +227,7 @@ void OdometryTest::run()
|
||||
}
|
||||
|
||||
// compare rotation
|
||||
double possibleError = algtype == OdometryAlgoType::COMMON ? 0.11f : 0.015f;
|
||||
double possibleError = algtype == OdometryAlgoType::COMMON ? 0.015f : 0.01f;
|
||||
|
||||
Affine3f src = Affine3f(Vec3f(rvec), Vec3f(tvec));
|
||||
Affine3f res = Affine3f(Vec3f(calcRvec), Vec3f(calcTvec));
|
||||
@ -275,44 +267,178 @@ void OdometryTest::prepareFrameCheck()
|
||||
{
|
||||
Mat K = getCameraMatrix();
|
||||
|
||||
Mat image, depth;
|
||||
readData(image, depth);
|
||||
Mat gtImage, gtDepth;
|
||||
readData(gtImage, gtDepth);
|
||||
OdometrySettings ods;
|
||||
ods.setCameraMatrix(K);
|
||||
Odometry odometry = Odometry(otype, ods, algtype);
|
||||
OdometryFrame odf = odometry.createOdometryFrame();
|
||||
odf.setImage(image);
|
||||
odf.setDepth(depth);
|
||||
OdometryFrame odf(gtImage, gtDepth);
|
||||
|
||||
odometry.prepareFrame(odf);
|
||||
|
||||
Mat points, mask;
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(mask, OdometryFramePyramidType::PYR_MASK, 0);
|
||||
std::vector<int> iters;
|
||||
ods.getIterCounts(iters);
|
||||
size_t nlevels = iters.size();
|
||||
|
||||
OdometryFrame todf = odometry.createOdometryFrame();
|
||||
if (otype != OdometryType::DEPTH)
|
||||
Mat points, mask, depth, gray, rgb, scaled;
|
||||
|
||||
odf.getMask(mask);
|
||||
int masknz = countNonZero(mask);
|
||||
ASSERT_GT(masknz, 0);
|
||||
|
||||
odf.getDepth(depth);
|
||||
Mat patchedDepth = depth.clone();
|
||||
patchNaNs(patchedDepth, 0);
|
||||
int depthnz = countNonZero(patchedDepth);
|
||||
double depthNorm = cv::norm(depth, gtDepth, NORM_INF, mask);
|
||||
ASSERT_LE(depthNorm, 0.0);
|
||||
|
||||
Mat gtGray;
|
||||
if (otype == OdometryType::RGB || otype == OdometryType::RGB_DEPTH)
|
||||
{
|
||||
Mat img;
|
||||
odf.getPyramidAt(img, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
todf.setPyramidLevel(1, OdometryFramePyramidType::PYR_IMAGE);
|
||||
todf.setPyramidAt(img, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
todf.setPyramidLevel(1, OdometryFramePyramidType::PYR_CLOUD);
|
||||
todf.setPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
todf.setPyramidLevel(1, OdometryFramePyramidType::PYR_MASK);
|
||||
todf.setPyramidAt(mask, OdometryFramePyramidType::PYR_MASK, 0);
|
||||
odf.getGrayImage(gray);
|
||||
odf.getImage(rgb);
|
||||
double rgbNorm = cv::norm(rgb, gtImage);
|
||||
ASSERT_LE(rgbNorm, 0.0);
|
||||
|
||||
odometry.prepareFrame(todf);
|
||||
if (gtImage.channels() == 3)
|
||||
cvtColor(gtImage, gtGray, COLOR_BGR2GRAY);
|
||||
else
|
||||
gtGray = gtImage;
|
||||
gtGray.convertTo(gtGray, CV_8U);
|
||||
double grayNorm = cv::norm(gray, gtGray);
|
||||
ASSERT_LE(grayNorm, 0.0);
|
||||
}
|
||||
|
||||
//TODO: remove it when scale issue is fixed
|
||||
odf.getScaledDepth(scaled);
|
||||
int scalednz = countNonZero(scaled);
|
||||
EXPECT_EQ(scalednz, depthnz);
|
||||
|
||||
std::vector<Mat> gtPyrDepth, gtPyrMask;
|
||||
//TODO: this depth calculation will become incorrect when we implement bilateral filtering, fix it
|
||||
buildPyramid(gtDepth, gtPyrDepth, nlevels - 1);
|
||||
for (const auto& gd : gtPyrDepth)
|
||||
{
|
||||
Mat pm = (gd > ods.getMinDepth()) & (gd < ods.getMaxDepth());
|
||||
gtPyrMask.push_back(pm);
|
||||
}
|
||||
|
||||
size_t npyr = odf.getPyramidLevels();
|
||||
ASSERT_EQ(npyr, nlevels);
|
||||
Matx33f levelK = K;
|
||||
for (size_t i = 0; i < nlevels; i++)
|
||||
{
|
||||
Mat depthi, cloudi, maski;
|
||||
|
||||
odf.getPyramidAt(maski, OdometryFramePyramidType::PYR_MASK, i);
|
||||
ASSERT_FALSE(maski.empty());
|
||||
double mnorm = cv::norm(maski, gtPyrMask[i]);
|
||||
EXPECT_LE(mnorm, 16 * 255.0) << "Mask diff is too big at pyr level " << i;
|
||||
|
||||
odf.getPyramidAt(depthi, OdometryFramePyramidType::PYR_DEPTH, i);
|
||||
ASSERT_FALSE(depthi.empty());
|
||||
double dnorm = cv::norm(depthi, gtPyrDepth[i], NORM_INF, maski);
|
||||
EXPECT_LE(dnorm, 8.e-7) << "Depth diff norm is too big at pyr level " << i;
|
||||
|
||||
odf.getPyramidAt(cloudi, OdometryFramePyramidType::PYR_CLOUD, i);
|
||||
ASSERT_FALSE(cloudi.empty());
|
||||
Mat gtCloud;
|
||||
depthTo3d(depthi, levelK, gtCloud);
|
||||
double cnorm = cv::norm(cloudi, gtCloud, NORM_INF, maski);
|
||||
EXPECT_LE(cnorm, 0.0) << "Cloud diff norm is too big at pyr level " << i;
|
||||
// downscale camera matrix for next pyramid level
|
||||
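// Multiplying the whole matrix by 0.5 halves fx, fy, cx and cy, matching the
// image being downscaled by 2 at each pyramid level; K(2, 2) is reset to 1
// afterwards because the scalar multiply would otherwise scale it as well.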
levelK = 0.5f * levelK;
|
||||
levelK(2, 2) = 1.f;
|
||||
}
|
||||
|
||||
if (otype == OdometryType::RGB || otype == OdometryType::RGB_DEPTH)
|
||||
{
|
||||
std::vector<Mat> gtPyrImage;
|
||||
buildPyramid(gtGray, gtPyrImage, nlevels - 1);
|
||||
|
||||
for (size_t i = 0; i < nlevels; i++)
|
||||
{
|
||||
Mat rgbi, texi, dixi, diyi, maski;
|
||||
odf.getPyramidAt(maski, OdometryFramePyramidType::PYR_MASK, i);
|
||||
odf.getPyramidAt(rgbi, OdometryFramePyramidType::PYR_IMAGE, i);
|
||||
ASSERT_FALSE(rgbi.empty());
|
||||
double rnorm = cv::norm(rgbi, gtPyrImage[i], NORM_INF);
|
||||
EXPECT_LE(rnorm, 1.0) << "RGB diff is too big at pyr level " << i;
|
||||
odf.getPyramidAt(texi, OdometryFramePyramidType::PYR_TEXMASK, i);
|
||||
ASSERT_FALSE(texi.empty());
|
||||
int tnz = countNonZero(texi);
|
||||
EXPECT_GE(tnz, 1000) << "Texture mask has too few valid pixels at pyr level " << i;
|
||||
Mat gtDixi, gtDiyi;
|
||||
Sobel(rgbi, gtDixi, CV_16S, 1, 0, ods.getSobelSize());
|
||||
odf.getPyramidAt(dixi, OdometryFramePyramidType::PYR_DIX, i);
|
||||
ASSERT_FALSE(dixi.empty());
|
||||
double dixnorm = cv::norm(dixi, gtDixi, NORM_INF, maski);
|
||||
EXPECT_LE(dixnorm, 0) << "dI/dx diff is too big at pyr level " << i;
|
||||
Sobel(rgbi, gtDiyi, CV_16S, 0, 1, ods.getSobelSize());
|
||||
odf.getPyramidAt(diyi, OdometryFramePyramidType::PYR_DIY, i);
|
||||
ASSERT_FALSE(diyi.empty());
|
||||
double diynorm = cv::norm(diyi, gtDiyi, NORM_INF, maski);
|
||||
EXPECT_LE(diynorm, 0) << "dI/dy diff is too big at pyr level " << i;
|
||||
}
|
||||
}
|
||||
|
||||
if (otype == OdometryType::DEPTH || otype == OdometryType::RGB_DEPTH)
|
||||
{
|
||||
Ptr<RgbdNormals> normalComputer = odometry.getNormalsComputer();
|
||||
ASSERT_FALSE(normalComputer.empty());
|
||||
Mat normals;
|
||||
odf.getNormals(normals);
|
||||
std::vector<Mat> gtPyrNormals;
|
||||
buildPyramid(normals, gtPyrNormals, nlevels - 1);
|
||||
for (size_t i = 0; i < nlevels; i++)
|
||||
{
|
||||
Mat gtNormal = gtPyrNormals[i];
|
||||
CV_Assert(gtNormal.type() == CV_32FC4);
|
||||
for (int y = 0; y < gtNormal.rows; y++)
|
||||
{
|
||||
Vec4f *normals_row = gtNormal.ptr<Vec4f>(y);
|
||||
for (int x = 0; x < gtNormal.cols; x++)
|
||||
{
|
||||
Vec4f n4 = normals_row[x];
|
||||
Point3f n(n4[0], n4[1], n4[2]);
|
||||
double nrm = cv::norm(n);
|
||||
n *= 1.f / nrm;
|
||||
normals_row[x] = Vec4f(n.x, n.y, n.z, 0);
|
||||
}
|
||||
}
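// buildPyramid averages neighbouring unit normals, so the downsampled vectors
// are generally shorter than unit length; renormalizing them above makes the
// ground truth comparable to the frame's PYR_NORM pyramid.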
|
||||
|
||||
Mat normmaski;
|
||||
odf.getPyramidAt(normmaski, OdometryFramePyramidType::PYR_NORMMASK, i);
|
||||
ASSERT_FALSE(normmaski.empty());
|
||||
int nnm = countNonZero(normmaski);
|
||||
EXPECT_GE(nnm, 1000) << "Normals mask has too few valid pixels at pyr level " << i;
|
||||
|
||||
Mat ptsi;
|
||||
odf.getPyramidAt(ptsi, OdometryFramePyramidType::PYR_CLOUD, i);
|
||||
|
||||
Mat normi;
|
||||
odf.getPyramidAt(normi, OdometryFramePyramidType::PYR_NORM, i);
|
||||
ASSERT_FALSE(normi.empty());
|
||||
double nnorm = cv::norm(normi, gtNormal, NORM_INF, normmaski);
|
||||
EXPECT_LE(nnorm, 1.8e-7) << "Normals diff is too big at pyr level " << i;
|
||||
|
||||
if (i == 0)
|
||||
{
|
||||
double pnnorm = cv::norm(normals, normi, NORM_INF, normmaski);
|
||||
EXPECT_GE(pnnorm, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************************\
|
||||
* Tests registrations *
|
||||
\****************************************************************************************/
|
||||
|
||||
TEST(RGBD_Odometry_Rgbd, algorithmic)
|
||||
TEST(RGBD_Odometry_Rgb, algorithmic)
|
||||
{
|
||||
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.89);
|
||||
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.99);
|
||||
test.run();
|
||||
}
|
||||
|
||||
@ -330,14 +456,14 @@ TEST(RGBD_Odometry_RgbdICP, algorithmic)
|
||||
|
||||
TEST(RGBD_Odometry_FastICP, algorithmic)
|
||||
{
|
||||
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.89, FLT_EPSILON);
|
||||
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.87, 1.84e-5);
|
||||
test.run();
|
||||
}
|
||||
|
||||
|
||||
TEST(RGBD_Odometry_Rgbd, UMats)
|
||||
TEST(RGBD_Odometry_Rgb, UMats)
|
||||
{
|
||||
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.89);
|
||||
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.99);
|
||||
test.checkUMats();
|
||||
}
|
||||
|
||||
@ -355,14 +481,15 @@ TEST(RGBD_Odometry_RgbdICP, UMats)
|
||||
|
||||
TEST(RGBD_Odometry_FastICP, UMats)
|
||||
{
|
||||
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.89, FLT_EPSILON);
|
||||
// OpenCL version has slightly less accuracy than CPU version
|
||||
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.99, 1.84e-5);
|
||||
test.checkUMats();
|
||||
}
|
||||
|
||||
|
||||
TEST(RGBD_Odometry_Rgbd, prepareFrame)
|
||||
TEST(RGBD_Odometry_Rgb, prepareFrame)
|
||||
{
|
||||
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.89);
|
||||
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.99);
|
||||
test.prepareFrameCheck();
|
||||
}
|
||||
|
||||
@ -380,7 +507,7 @@ TEST(RGBD_Odometry_RgbdICP, prepareFrame)
|
||||
|
||||
TEST(RGBD_Odometry_FastICP, prepareFrame)
|
||||
{
|
||||
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.89, FLT_EPSILON);
|
||||
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.99, FLT_EPSILON);
|
||||
test.prepareFrameCheck();
|
||||
}
|
||||
|
||||
|
@ -489,9 +489,7 @@ void normal_test_custom_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
Mat rgb = scene->rgb(poses[0]);
|
||||
Mat points, normals, tmpnormals, colors;
|
||||
|
||||
OdometryFrame odf;
|
||||
odf.setDepth(depth);
|
||||
odf.setImage(rgb);
|
||||
OdometryFrame odf(rgb, depth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
@ -507,21 +505,10 @@ void normal_test_custom_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
|
||||
if (testFunction == VolumeTestFunction::RAYCAST)
|
||||
{
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, odf);
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals);
|
||||
}
|
||||
else if (testFunction == VolumeTestFunction::FETCH_NORMALS)
|
||||
{
|
||||
@ -568,9 +555,7 @@ void normal_test_common_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
Mat rgb = scene->rgb(poses[0]);
|
||||
Mat points, normals, tmpnormals, colors;
|
||||
|
||||
OdometryFrame odf;
|
||||
odf.setDepth(depth);
|
||||
odf.setImage(rgb);
|
||||
OdometryFrame odf(rgb, depth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
@ -586,21 +571,10 @@ void normal_test_common_framesize(VolumeType volumeType, VolumeTestFunction test
|
||||
|
||||
if (testFunction == VolumeTestFunction::RAYCAST)
|
||||
{
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, points, normals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, odf);
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, points, normals);
|
||||
}
|
||||
else if (testFunction == VolumeTestFunction::FETCH_NORMALS)
|
||||
{
|
||||
@ -648,9 +622,7 @@ void valid_points_test_custom_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
Mat points, normals, colors, newPoints, newNormals;
|
||||
int anfas, profile;
|
||||
|
||||
OdometryFrame odf;
|
||||
odf.setDepth(depth);
|
||||
odf.setImage(rgb);
|
||||
OdometryFrame odf(rgb, depth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
@ -664,21 +636,10 @@ void valid_points_test_custom_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
volume.integrate(odf, poses[0].matrix);
|
||||
}
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, odf);
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, frameSize.height, frameSize.width, points, normals);
|
||||
|
||||
patchNaNs(points);
|
||||
anfas = counterOfValid(points);
|
||||
@ -694,21 +655,10 @@ void valid_points_test_custom_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
points.release();
|
||||
normals.release();
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, points, normals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, odf);
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[17].matrix, frameSize.height, frameSize.width, points, normals);
|
||||
|
||||
patchNaNs(points);
|
||||
profile = counterOfValid(points);
|
||||
@ -748,9 +698,7 @@ void valid_points_test_common_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
Mat points, normals, colors, newPoints, newNormals;
|
||||
int anfas, profile;
|
||||
|
||||
OdometryFrame odf;
|
||||
odf.setDepth(depth);
|
||||
odf.setImage(rgb);
|
||||
OdometryFrame odf(rgb, depth);
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT)
|
||||
{
|
||||
@ -764,21 +712,10 @@ void valid_points_test_common_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
volume.integrate(odf, poses[0].matrix);
|
||||
}
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, points, normals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[0].matrix, odf);
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[0].matrix, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[0].matrix, points, normals);
|
||||
|
||||
patchNaNs(points);
|
||||
anfas = counterOfValid(points);
|
||||
@ -794,21 +731,10 @@ void valid_points_test_common_framesize(VolumeType volumeType, VolumeTestSrcType
|
||||
points.release();
|
||||
normals.release();
|
||||
|
||||
if (testSrcType == VolumeTestSrcType::MAT) // Odometry frame or Mats
|
||||
{
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[17].matrix, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[17].matrix, points, normals);
|
||||
}
|
||||
else if (testSrcType == VolumeTestSrcType::ODOMETRY_FRAME)
|
||||
{
|
||||
volume.raycast(poses[17].matrix, odf);
|
||||
odf.getPyramidAt(points, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
odf.getPyramidAt(normals, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
odf.getPyramidAt(colors, OdometryFramePyramidType::PYR_IMAGE, 0);
|
||||
}
|
||||
if (volumeType == VolumeType::ColorTSDF)
|
||||
volume.raycast(poses[17].matrix, points, normals, colors);
|
||||
else
|
||||
volume.raycast(poses[17].matrix, points, normals);
|
||||
|
||||
patchNaNs(points);
|
||||
profile = counterOfValid(points);
|
||||
|