Mirror of https://github.com/opencv/opencv.git (synced 2025-06-07 17:44:04 +08:00)
Merge pull request #22925 from savuor:pytsdf_from_scratch
Fixes #22799. Replaces #21559, which was taken as a base. Connected PR in contrib: [#3388@contrib](https://github.com/opencv/opencv_contrib/pull/3388)

### Changes

OK, now this is more an Odometry-related PR than a Volume-related one. Anyway:

* `Volume` class gets wrapped
* The same was done for helper classes like `VolumeSettings`, `OdometryFrame` and `OdometrySettings`
* `OdometryFrame` constructor signature changed to a more convenient one where depth goes in 1st place and the RGB image in 2nd. This works better for depth-only `Odometry` algorithms.
* `OdometryFrame` is checked for the amount of pyramid layers inside `Odometry::compute()`
* `Odometry` was fully wrapped + more docs added
* Added Python tests for `Odometry`, `OdometryFrame` and `Volume`
* Added Python sample for `Volume`
* Minor fixes including better var names

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is an accuracy test, performance test and test data in the opencv_extra repository, if applicable. The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
This commit is contained in: parent 86c6e07326, commit d49958141e
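As a quick illustration of the API shape described above, here is a minimal Python sketch. The depth maps are hypothetical placeholders; the constant and method names follow the new Python tests added in this PR:

```python
import numpy as np
import cv2 as cv

# Hypothetical inputs: two float32 depth maps of the same size.
src_depth = np.full((480, 640), 2.0, np.float32)
dst_depth = np.full((480, 640), 2.0, np.float32)

# OdometryFrame now takes depth first, RGB second; RGB may be omitted
# for depth-only odometry.
src_frame = cv.OdometryFrame(src_depth)
dst_frame = cv.OdometryFrame(dst_depth)

odometry = cv.Odometry(cv.OdometryType_DEPTH)
odometry.prepareFrames(src_frame, dst_frame)

Rt = np.zeros((4, 4))
ok = odometry.compute(src_frame, dst_frame, Rt)  # Rt * src = dst on success
```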
Submap class (the `volume` member is now constructed in the initializer list, and `OdometryFrame` uses the new depth-first signature):

```diff
@@ -45,10 +45,9 @@ public:
     Submap(int _id, const VolumeSettings& settings, const cv::Affine3f& _pose = cv::Affine3f::Identity(),
            int _startFrameId = 0)
-        : id(_id), pose(_pose), cameraPose(Affine3f::Identity()), startFrameId(_startFrameId)
-    {
-        volume = Volume(VolumeType::HashTSDF, settings);
-    }
+        : id(_id), pose(_pose), cameraPose(Affine3f::Identity()), startFrameId(_startFrameId),
+          volume(VolumeType::HashTSDF, settings)
+    { }
     virtual ~Submap() = default;

     virtual void integrate(InputArray _depth, const int currframeId);
@@ -126,7 +125,7 @@ void Submap<MatType>::raycast(const cv::Affine3f& _cameraPose, cv::Size frameSiz
         renderFrame = frame;

-        frame = OdometryFrame(noArray(), pch[2]);
+        frame = OdometryFrame(pch[2]);
     }
     else
     {
```
Odometry type enum:

```diff
@@ -18,7 +18,7 @@ namespace cv
 * @param RGB only rgb image
 * @param RGB_DEPTH depth and rgb data simultaneously
 */
-enum OdometryType
+enum class OdometryType
 {
     DEPTH = 0,
     RGB = 1,
```
Odometry class declaration:

```diff
@@ -39,22 +39,24 @@ class CV_EXPORTS_W Odometry
 {
 public:
     CV_WRAP Odometry();
-    CV_WRAP Odometry(OdometryType otype);
-    Odometry(OdometryType otype, const OdometrySettings settings, OdometryAlgoType algtype);
+    CV_WRAP explicit Odometry(OdometryType otype);
+    CV_WRAP Odometry(OdometryType otype, const OdometrySettings& settings, OdometryAlgoType algtype);
     ~Odometry();

     /** Prepare frame for odometry calculation
     * @param frame odometry prepare this frame as src frame and dst frame simultaneously
     */
-    void prepareFrame(OdometryFrame& frame) const;
+    CV_WRAP void prepareFrame(OdometryFrame& frame) const;

     /** Prepare frame for odometry calculation
     * @param srcFrame frame will be prepared as src frame ("original" image)
     * @param dstFrame frame will be prepared as dst frame ("rotated" image)
     */
-    void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const;
+    CV_WRAP void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const;

     /** Compute Rigid Transformation between two frames so that Rt * src = dst
+    * Both frames, source and destination, should have been prepared by calling prepareFrame() first
+    *
     * @param srcFrame src frame ("original" image)
     * @param dstFrame dst frame ("rotated" image)
     * @param Rt Rigid transformation, which will be calculated, in form:
@@ -62,18 +64,44 @@ public:
     * R_21 R_22 R_23 t_2
     * R_31 R_32 R_33 t_3
     * 0    0    0    1 }
+    * @return true on success, false if failed to find the transformation
     */
-    bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const;
-    CV_WRAP bool compute(InputArray srcDepthFrame, InputArray dstDepthFrame, OutputArray Rt) const;
-    CV_WRAP bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame, InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const;
+    CV_WRAP bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const;
+
+    /**
+    * @brief Compute Rigid Transformation between two frames so that Rt * src = dst
+    *
+    * @param srcDepth source depth ("original" image)
+    * @param dstDepth destination depth ("rotated" image)
+    * @param Rt Rigid transformation, which will be calculated, in form:
+    * { R_11 R_12 R_13 t_1
+    *   R_21 R_22 R_23 t_2
+    *   R_31 R_32 R_33 t_3
+    *   0    0    0    1 }
+    * @return true on success, false if failed to find the transformation
+    */
+    CV_WRAP bool compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const;
+
+    /**
+    * @brief Compute Rigid Transformation between two frames so that Rt * src = dst
+    *
+    * @param srcDepth source depth ("original" image)
+    * @param srcRGB source RGB
+    * @param dstDepth destination depth ("rotated" image)
+    * @param dstRGB destination RGB
+    * @param Rt Rigid transformation, which will be calculated, in form:
+    * { R_11 R_12 R_13 t_1
+    *   R_21 R_22 R_23 t_2
+    *   R_31 R_32 R_33 t_3
+    *   0    0    0    1 }
+    * @return true on success, false if failed to find the transformation
+    */
+    CV_WRAP bool compute(InputArray srcDepth, InputArray srcRGB, InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const;

     /**
     * @brief Get the normals computer object used for normals calculation (if presented).
     * The normals computer is generated at first need during prepareFrame when normals are required for the ICP algorithm
     * but not presented by a user. Re-generated each time the related settings change or a new frame arrives with the different size.
     */
-    Ptr<RgbdNormals> getNormalsComputer() const;
+    CV_WRAP Ptr<RgbdNormals> getNormalsComputer() const;

     class Impl;
 private:
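```

A hedged sketch of how these `compute` overloads surface in Python after wrapping (the arrays below are placeholders; the call pattern mirrors the updated Python tests further down):

```python
import numpy as np
import cv2 as cv

depth_src = np.full((480, 640), 1.5, np.float32)   # placeholder depth maps
depth_dst = np.full((480, 640), 1.5, np.float32)
Rt = np.zeros((4, 4))

odom = cv.Odometry(cv.OdometryType_DEPTH)
ok = odom.compute(depth_src, depth_dst, Rt)         # depth-only overload

rgb_src = np.zeros((480, 640, 3), np.uint8)         # placeholder RGB images
rgb_dst = np.zeros((480, 640, 3), np.uint8)
odom_rgbd = cv.Odometry(cv.OdometryType_RGB_DEPTH)
ok = odom_rgbd.compute(depth_src, rgb_src, depth_dst, rgb_dst, Rt)
```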
OdometryFrame class declaration (depth now comes first in the constructor; accessors gain `CV_WRAP`):

```diff
@@ -29,24 +29,24 @@ enum OdometryFramePyramidType
 /**
 * @brief An object that keeps per-frame data for Odometry algorithms from user-provided images to algorithm-specific precalculated data.
 * When not empty, it contains a depth image, a mask of valid pixels and a set of pyramids generated from that data.
- * An BGR/Gray image and normals are optional.
+ * A BGR/Gray image and normals are optional.
 * OdometryFrame is made to be used together with Odometry class to reuse precalculated data between Rt data calculations.
- * A proper way to do that is to call Odometry::prepareFrames() on prev and next frames and then pass them to Odometry::compute() method.
+ * A correct way to do that is to call Odometry::prepareFrames() on prev and next frames and then pass them to Odometry::compute() method.
 */
-class CV_EXPORTS_W OdometryFrame
+class CV_EXPORTS_W_SIMPLE OdometryFrame
 {
 public:
     /**
     * @brief Construct a new OdometryFrame object. All non-empty images should have the same size.
     *
+    * @param depth A depth image, should be CV_8UC1
     * @param image An BGR or grayscale image (or noArray() if it's not required for used ICP algorithm).
     * Should be CV_8UC3 or CV_8C4 if it's BGR image or CV_8UC1 if it's grayscale. If it's BGR then it's converted to grayscale
     * image automatically.
-    * @param depth A depth image, should be CV_8UC1
     * @param mask A user-provided mask of valid pixels, should be CV_8UC1
     * @param normals A user-provided normals to the depth surface, should be CV_32FC4
     */
-    OdometryFrame(InputArray image = noArray(), InputArray depth = noArray(), InputArray mask = noArray(), InputArray normals = noArray());
+    CV_WRAP explicit OdometryFrame(InputArray depth = noArray(), InputArray image = noArray(), InputArray mask = noArray(), InputArray normals = noArray());
     ~OdometryFrame() {};
@@ -54,43 +54,43 @@ public:
     * @param image Output image
     */
-    void getImage(OutputArray image) const;
+    CV_WRAP void getImage(OutputArray image) const;
     /** @brief Get the gray image generated from the user-provided BGR/Gray image */
-    void getGrayImage(OutputArray image) const;
+    CV_WRAP void getGrayImage(OutputArray image) const;
     /** @brief Get the original user-provided depth image */
-    void getDepth(OutputArray depth) const;
+    CV_WRAP void getDepth(OutputArray depth) const;
     /** @brief Get the depth image generated from the user-provided one after conversion, rescale or filtering for ICP algorithm needs */
-    void getProcessedDepth(OutputArray depth) const;
+    CV_WRAP void getProcessedDepth(OutputArray depth) const;
     /** @brief Get the valid pixels mask generated for the ICP calculations intersected with the user-provided mask */
-    void getMask(OutputArray mask) const;
+    CV_WRAP void getMask(OutputArray mask) const;
     /** @brief Get the normals image either generated for the ICP calculations or user-provided */
-    void getNormals(OutputArray normals) const;
+    CV_WRAP void getNormals(OutputArray normals) const;

     /** @brief Get the amount of levels in pyramids (all of them if not empty should have the same number of levels)
     * or 0 if no pyramids were prepared yet */
-    size_t getPyramidLevels() const;
+    CV_WRAP int getPyramidLevels() const;
     /** @brief Get the image generated for the ICP calculations from one of the pyramids specified by pyrType. Returns empty image if
     * the pyramid is empty or there's no such pyramid level
@@ -99,7 +99,7 @@ public:
     * @param pyrType Type of pyramid
     * @param level Level in the pyramid
     */
-    void getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const;
+    CV_WRAP void getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const;

     class Impl;
     Ptr<Impl> impl;
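```

A short sketch of the wrapped `OdometryFrame` accessors from Python. The return-value form of the `OutputArray` getters is an assumption about how the generated bindings behave, not something stated in this diff:

```python
import numpy as np
import cv2 as cv

depth = np.full((480, 640), 1.5, np.float32)   # placeholder depth map
frame = cv.OdometryFrame(depth)                # depth goes first in the new signature

odom = cv.Odometry(cv.OdometryType_DEPTH)
odom.prepareFrame(frame)

levels = frame.getPyramidLevels()              # 0 until the frame has been prepared
d = frame.getDepth()                           # assumed: OutputArray getters return the array in Python
```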
OdometrySettings class declaration:

```diff
@@ -10,48 +10,50 @@
 namespace cv
 {

-class CV_EXPORTS_W OdometrySettings
+class CV_EXPORTS_W_SIMPLE OdometrySettings
 {
 public:
-    OdometrySettings();
+    CV_WRAP OdometrySettings();
+    OdometrySettings(const OdometrySettings&);
+    OdometrySettings& operator=(const OdometrySettings&);
     ~OdometrySettings() {};
```

Every setter/getter declaration in the class gained the `CV_WRAP` macro; the signatures are otherwise unchanged:

```cpp
CV_WRAP void setCameraMatrix(InputArray val);
CV_WRAP void getCameraMatrix(OutputArray val) const;
CV_WRAP void setIterCounts(InputArray val);
CV_WRAP void getIterCounts(OutputArray val) const;

CV_WRAP void setMinDepth(float val);
CV_WRAP float getMinDepth() const;
CV_WRAP void setMaxDepth(float val);
CV_WRAP float getMaxDepth() const;
CV_WRAP void setMaxDepthDiff(float val);
CV_WRAP float getMaxDepthDiff() const;
CV_WRAP void setMaxPointsPart(float val);
CV_WRAP float getMaxPointsPart() const;

CV_WRAP void setSobelSize(int val);
CV_WRAP int getSobelSize() const;
CV_WRAP void setSobelScale(double val);
CV_WRAP double getSobelScale() const;

CV_WRAP void setNormalWinSize(int val);
CV_WRAP int getNormalWinSize() const;
CV_WRAP void setNormalDiffThreshold(float val);
CV_WRAP float getNormalDiffThreshold() const;
CV_WRAP void setNormalMethod(RgbdNormals::RgbdNormalsMethod nm);
CV_WRAP RgbdNormals::RgbdNormalsMethod getNormalMethod() const;

CV_WRAP void setAngleThreshold(float val);
CV_WRAP float getAngleThreshold() const;
CV_WRAP void setMaxTranslation(float val);
CV_WRAP float getMaxTranslation() const;
CV_WRAP void setMaxRotation(float val);
CV_WRAP float getMaxRotation() const;

CV_WRAP void setMinGradientMagnitude(float val);
CV_WRAP float getMinGradientMagnitude() const;
CV_WRAP void setMinGradientMagnitudes(InputArray val);
CV_WRAP void getMinGradientMagnitudes(OutputArray val) const;
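```

A minimal sketch of driving these newly wrapped settings from Python; the specific values are illustrative only, not recommendations:

```python
import cv2 as cv

settings = cv.OdometrySettings()
settings.setMaxDepth(4.0)        # truncate depth beyond 4 m (illustrative value)
settings.setMaxRotation(15.0)
print(settings.getMaxDepth(), settings.getMaxRotation())

odometry = cv.Odometry(cv.OdometryType_DEPTH, settings, cv.OdometryAlgoType_COMMON)
```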
Volume class declaration (the separate default constructor is folded into a defaulted wrapped constructor; overloads get distinct Python names via `CV_WRAP_AS`):

```diff
@@ -15,15 +15,13 @@ namespace cv
 class CV_EXPORTS_W Volume
 {
 public:
-    /** @brief Constructor of default volume - TSDF.
-    */
-    Volume();
     /** @brief Constructor of custom volume.
     * @param vtype the volume type [TSDF, HashTSDF, ColorTSDF].
     * @param settings the custom settings for volume.
     */
-    Volume(VolumeType vtype, VolumeSettings settings);
-    virtual ~Volume();
+    CV_WRAP explicit Volume(VolumeType vtype = VolumeType::TSDF,
+                            const VolumeSettings& settings = VolumeSettings(VolumeType::TSDF));
+    ~Volume();

     /** @brief Integrates the input data to the volume.
@@ -34,7 +32,7 @@ public:
     This can be done using function registerDepth() from 3d module.
     * @param pose the pose of camera in global coordinates.
     */
-    void integrate(const OdometryFrame& frame, InputArray pose);
+    CV_WRAP_AS(integrateFrame) void integrate(const OdometryFrame& frame, InputArray pose);
@@ -43,7 +41,7 @@ public:
     * @param depth the depth image.
     * @param pose the pose of camera in global coordinates.
     */
-    void integrate(InputArray depth, InputArray pose);
+    CV_WRAP_AS(integrate) void integrate(InputArray depth, InputArray pose);
@@ -55,7 +53,19 @@ public:
     This can be done using function registerDepth() from 3d module.
     * @param pose the pose of camera in global coordinates.
     */
-    void integrate(InputArray depth, InputArray image, InputArray pose);
+    CV_WRAP_AS(integrateColor) void integrate(InputArray depth, InputArray image, InputArray pose);
+
+    // Python bindings do not process noArray() default argument correctly, so the raycast() method is repeated several times
+
+    /** @brief Renders the volume contents into an image. The resulting points and normals are in camera's coordinate system.
+    Rendered image size and camera intrinsics are taken from volume settings structure.
+    * @param cameraPose the pose of camera in global coordinates.
+    * @param points image to store rendered points.
+    * @param normals image to store rendered normals corresponding to points.
+    */
+    CV_WRAP void raycast(InputArray cameraPose, OutputArray points, OutputArray normals) const;

     /** @brief Renders the volume contents into an image. The resulting points and normals are in camera's coordinate system.
@@ -66,7 +76,21 @@ public:
     * @param normals image to store rendered normals corresponding to points.
     * @param colors image to store rendered colors corresponding to points (only for ColorTSDF).
     */
-    void raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors = noArray()) const;
+    CV_WRAP_AS(raycastColor) void raycast(InputArray cameraPose, OutputArray points, OutputArray normals, OutputArray colors) const;
+
+    /** @brief Renders the volume contents into an image. The resulting points and normals are in camera's coordinate system.
+    Rendered image size and camera intrinsics are taken from volume settings structure.
+    * @param cameraPose the pose of camera in global coordinates.
+    * @param height the height of result image
+    * @param width the width of result image
+    * @param K camera raycast intrinsics
+    * @param points image to store rendered points.
+    * @param normals image to store rendered normals corresponding to points.
+    */
+    CV_WRAP_AS(raycastEx) void raycast(InputArray cameraPose, int height, int width, InputArray K, OutputArray points, OutputArray normals) const;

     /** @brief Renders the volume contents into an image. The resulting points and normals are in camera's coordinate system.
@@ -80,60 +104,63 @@ public:
     * @param normals image to store rendered normals corresponding to points.
     * @param colors image to store rendered colors corresponding to points (only for ColorTSDF).
     */
-    void raycast(InputArray cameraPose, int height, int width, InputArray K, OutputArray points, OutputArray normals, OutputArray colors = noArray()) const;
+    CV_WRAP_AS(raycastExColor) void raycast(InputArray cameraPose, int height, int width, InputArray K, OutputArray points, OutputArray normals, OutputArray colors) const;

     /** @brief Extract the all data from volume.
     * @param points the input exist point.
     * @param normals the storage of normals (corresponding to input points) in the image.
     */
-    void fetchNormals(InputArray points, OutputArray normals) const;
+    CV_WRAP void fetchNormals(InputArray points, OutputArray normals) const;
     /** @brief Extract the all data from volume.
     * @param points the storage of all points.
     * @param normals the storage of all normals, corresponding to points.
     */
-    void fetchPointsNormals(OutputArray points, OutputArray normals) const;
+    CV_WRAP void fetchPointsNormals(OutputArray points, OutputArray normals) const;
     /** @brief Extract the all data from volume.
     * @param colors the storage of all colors, corresponding to points (only for ColorTSDF).
     */
-    void fetchPointsNormalsColors(OutputArray points, OutputArray normals, OutputArray colors) const;
+    CV_WRAP void fetchPointsNormalsColors(OutputArray points, OutputArray normals, OutputArray colors) const;

     /** @brief clear all data in volume.
     */
-    void reset();
+    CV_WRAP void reset();

     //TODO: remove this
     /** @brief return visible blocks in volume.
     */
-    int getVisibleBlocks() const;
+    CV_WRAP int getVisibleBlocks() const;

     /** @brief return number of volume units in volume.
     */
-    size_t getTotalVolumeUnits() const;
+    CV_WRAP size_t getTotalVolumeUnits() const;

     enum BoundingBoxPrecision
     {
         VOLUME_UNIT = 0,
         VOXEL = 1
     };
-    /** @brief Gets bounding box in volume coordinates with given precision:
+
+    /**
+    * @brief Gets bounding box in volume coordinates with given precision:
     * VOLUME_UNIT - up to volume unit
     * VOXEL - up to voxel (currently not supported)
-    * @return (min_x, min_y, min_z, max_x, max_y, max_z) in volume coordinates
+    * @param bb 6-float 1d array containing (min_x, min_y, min_z, max_x, max_y, max_z) in volume coordinates
+    * @param precision bounding box calculation precision
     */
-    virtual Vec6f getBoundingBox(int precision) const;
+    CV_WRAP void getBoundingBox(OutputArray bb, int precision) const;

     /**
     * @brief Enables or disables new volume unit allocation during integration.
     * Makes sense for HashTSDF only.
     */
-    virtual void setEnableGrowth(bool v);
+    CV_WRAP void setEnableGrowth(bool v);
     /**
     * @brief Returns if new volume units are allocated during integration or not.
     * Makes sense for HashTSDF only.
     */
-    virtual bool getEnableGrowth() const;
+    CV_WRAP bool getEnableGrowth() const;

     class Impl;
 private:
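```

A hedged Python sketch of the wrapped `Volume` API; it mirrors the new `test_volume.py` further below, with `raycastEx` being the Python-side name produced by `CV_WRAP_AS`, and the depth map used here is a placeholder:

```python
import numpy as np
import cv2 as cv

depth = np.full((480, 640), 2.0, np.float32)   # placeholder depth map
Rt = np.eye(4)

volume = cv.Volume()                            # default TSDF volume
volume.integrate(depth, Rt)

points  = np.zeros((480, 640, 4), np.float32)
normals = np.zeros((480, 640, 4), np.float32)
volume.raycast(Rt, points, normals)             # size/intrinsics taken from VolumeSettings

K = np.array([[525., 0., 319.5], [0., 525., 239.5], [0., 0., 1.]])
volume.raycastEx(Rt, 480, 640, K, points, normals)
```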
VolumeSettings class declaration:

```diff
@@ -20,126 +20,121 @@ enum class VolumeType
 };


-class CV_EXPORTS_W VolumeSettings
+class CV_EXPORTS_W_SIMPLE VolumeSettings
 {
 public:
-    /** @brief Constructor of settings for common TSDF volume type.
-    */
-    VolumeSettings();
-
     /** @brief Constructor of settings for custom Volume type.
     * @param volumeType volume type.
     */
-    VolumeSettings(VolumeType volumeType);
+    CV_WRAP explicit VolumeSettings(VolumeType volumeType = VolumeType::TSDF);

     VolumeSettings(const VolumeSettings& vs);
+    VolumeSettings& operator=(const VolumeSettings&);
     ~VolumeSettings();
```

The remaining hunks of this class (@@ -147,7 @@, @@ -155,13 @@, @@ -171,7 @@, @@ -181,7 @@, @@ -191,7 @@ and @@ -201,8 @@) add the `CV_WRAP` macro to every setter/getter; signatures and doc comments are otherwise unchanged. The resulting declarations, with the doc comments condensed into inline notes:

```cpp
CV_WRAP void setIntegrateWidth(int val);            // width of the image for integration
CV_WRAP int getIntegrateWidth() const;
CV_WRAP void setIntegrateHeight(int val);           // height of the image for integration
CV_WRAP int getIntegrateHeight() const;

CV_WRAP void setRaycastWidth(int val);              // raycasted image size, used when not provided at raycast() call
CV_WRAP int getRaycastWidth() const;
CV_WRAP void setRaycastHeight(int val);
CV_WRAP int getRaycastHeight() const;

CV_WRAP void setDepthFactor(float val);             // depth scaling factor
CV_WRAP float getDepthFactor() const;
CV_WRAP void setVoxelSize(float val);               // voxel size
CV_WRAP float getVoxelSize() const;

CV_WRAP void setTsdfTruncateDistance(float val);    // distances from the surface greater than this are truncated to 1.0
CV_WRAP float getTsdfTruncateDistance() const;
CV_WRAP void setMaxDepth(float val);                // depth truncation threshold in meters; greater depth is truncated to 0
CV_WRAP float getMaxDepth() const;
CV_WRAP void setMaxWeight(int val);                 // max number of frames averaged per voxel
CV_WRAP int getMaxWeight() const;
CV_WRAP void setRaycastStepFactor(float val);       // fraction of voxel length skipped per raycast march
CV_WRAP float getRaycastStepFactor() const;

CV_WRAP void setVolumePose(InputArray val);
CV_WRAP void getVolumePose(OutputArray val) const;
CV_WRAP void setVolumeResolution(InputArray val);   // voxels per dimension; HashTSDF only supports equal resolution in all three dimensions
CV_WRAP void getVolumeResolution(OutputArray val) const;
CV_WRAP void getVolumeStrides(OutputArray val) const;   // x/y/z strides for iterating over raw volume unit data

CV_WRAP void setCameraIntegrateIntrinsics(InputArray val);  // camera intrinsics (fx, fy, cx, cy) for integration
CV_WRAP void getCameraIntegrateIntrinsics(OutputArray val) const;
CV_WRAP void setCameraRaycastIntrinsics(InputArray val);    // intrinsics used when not provided at raycast() call
CV_WRAP void getCameraRaycastIntrinsics(OutputArray val) const;
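```

A small hedged sketch of the wrapped settings in Python; values are illustrative, and the return-value form of `getCameraRaycastIntrinsics()` follows its use in the new tests:

```python
import cv2 as cv

settings = cv.VolumeSettings(cv.VolumeType_HashTSDF)
settings.setMaxDepth(4.0)                  # illustrative value
K = settings.getCameraRaycastIntrinsics()  # 3x3 intrinsics matrix, as used in test_volume.py
volume = cv.Volume(cv.VolumeType_HashTSDF, settings)
```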
Python odometry tests (`odometry_test` in the Python test suite):

```diff
@@ -6,7 +6,7 @@ import cv2 as cv
 from tests_common import NewOpenCVTests

 class odometry_test(NewOpenCVTests):
-    def commonOdometryTest(self, needRgb, otype):
+    def commonOdometryTest(self, needRgb, otype, algoType, useFrame):
         depth = self.get_sample('cv/rgbd/depth.png', cv.IMREAD_ANYDEPTH).astype(np.float32)
         if needRgb:
             rgb = self.get_sample('cv/rgbd/rgb.png', cv.IMREAD_ANYCOLOR)
@@ -25,12 +25,26 @@ class odometry_test(NewOpenCVTests):
         Rt_res = np.zeros((4, 4))

         if otype is not None:
-            odometry = cv.Odometry(otype)
+            settings = cv.OdometrySettings()
+            odometry = cv.Odometry(otype, settings, algoType)
         else:
             odometry = cv.Odometry()

         warped_depth = cv.warpPerspective(depth, Rt_warp, (640, 480))
         if needRgb:
             warped_rgb = cv.warpPerspective(rgb, Rt_warp, (640, 480))

+        if useFrame:
+            if needRgb:
+                srcFrame = cv.OdometryFrame(depth, rgb)
+                dstFrame = cv.OdometryFrame(warped_depth, warped_rgb)
+            else:
+                srcFrame = cv.OdometryFrame(depth)
+                dstFrame = cv.OdometryFrame(warped_depth)
+            odometry.prepareFrames(srcFrame, dstFrame)
+            isCorrect = odometry.compute(srcFrame, dstFrame, Rt_res)
+        else:
             if needRgb:
                 isCorrect = odometry.compute(depth, rgb, warped_depth, warped_rgb, Rt_res)
             else:
                 isCorrect = odometry.compute(depth, warped_depth, Rt_res)
@@ -41,16 +55,34 @@ class odometry_test(NewOpenCVTests):
         self.assertTrue(isCorrect)

     def test_OdometryDefault(self):
-        self.commonOdometryTest(False, None)
+        self.commonOdometryTest(False, None, None, False)
+
+    def test_OdometryDefaultFrame(self):
+        self.commonOdometryTest(False, None, None, True)

     def test_OdometryDepth(self):
-        self.commonOdometryTest(False, cv.DEPTH)
+        self.commonOdometryTest(False, cv.OdometryType_DEPTH, cv.OdometryAlgoType_COMMON, False)
+
+    def test_OdometryDepthFast(self):
+        self.commonOdometryTest(False, cv.OdometryType_DEPTH, cv.OdometryAlgoType_FAST, False)
+
+    def test_OdometryDepthFrame(self):
+        self.commonOdometryTest(False, cv.OdometryType_DEPTH, cv.OdometryAlgoType_COMMON, True)
+
+    def test_OdometryDepthFastFrame(self):
+        self.commonOdometryTest(False, cv.OdometryType_DEPTH, cv.OdometryAlgoType_FAST, True)

     def test_OdometryRGB(self):
-        self.commonOdometryTest(True, cv.RGB)
+        self.commonOdometryTest(True, cv.OdometryType_RGB, cv.OdometryAlgoType_COMMON, False)
+
+    def test_OdometryRGBFrame(self):
+        self.commonOdometryTest(True, cv.OdometryType_RGB, cv.OdometryAlgoType_COMMON, True)

     def test_OdometryRGB_Depth(self):
-        self.commonOdometryTest(True, cv.RGB_DEPTH)
+        self.commonOdometryTest(True, cv.OdometryType_RGB_DEPTH, cv.OdometryAlgoType_COMMON, False)
+
+    def test_OdometryRGB_DepthFrame(self):
+        self.commonOdometryTest(True, cv.OdometryType_RGB_DEPTH, cv.OdometryAlgoType_COMMON, True)

 if __name__ == '__main__':
     NewOpenCVTests.bootstrap()
```
New file `modules/3d/misc/python/test/test_volume.py` (123 lines):

```python
import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests

class volume_test(NewOpenCVTests):
    def test_VolumeDefault(self):
        depthPath = 'cv/rgbd/depth.png'
        depth = self.get_sample(depthPath, cv.IMREAD_ANYDEPTH).astype(np.float32)
        if depth.size <= 0:
            raise Exception('Failed to load depth file: %s' % depthPath)

        Rt = np.eye(4)
        volume = cv.Volume()
        volume.integrate(depth, Rt)

        size = (480, 640, 4)
        points  = np.zeros(size, np.float32)
        normals = np.zeros(size, np.float32)

        Kraycast = np.array([[525. ,   0. , 319.5],
                             [  0. , 525. , 239.5],
                             [  0. ,   0. ,   1. ]])

        volume.raycastEx(Rt, size[0], size[1], Kraycast, points, normals)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

        volume.raycast(Rt, points, normals)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

    def test_VolumeTSDF(self):
        depthPath = 'cv/rgbd/depth.png'
        depth = self.get_sample(depthPath, cv.IMREAD_ANYDEPTH).astype(np.float32)
        if depth.size <= 0:
            raise Exception('Failed to load depth file: %s' % depthPath)

        Rt = np.eye(4)

        settings = cv.VolumeSettings(cv.VolumeType_TSDF)
        volume = cv.Volume(cv.VolumeType_TSDF, settings)
        volume.integrate(depth, Rt)

        size = (480, 640, 4)
        points  = np.zeros(size, np.float32)
        normals = np.zeros(size, np.float32)

        Kraycast = settings.getCameraRaycastIntrinsics()
        volume.raycastEx(Rt, size[0], size[1], Kraycast, points, normals)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

        volume.raycast(Rt, points, normals)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

    def test_VolumeHashTSDF(self):
        depthPath = 'cv/rgbd/depth.png'
        depth = self.get_sample(depthPath, cv.IMREAD_ANYDEPTH).astype(np.float32)
        if depth.size <= 0:
            raise Exception('Failed to load depth file: %s' % depthPath)

        Rt = np.eye(4)
        settings = cv.VolumeSettings(cv.VolumeType_HashTSDF)
        volume = cv.Volume(cv.VolumeType_HashTSDF, settings)
        volume.integrate(depth, Rt)

        size = (480, 640, 4)
        points  = np.zeros(size, np.float32)
        normals = np.zeros(size, np.float32)

        Kraycast = settings.getCameraRaycastIntrinsics()
        volume.raycastEx(Rt, size[0], size[1], Kraycast, points, normals)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

        volume.raycast(Rt, points, normals)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

    def test_VolumeColorTSDF(self):
        depthPath = 'cv/rgbd/depth.png'
        rgbPath = 'cv/rgbd/rgb.png'
        depth = self.get_sample(depthPath, cv.IMREAD_ANYDEPTH).astype(np.float32)
        rgb = self.get_sample(rgbPath, cv.IMREAD_ANYCOLOR).astype(np.float32)

        if depth.size <= 0:
            raise Exception('Failed to load depth file: %s' % depthPath)
        if rgb.size <= 0:
            raise Exception('Failed to load RGB file: %s' % rgbPath)

        Rt = np.eye(4)
        settings = cv.VolumeSettings(cv.VolumeType_ColorTSDF)
        volume = cv.Volume(cv.VolumeType_ColorTSDF, settings)
        volume.integrateColor(depth, rgb, Rt)

        size = (480, 640, 4)
        points  = np.zeros(size, np.float32)
        normals = np.zeros(size, np.float32)
        colors  = np.zeros(size, np.float32)

        Kraycast = settings.getCameraRaycastIntrinsics()
        volume.raycastExColor(Rt, size[0], size[1], Kraycast, points, normals, colors)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)

        volume.raycastColor(Rt, points, normals, colors)

        self.assertEqual(points.shape, size)
        self.assertEqual(points.shape, normals.shape)
        self.assertEqual(points.shape, colors.shape)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
```
Volume performance tests (the `OdometryFrame` constructor calls follow the new depth-first argument order):

```diff
@@ -584,7 +584,7 @@ PERF_TEST_P_(VolumePerfFixture, integrate)
     UMat urgb, udepth;
     depth.copyTo(udepth);
     rgb.copyTo(urgb);
-    OdometryFrame odf(urgb, udepth);
+    OdometryFrame odf(udepth, urgb);

     bool done = false;
     while (repeat1st ? next() : !done)
@@ -621,7 +621,7 @@ PERF_TEST_P_(VolumePerfFixture, raycast)
     depth.copyTo(udepth);
     rgb.copyTo(urgb);

-    OdometryFrame odf(urgb, udepth);
+    OdometryFrame odf(udepth, urgb);

     if (testSrcType == VolumeTestSrcType::MAT)
         if (volumeType == VolumeType::ColorTSDF)
```
Odometry sample:

```diff
@@ -181,7 +181,7 @@ int main(int argc, char** argv)
     {
         Mat gray;
         cvtColor(image, gray, COLOR_BGR2GRAY);
-        frame_curr = OdometryFrame(gray, depth);
+        frame_curr = OdometryFrame(depth, gray);

         Mat Rt;
         if(!Rts.empty())
```
@ -18,9 +18,9 @@ public:
|
|||||||
virtual void prepareFrame(OdometryFrame& frame) const = 0;
|
virtual void prepareFrame(OdometryFrame& frame) const = 0;
|
||||||
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const = 0;
|
virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const = 0;
|
||||||
virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const = 0;
|
virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const = 0;
|
||||||
virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const = 0;
|
virtual bool compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const = 0;
|
||||||
virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
|
virtual bool compute(InputArray srcDepth, InputArray srcRGB,
|
||||||
-                         InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const = 0;
+                         InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const = 0;
     virtual Ptr<RgbdNormals> getNormalsComputer() const = 0;
 };

@@ -41,9 +41,9 @@ public:
     virtual void prepareFrame(OdometryFrame& frame) const override;
     virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const override;
     virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const override;
-    virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const override;
-    virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
-                         InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const override;
+    virtual bool compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const override;
+    virtual bool compute(InputArray srcDepth, InputArray srcRGB,
+                         InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const override;
     virtual Ptr<RgbdNormals> getNormalsComputer() const override;
 };

@@ -67,10 +67,7 @@ bool OdometryICP::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
     Matx33f cameraMatrix;
     settings.getCameraMatrix(cameraMatrix);
     std::vector<int> iterCounts;
-    Mat miterCounts;
-    settings.getIterCounts(miterCounts);
-    for (int i = 0; i < miterCounts.size().height; i++)
-        iterCounts.push_back(miterCounts.at<int>(i));
+    settings.getIterCounts(iterCounts);
     bool isCorrect = RGBDICPOdometryImpl(Rt, Mat(), srcFrame, dstFrame, cameraMatrix,
                                          this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
                                          iterCounts, this->settings.getMaxTranslation(),
@@ -81,8 +78,8 @@ bool OdometryICP::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds

 bool OdometryICP::compute(InputArray _srcDepth, InputArray _dstDepth, OutputArray Rt) const
 {
-    OdometryFrame srcFrame(noArray(), _srcDepth);
-    OdometryFrame dstFrame(noArray(), _dstDepth);
+    OdometryFrame srcFrame(_srcDepth);
+    OdometryFrame dstFrame(_dstDepth);

     prepareICPFrame(srcFrame, dstFrame, this->normalsComputer, this->settings, this->algtype);

@@ -90,13 +87,13 @@ bool OdometryICP::compute(InputArray _srcDepth, InputArray _dstDepth, OutputArra
     return isCorrect;
 }

-bool OdometryICP::compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
-                          InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
+bool OdometryICP::compute(InputArray srcDepth, InputArray srcRGB,
+                          InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const
 {
-    CV_UNUSED(srcDepthFrame);
-    CV_UNUSED(srcRGBFrame);
-    CV_UNUSED(dstDepthFrame);
-    CV_UNUSED(dstRGBFrame);
+    CV_UNUSED(srcDepth);
+    CV_UNUSED(srcRGB);
+    CV_UNUSED(dstDepth);
+    CV_UNUSED(dstRGB);
     CV_UNUSED(Rt);
     CV_Error(cv::Error::StsBadFunc, "This odometry does not work with rgb data");
 }
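For context, the depth-only overload above is what the newly wrapped API exposes to Python: two depth maps in, a pose out. A minimal sketch with synthetic data, assuming the usual OutputArray-to-return-value mapping of the Python bindings (this snippet is not taken from the PR's test suite):

```python
import numpy as np
import cv2 as cv

# Placeholder depth maps; real sensor frames are needed for a meaningful pose.
depth1 = np.full((480, 640), 2.0, dtype=np.float32)
depth2 = np.full((480, 640), 1.99, dtype=np.float32)

odom = cv.Odometry()              # default-constructed, depth-only odometry
ok, Rt = odom.compute(depth1, depth2)  # Rt is returned because OutputArray maps to a return value
print("converged:", ok)
print(Rt)
```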
@@ -114,9 +111,9 @@ public:
     virtual void prepareFrame(OdometryFrame& frame) const override;
     virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const override;
     virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const override;
-    virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const override;
-    virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
-                         InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const override;
+    virtual bool compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const override;
+    virtual bool compute(InputArray srcDepth, InputArray srcRGB,
+                         InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const override;
     virtual Ptr<RgbdNormals> getNormalsComputer() const override { return Ptr<RgbdNormals>(); }
 };

@@ -136,11 +133,7 @@ bool OdometryRGB::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
     Matx33f cameraMatrix;
     settings.getCameraMatrix(cameraMatrix);
     std::vector<int> iterCounts;
-    Mat miterCounts;
-    settings.getIterCounts(miterCounts);
-    CV_CheckTypeEQ(miterCounts.type(), CV_32S, "");
-    for (int i = 0; i < miterCounts.size().height; i++)
-        iterCounts.push_back(miterCounts.at<int>(i));
+    settings.getIterCounts(iterCounts);
     bool isCorrect = RGBDICPOdometryImpl(Rt, Mat(), srcFrame, dstFrame, cameraMatrix,
                                          this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
                                          iterCounts, this->settings.getMaxTranslation(),
@@ -149,18 +142,18 @@ bool OdometryRGB::compute(const OdometryFrame& srcFrame, const OdometryFrame& ds
     return isCorrect;
 }

-bool OdometryRGB::compute(InputArray _srcImage, InputArray _dstImage, OutputArray Rt) const
+bool OdometryRGB::compute(InputArray _srcDepth, InputArray _dstDepth, OutputArray Rt) const
 {
-    CV_UNUSED(_srcImage);
-    CV_UNUSED(_dstImage);
+    CV_UNUSED(_srcDepth);
+    CV_UNUSED(_dstDepth);
     CV_UNUSED(Rt);
     CV_Error(cv::Error::StsBadFunc, "This odometry algorithm requires depth and rgb data simultaneously");
 }

-bool OdometryRGB::compute(InputArray srcDepthFrame, InputArray srcRGBFrame, InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
+bool OdometryRGB::compute(InputArray srcDepth, InputArray srcRGB, InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const
 {
-    OdometryFrame srcFrame(srcRGBFrame, srcDepthFrame);
-    OdometryFrame dstFrame(dstRGBFrame, dstDepthFrame);
+    OdometryFrame srcFrame(srcDepth, srcRGB);
+    OdometryFrame dstFrame(dstDepth, dstRGB);

     prepareRGBFrame(srcFrame, dstFrame, this->settings);

@@ -181,9 +174,9 @@ public:
     virtual void prepareFrame(OdometryFrame& frame) const override;
     virtual void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame) const override;
     virtual bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt) const override;
-    virtual bool compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const override;
-    virtual bool compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
-                         InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const override;
+    virtual bool compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const override;
+    virtual bool compute(InputArray srcDepth, InputArray srcRGB,
+                         InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const override;
     virtual Ptr<RgbdNormals> getNormalsComputer() const override;
 };

@@ -207,10 +200,7 @@ bool OdometryRGBD::compute(const OdometryFrame& srcFrame, const OdometryFrame& d
     Matx33f cameraMatrix;
     settings.getCameraMatrix(cameraMatrix);
     std::vector<int> iterCounts;
-    Mat miterCounts;
-    settings.getIterCounts(miterCounts);
-    for (int i = 0; i < miterCounts.size().height; i++)
-        iterCounts.push_back(miterCounts.at<int>(i));
+    settings.getIterCounts(iterCounts);
     bool isCorrect = RGBDICPOdometryImpl(Rt, Mat(), srcFrame, dstFrame, cameraMatrix,
                                          this->settings.getMaxDepthDiff(), this->settings.getAngleThreshold(),
                                          iterCounts, this->settings.getMaxTranslation(),
@@ -219,19 +209,19 @@ bool OdometryRGBD::compute(const OdometryFrame& srcFrame, const OdometryFrame& d
     return isCorrect;
 }

-bool OdometryRGBD::compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const
+bool OdometryRGBD::compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const
 {
-    CV_UNUSED(srcFrame);
-    CV_UNUSED(dstFrame);
+    CV_UNUSED(srcDepth);
+    CV_UNUSED(dstDepth);
     CV_UNUSED(Rt);
     CV_Error(cv::Error::StsBadFunc, "This odometry algorithm needs depth and rgb data simultaneously");
 }

-bool OdometryRGBD::compute(InputArray _srcDepthFrame, InputArray _srcRGBFrame,
-                           InputArray _dstDepthFrame, InputArray _dstRGBFrame, OutputArray Rt) const
+bool OdometryRGBD::compute(InputArray _srcDepth, InputArray _srcRGB,
+                           InputArray _dstDepth, InputArray _dstRGB, OutputArray Rt) const
 {
-    OdometryFrame srcFrame(_srcRGBFrame, _srcDepthFrame);
-    OdometryFrame dstFrame(_dstRGBFrame, _dstDepthFrame);
+    OdometryFrame srcFrame(_srcDepth, _srcRGB);
+    OdometryFrame dstFrame(_dstDepth, _dstRGB);

     prepareRGBDFrame(srcFrame, dstFrame, this->normalsComputer, this->settings, this->algtype);
     bool isCorrect = compute(srcFrame, dstFrame, Rt);
@@ -266,7 +256,7 @@ Odometry::Odometry(OdometryType otype)
     }
 }

-Odometry::Odometry(OdometryType otype, OdometrySettings settings, OdometryAlgoType algtype)
+Odometry::Odometry(OdometryType otype, const OdometrySettings& settings, OdometryAlgoType algtype)
 {
     switch (otype)
     {
@@ -305,15 +295,15 @@ bool Odometry::compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFr
     return this->impl->compute(srcFrame, dstFrame, Rt);
 }

-bool Odometry::compute(InputArray srcFrame, InputArray dstFrame, OutputArray Rt) const
+bool Odometry::compute(InputArray srcDepth, InputArray dstDepth, OutputArray Rt) const
 {
-    return this->impl->compute(srcFrame, dstFrame, Rt);
+    return this->impl->compute(srcDepth, dstDepth, Rt);
 }

-bool Odometry::compute(InputArray srcDepthFrame, InputArray srcRGBFrame,
-                       InputArray dstDepthFrame, InputArray dstRGBFrame, OutputArray Rt) const
+bool Odometry::compute(InputArray srcDepth, InputArray srcRGB,
+                       InputArray dstDepth, InputArray dstRGB, OutputArray Rt) const
 {
-    return this->impl->compute(srcDepthFrame, srcRGBFrame, dstDepthFrame, dstRGBFrame, Rt);
+    return this->impl->compute(srcDepth, srcRGB, dstDepth, dstRGB, Rt);
 }

 Ptr<RgbdNormals> Odometry::getNormalsComputer() const
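These wrapper-level signatures are what the added Python tests drive. A rough sketch of the frame-based workflow from Python follows; the enum spelling `cv.OdometryType_RGB_DEPTH` and the in-place behaviour of `prepareFrames` on the wrapped frames are assumptions based on the usual OpenCV binding conventions, not something this diff shows verbatim:

```python
import numpy as np
import cv2 as cv

depth = np.full((480, 640), 2.0, dtype=np.float32)   # placeholder depth map (meters)
rgb   = np.zeros((480, 640, 3), dtype=np.uint8)      # placeholder color image

odom = cv.Odometry(cv.OdometryType_RGB_DEPTH)        # assumed constant spelling

src = cv.OdometryFrame(depth, rgb)   # depth now comes first, RGB second
dst = cv.OdometryFrame(depth, rgb)

odom.prepareFrames(src, dst)         # builds the pyramids inside both frames
ok, Rt = odom.compute(src, dst)
print("converged:", ok)
```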
@@ -11,7 +11,7 @@
 namespace cv
 {

-OdometryFrame::OdometryFrame(InputArray image, InputArray depth, InputArray mask, InputArray normals)
+OdometryFrame::OdometryFrame(InputArray depth, InputArray image, InputArray mask, InputArray normals)
 {
     this->impl = makePtr<OdometryFrame::Impl>();
     if (!image.empty())
@@ -39,7 +39,7 @@ void OdometryFrame::getProcessedDepth(OutputArray depth) const { this->impl->get
 void OdometryFrame::getMask(OutputArray mask) const { this->impl->getMask(mask); }
 void OdometryFrame::getNormals(OutputArray normals) const { this->impl->getNormals(normals); }

-size_t OdometryFrame::getPyramidLevels() const { return this->impl->getPyramidLevels(); }
+int OdometryFrame::getPyramidLevels() const { return this->impl->getPyramidLevels(); }

 void OdometryFrame::getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const
 {
@@ -76,13 +76,13 @@ void OdometryFrame::Impl::getNormals(OutputArray _normals) const
     _normals.assign(this->normals);
 }

-size_t OdometryFrame::Impl::getPyramidLevels() const
+int OdometryFrame::Impl::getPyramidLevels() const
 {
     // all pyramids should have the same size
     for (const auto& p : this->pyramids)
     {
         if (!p.empty())
-            return p.size();
+            return (int)(p.size());
     }
     return 0;
 }
@@ -489,13 +489,18 @@ void prepareRGBDFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, Ptr<Rgbd
 }

 bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
-                         const OdometryFrame srcFrame,
-                         const OdometryFrame dstFrame,
+                         const OdometryFrame& srcFrame,
+                         const OdometryFrame& dstFrame,
                          const Matx33f& cameraMatrix,
                          float maxDepthDiff, float angleThreshold, const std::vector<int>& iterCounts,
                          double maxTranslation, double maxRotation, double sobelScale,
                          OdometryType method, OdometryTransformType transformType, OdometryAlgoType algtype)
 {
+    if (srcFrame.getPyramidLevels() != (int)(iterCounts.size()))
+        CV_Error(Error::StsBadArg, "srcFrame has incorrect number of pyramid levels. Did you forget to call prepareFrame()?");
+    if (dstFrame.getPyramidLevels() != (int)(iterCounts.size()))
+        CV_Error(Error::StsBadArg, "dstFrame has incorrect number of pyramid levels. Did you forget to call prepareFrame()?");

     int transformDim = getTransformDim(transformType);

     const int minOverdetermScale = 20;
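The guard added above means the frame-based `compute()` now refuses frames whose pyramids were never built. A small sketch of how that surfaces through the wrapped classes (a hedged illustration; the actual level count depends on the iteration counts stored in `OdometrySettings`):

```python
import numpy as np
import cv2 as cv

depth = np.full((480, 640), 2.0, dtype=np.float32)   # placeholder depth map

odom  = cv.Odometry()
frame = cv.OdometryFrame(depth)

print(frame.getPyramidLevels())   # 0: no pyramids have been built yet

odom.prepareFrame(frame)
print(frame.getPyramidLevels())   # now matches the configured number of iteration levels
```

Passing an unprepared frame to the `OdometryFrame` overload of `compute()` would hit the new `CV_Error` and raise `cv.error` with the "Did you forget to call prepareFrame()?" message.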
@@ -159,8 +159,8 @@ void prepareRGBFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, OdometryS
 void prepareICPFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, Ptr<RgbdNormals>& normalsComputer, const OdometrySettings settings, OdometryAlgoType algtype);

 bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
-                         const OdometryFrame srcFrame,
-                         const OdometryFrame dstFrame,
+                         const OdometryFrame& srcFrame,
+                         const OdometryFrame& dstFrame,
                          const Matx33f& cameraMatrix,
                          float maxDepthDiff, float angleThreshold, const std::vector<int>& iterCounts,
                          double maxTranslation, double maxRotation, double sobelScale,
@@ -157,6 +157,17 @@ OdometrySettings::OdometrySettings()
     this->impl = makePtr<OdometrySettingsImplCommon>();
 }

+OdometrySettings::OdometrySettings(const OdometrySettings& s)
+{
+    this->impl = makePtr<OdometrySettingsImplCommon>(*s.impl.dynamicCast<OdometrySettingsImplCommon>());
+}
+
+OdometrySettings& OdometrySettings::operator=(const OdometrySettings& s)
+{
+    this->impl = makePtr<OdometrySettingsImplCommon>(*s.impl.dynamicCast<OdometrySettingsImplCommon>());
+    return *this;
+}
+
 void OdometrySettings::setCameraMatrix(InputArray val) { this->impl->setCameraMatrix(val); }
 void OdometrySettings::getCameraMatrix(OutputArray val) const { this->impl->getCameraMatrix(val); }
 void OdometrySettings::setIterCounts(InputArray val) { this->impl->setIterCounts(val); }
@@ -251,7 +251,7 @@ public:
     virtual void getMask(OutputArray mask) const ;
     virtual void getNormals(OutputArray normals) const ;

-    virtual size_t getPyramidLevels() const ;
+    virtual int getPyramidLevels() const ;

     virtual void getPyramidAt(OutputArray img,
                               OdometryFramePyramidType pyrType, size_t level) const ;
@@ -179,7 +179,7 @@ int TsdfVolume::getVisibleBlocks() const { return 1; }
 size_t TsdfVolume::getTotalVolumeUnits() const { return 1; }


-Vec6f TsdfVolume::getBoundingBox(int precision) const
+void TsdfVolume::getBoundingBox(OutputArray bb, int precision) const
 {
     if (precision == Volume::BoundingBoxPrecision::VOXEL)
     {
@@ -191,7 +191,7 @@ Vec6f TsdfVolume::getBoundingBox(int precision) const
         Vec3f res;
         this->settings.getVolumeResolution(res);
         Vec3f volSize = res * sz;
-        return Vec6f(0, 0, 0, volSize[0], volSize[1], volSize[2]);
+        Vec6f(0, 0, 0, volSize[0], volSize[1], volSize[2]).copyTo(bb);
     }
 }

@@ -403,7 +403,7 @@ bool HashTsdfVolume::getEnableGrowth() const
     return enableGrowth;
 }

-Vec6f HashTsdfVolume::getBoundingBox(int precision) const
+void HashTsdfVolume::getBoundingBox(OutputArray boundingBox, int precision) const
 {
     if (precision == Volume::BoundingBoxPrecision::VOXEL)
     {
@@ -442,7 +442,7 @@ Vec6f HashTsdfVolume::getBoundingBox(int precision) const

         if (vi.empty())
         {
-            return Vec6f();
+            boundingBox.setZero();
         }
         else
         {
@@ -474,7 +474,7 @@ Vec6f HashTsdfVolume::getBoundingBox(int precision) const
                 bb[5] = max(bb[5], pg.z);
             }

-            return bb;
+            bb.copyTo(boundingBox);
         }
     }
 }
@@ -569,7 +569,7 @@ void ColorTsdfVolume::reset()
 int ColorTsdfVolume::getVisibleBlocks() const { return 1; }
 size_t ColorTsdfVolume::getTotalVolumeUnits() const { return 1; }

-Vec6f ColorTsdfVolume::getBoundingBox(int precision) const
+void ColorTsdfVolume::getBoundingBox(OutputArray bb, int precision) const
 {
     if (precision == Volume::BoundingBoxPrecision::VOXEL)
     {
@@ -581,7 +581,7 @@ Vec6f ColorTsdfVolume::getBoundingBox(int precision) const
         Vec3f res;
         this->settings.getVolumeResolution(res);
         Vec3f volSize = res * sz;
-        return Vec6f(0, 0, 0, volSize[0], volSize[1], volSize[2]);
+        Vec6f(0, 0, 0, volSize[0], volSize[1], volSize[2]).copyTo(bb);
     }
 }

@@ -37,7 +37,7 @@ public:
     virtual int getVisibleBlocks() const = 0;
     virtual size_t getTotalVolumeUnits() const = 0;

-    virtual Vec6f getBoundingBox(int precision) const = 0;
+    virtual void getBoundingBox(OutputArray bb, int precision) const = 0;
     virtual void setEnableGrowth(bool v) = 0;
     virtual bool getEnableGrowth() const = 0;

@@ -73,7 +73,7 @@ public:
     // VOLUME_UNIT - up to volume unit
     // VOXEL - up to voxel
     // returns (min_x, min_y, min_z, max_x, max_y, max_z) in volume coordinates
-    virtual Vec6f getBoundingBox(int precision) const override;
+    virtual void getBoundingBox(OutputArray bb, int precision) const override;

     // Enabels or disables new volume unit allocation during integration
     // Applicable for HashTSDF only
@@ -134,7 +134,7 @@ public:
     // VOLUME_UNIT - up to volume unit
     // VOXEL - up to voxel
     // returns (min_x, min_y, min_z, max_x, max_y, max_z) in volume coordinates
-    virtual Vec6f getBoundingBox(int precision) const override;
+    virtual void getBoundingBox(OutputArray bb, int precision) const override;

 public:
     int lastVolIndex;
@@ -191,7 +191,7 @@ public:
     // VOLUME_UNIT - up to volume unit
     // VOXEL - up to voxel
     // returns (min_x, min_y, min_z, max_x, max_y, max_z) in volume coordinates
-    virtual Vec6f getBoundingBox(int precision) const override;
+    virtual void getBoundingBox(OutputArray bb, int precision) const override;

     // Enabels or disables new volume unit allocation during integration
     // Applicable for HashTSDF only
@@ -211,12 +211,7 @@ private:
 };


-Volume::Volume()
-{
-    VolumeSettings settings;
-    this->impl = makePtr<TsdfVolume>(settings);
-}
-Volume::Volume(VolumeType vtype, VolumeSettings settings)
+Volume::Volume(VolumeType vtype, const VolumeSettings& settings)
 {
     switch (vtype)
     {
@@ -239,8 +234,11 @@ Volume::~Volume() {}
 void Volume::integrate(const OdometryFrame& frame, InputArray pose) { this->impl->integrate(frame, pose); }
 void Volume::integrate(InputArray depth, InputArray pose) { this->impl->integrate(depth, pose); }
 void Volume::integrate(InputArray depth, InputArray image, InputArray pose) { this->impl->integrate(depth, image, pose); }
+void Volume::raycast(InputArray cameraPose, OutputArray _points, OutputArray _normals) const { this->impl->raycast(cameraPose, _points, _normals, noArray()); }
 void Volume::raycast(InputArray cameraPose, OutputArray _points, OutputArray _normals, OutputArray _colors) const { this->impl->raycast(cameraPose, _points, _normals, _colors); }
+void Volume::raycast(InputArray cameraPose, int height, int width, InputArray _intr, OutputArray _points, OutputArray _normals) const { this->impl->raycast(cameraPose, height, width, _intr, _points, _normals, noArray()); }
 void Volume::raycast(InputArray cameraPose, int height, int width, InputArray _intr, OutputArray _points, OutputArray _normals, OutputArray _colors) const { this->impl->raycast(cameraPose, height, width, _intr, _points, _normals, _colors); }

 void Volume::fetchNormals(InputArray points, OutputArray normals) const { this->impl->fetchNormals(points, normals); }
 void Volume::fetchPointsNormals(OutputArray points, OutputArray normals) const { this->impl->fetchPointsNormals(points, normals); }
 void Volume::fetchPointsNormalsColors(OutputArray points, OutputArray normals, OutputArray colors) const { this->impl->fetchPointsNormalsColors(points, normals, colors); };
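The two added `raycast` overloads let plain TSDF volumes skip the colors argument entirely; this is the form the new Python sample at the bottom of this diff relies on. A minimal sketch with a synthetic depth map in place of real sensor data:

```python
import numpy as np
import cv2 as cv

settings = cv.VolumeSettings(cv.VolumeType_TSDF)
volume   = cv.Volume(cv.VolumeType_TSDF, settings)

depth = np.full((480, 640), 2.0, dtype=np.float32)   # placeholder depth map
pose  = np.eye(4)                                     # camera at the origin

volume.integrate(depth, pose)

# New overload: no color output needed for a colorless volume.
points  = np.zeros((480, 640, 4), np.float32)
normals = np.zeros((480, 640, 4), np.float32)
volume.raycast(pose, points, normals)
```

The camera-resolution variant (`raycast(pose, height, width, intrinsics, points, normals)`) gets the same colorless form, as the added overload above shows.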
@@ -249,7 +247,7 @@ void Volume::reset() { this->impl->reset(); }
 int Volume::getVisibleBlocks() const { return this->impl->getVisibleBlocks(); }
 size_t Volume::getTotalVolumeUnits() const { return this->impl->getTotalVolumeUnits(); }

-Vec6f Volume::getBoundingBox(int precision) const { return this->impl->getBoundingBox(precision); }
+void Volume::getBoundingBox(OutputArray bb, int precision) const { this->impl->getBoundingBox(bb, precision); }
 void Volume::setEnableGrowth(bool v) { this->impl->setEnableGrowth(v); }
 bool Volume::getEnableGrowth() const { return this->impl->getEnableGrowth(); }

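With `getBoundingBox` writing to an OutputArray, the Python wrapper can hand the box back as a return value instead of a `Vec6f`. A sketch under that assumption; the constant spelling `cv.Volume_BoundingBoxPrecision_VOLUME_UNIT` and the exact shape of the returned array are guesses based on the usual binding conventions:

```python
import cv2 as cv

settings = cv.VolumeSettings(cv.VolumeType_TSDF)
volume   = cv.Volume(cv.VolumeType_TSDF, settings)

# Assumed constant spelling; the box is (min_x, min_y, min_z, max_x, max_y, max_z)
# in volume coordinates, per the comments in the volume headers above.
bb = volume.getBoundingBox(cv.Volume_BoundingBoxPrecision_VOLUME_UNIT)
print(bb)
```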
@@ -243,11 +243,6 @@ public:
 };


-VolumeSettings::VolumeSettings()
-{
-    this->impl = makePtr<VolumeSettingsImpl>();
-}
-
 VolumeSettings::VolumeSettings(VolumeType volumeType)
 {
     this->impl = makePtr<VolumeSettingsImpl>(volumeType);
@@ -258,6 +253,12 @@ VolumeSettings::VolumeSettings(const VolumeSettings& vs)
     this->impl = makePtr<VolumeSettingsImpl>(*vs.impl.dynamicCast<VolumeSettingsImpl>());
 }

+VolumeSettings& VolumeSettings::operator=(const VolumeSettings& vs)
+{
+    this->impl = makePtr<VolumeSettingsImpl>(*vs.impl.dynamicCast<VolumeSettingsImpl>());
+    return *this;
+}
+
 VolumeSettings::~VolumeSettings() {}

 void VolumeSettings::setIntegrateWidth(int val) { this->impl->setIntegrateWidth(val); };
@@ -143,7 +143,7 @@ void OdometryTest::checkUMats()
     OdometrySettings ods;
     ods.setCameraMatrix(K);
     Odometry odometry = Odometry(otype, ods, algtype);
-    OdometryFrame odf(uimage, udepth);
+    OdometryFrame odf(udepth, uimage);

     Mat calcRt;

@@ -166,7 +166,7 @@ void OdometryTest::run()
     OdometrySettings ods;
     ods.setCameraMatrix(K);
     Odometry odometry = Odometry(otype, ods, algtype);
-    OdometryFrame odf(image, depth);
+    OdometryFrame odf(depth, image);
     Mat calcRt;

     // 1. Try to find Rt between the same frame (try masks also).
@@ -199,8 +199,8 @@ void OdometryTest::run()
     warpFrame(depth, image, noArray(), rt.matrix, K, warpedDepth, warpedImage);
     dilateFrame(warpedImage, warpedDepth); // due to inaccuracy after warping

-    OdometryFrame odfSrc(image, depth);
-    OdometryFrame odfDst(warpedImage, warpedDepth);
+    OdometryFrame odfSrc(depth, image);
+    OdometryFrame odfDst(warpedDepth, warpedImage);

     odometry.prepareFrames(odfSrc, odfDst);
     isComputed = odometry.compute(odfSrc, odfDst, calcRt);
@@ -272,7 +272,7 @@ void OdometryTest::prepareFrameCheck()
     OdometrySettings ods;
     ods.setCameraMatrix(K);
     Odometry odometry = Odometry(otype, ods, algtype);
-    OdometryFrame odf(gtImage, gtDepth);
+    OdometryFrame odf(gtDepth, gtImage);

     odometry.prepareFrame(odf);

@@ -502,7 +502,8 @@ void staticBoundingBoxTest(VolumeType volumeType)
     vs.getVolumePose(pose);
     Vec3f end = voxelSize * Vec3f(res);
     Vec6f truebb(0, 0, 0, end[0], end[1], end[2]);
-    Vec6f bb = volume.getBoundingBox(Volume::BoundingBoxPrecision::VOLUME_UNIT);
+    Vec6f bb;
+    volume.getBoundingBox(bb, Volume::BoundingBoxPrecision::VOLUME_UNIT);
     Vec6f diff = bb - truebb;
     double normdiff = std::sqrt(diff.ddot(diff));
     ASSERT_LE(normdiff, std::numeric_limits<double>::epsilon());
@@ -534,7 +535,8 @@ void boundingBoxGrowthTest(bool enableGrowth)
     for (int i = 0; i < nIntegrations; i++)
         volume.integrate(udepth, poses[0].matrix);

-    Vec6f bb = volume.getBoundingBox(Volume::BoundingBoxPrecision::VOLUME_UNIT);
+    Vec6f bb;
+    volume.getBoundingBox(bb, Volume::BoundingBoxPrecision::VOLUME_UNIT);
     Vec6f truebb(-0.9375f, 1.3125f, -0.8906f, 3.9375f, 2.6133f, 1.4004f);
     Vec6f diff = bb - truebb;
     double bbnorm = std::sqrt(diff.ddot(diff));
@@ -562,7 +564,8 @@ void boundingBoxGrowthTest(bool enableGrowth)
     for (int i = 0; i < nIntegrations; i++)
         volume.integrate(udepth2, poses[0].matrix);

-    Vec6f bb2 = volume.getBoundingBox(Volume::BoundingBoxPrecision::VOLUME_UNIT);
+    Vec6f bb2;
+    volume.getBoundingBox(bb2, Volume::BoundingBoxPrecision::VOLUME_UNIT);

     Vec6f truebb2 = truebb + Vec6f(0, -(1.3125f - 1.0723f), -(-0.8906f - (-1.4238f)), 0, 0, 0);
     Vec6f diff2 = enableGrowth ? bb2 - truebb2 : bb2 - bb;
@@ -577,7 +580,8 @@ void boundingBoxGrowthTest(bool enableGrowth)
     // Reset check

     volume.reset();
-    Vec6f bb3 = volume.getBoundingBox(Volume::BoundingBoxPrecision::VOLUME_UNIT);
+    Vec6f bb3;
+    volume.getBoundingBox(bb3, Volume::BoundingBoxPrecision::VOLUME_UNIT);
     double bbnorm3 = std::sqrt(bb3.ddot(bb3));
     EXPECT_LE(bbnorm3, std::numeric_limits<double>::epsilon());
 }
@@ -909,7 +913,7 @@ protected:
     depth.copyTo(udepth);
     rgb.copyTo(urgb);

-    OdometryFrame odf(urgb, udepth);
+    OdometryFrame odf(udepth, urgb);

     if (testSrcType == VolumeTestSrcType::MAT)
     {
samples/python/volume.py (new file, 140 lines)
@@ -0,0 +1,140 @@
+import numpy as np
+import cv2 as cv
+import argparse
+
+# Use source data from this site:
+# https://vision.in.tum.de/data/datasets/rgbd-dataset/download
+# For example if you use rgbd_dataset_freiburg1_xyz sequence, your prompt should be:
+# python /path_to_opencv/samples/python/volume.py --source_folder /path_to_datasets/rgbd_dataset_freiburg1_xyz --algo <some algo>
+# so that the folder contains files groundtruth.txt and depth.txt
+
+# for more info about this function look cv::Quat::toRotMat3x3(...)
+def quatToMat3(a, b, c, d):
+    return np.array([
+        [1 - 2 * (c * c + d * d), 2 * (b * c - a * d)    , 2 * (b * d + a * c)],
+        [2 * (b * c + a * d)    , 1 - 2 * (b * b + d * d), 2 * (c * d - a * b)],
+        [2 * (b * d - a * c)    , 2 * (c * d + a * b)    , 1 - 2 * (b * b + c * c)]
+        ])
+
+def make_Rt(val):
+    R = quatToMat3(val[6], val[3], val[4], val[5])
+    t = np.array([ [val[0]], [val[1]], [val[2]] ])
+    tmp = np.array([0, 0, 0, 1])
+
+    Rt = np.append(R, t, axis=1)
+    Rt = np.vstack([Rt, tmp])
+
+    return Rt
+
+def get_image_info(path, is_depth):
+    image_info = {}
+    source = 'depth.txt'
+    if not is_depth:
+        source = 'rgb.txt'
+    with open(path+source) as file:
+        lines = file.readlines()
+        for line in lines:
+            words = line.split(' ')
+            if words[0] == '#':
+                continue
+            image_info[float(words[0])] = words[1][:-1]
+    return image_info
+
+def get_groundtruth_info(path):
+    groundtruth_info = {}
+    with open(path+'groundtruth.txt') as file:
+        lines = file.readlines()
+        for line in lines:
+            words = line.split(' ')
+            if words[0] == '#':
+                continue
+            groundtruth_info[float(words[0])] = [float(i) for i in words[1:]]
+    return groundtruth_info
+
+def main():
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--algo',
+        help="""TSDF - reconstruct data in volume with bounds,
+                HashTSDF - reconstruct data in volume without bounds (infinite volume),
+                ColorTSDF - like TSDF but also keeps color data,
+                default - runs TSDF""",
+        default="")
+    parser.add_argument(
+        '-src',
+        '--source_folder',
+        default="")
+
+    args = parser.parse_args()
+
+    path = args.source_folder
+    if path[-1] != '/':
+        path += '/'
+
+    depth_info = get_image_info(path, True)
+    rgb_info = get_image_info(path, False)
+    groundtruth_info = get_groundtruth_info(path)
+
+    volume_type = cv.VolumeType_TSDF
+    if args.algo == "HashTSDF":
+        volume_type = cv.VolumeType_HashTSDF
+    elif args.algo == "ColorTSDF":
+        volume_type = cv.VolumeType_ColorTSDF
+
+    settings = cv.VolumeSettings(volume_type)
+    volume = cv.Volume(volume_type, settings)
+
+    for key in list(depth_info.keys())[:]:
+        Rt = np.eye(4)
+        for key1 in groundtruth_info:
+            if np.abs(key1 - key) < 0.01:
+                Rt = make_Rt(groundtruth_info[key1])
+                break
+
+        rgb_path = ''
+        for key1 in rgb_info:
+            if np.abs(key1 - key) < 0.05:
+                rgb_path = path + rgb_info[key1]
+                break
+
+        depthPath = path + depth_info[key]
+        depth = cv.imread(depthPath, cv.IMREAD_ANYDEPTH).astype(np.float32)
+        if depth.size <= 0:
+            raise Exception('Failed to load depth file: %s' % depthPath)
+
+        rgb = cv.imread(rgb_path, cv.IMREAD_COLOR).astype(np.float32)
+        if rgb.size <= 0:
+            raise Exception('Failed to load RGB file: %s' % rgb_path)
+
+        if volume_type != cv.VolumeType_ColorTSDF:
+            volume.integrate(depth, Rt)
+        else:
+            volume.integrateColor(depth, rgb, Rt)
+
+        size = (480, 640, 4)
+
+        points  = np.zeros(size, np.float32)
+        normals = np.zeros(size, np.float32)
+        colors  = np.zeros(size, np.float32)
+
+        if volume_type != cv.VolumeType_ColorTSDF:
+            volume.raycast(Rt, points, normals)
+        else:
+            volume.raycastColor(Rt, points, normals, colors)
+
+        channels = list(cv.split(points))
+
+        cv.imshow("X", np.absolute(channels[0]))
+        cv.imshow("Y", np.absolute(channels[1]))
+        cv.imshow("Z", channels[2])
+
+        if volume_type == cv.VolumeType_ColorTSDF:
+            cv.imshow("Color", colors.astype(np.uint8))
+
+        #TODO: also display normals
+
+        cv.waitKey(10)
+
+if __name__ == '__main__':
+    main()