Merge pull request #20755 from DumDereDum:new_odometry
New odometry Pipeline

* first intergation
* tests run, but not pass
* add previous version of sigma calc
* add minor comment
* strange fixes
* fix fast ICP
* test changes; fast icp still not work correctly
* finaly, it works
* algtype fix
* change affine comparison
* boolean return
* fix bug with angle and cos
* test pass correctly
* fix for kinfu pipeline
* add compute points normals
* update for new odometry
* change odometry_evaluation
* odometry_evaluation works
* change debug logs
* minor changes
* change depth setting in odometryFrame
* fastICP works with 4num points
* all odometries work with 4mun points
* odometry full works on 4num points and normals
* replace ICP with DEPTH; comments replacements
* create prepareFrame; add docs for Odometry
* change getPyramids()
* delete extra code
* add intrinsics; but dont works
* bugfix with nan checking
* add gpu impl
* change createOdometryFrame func
* remove old fastICP code
* comments fix
* add comments
* minor fixes
* other minor fixes
* add channels assert
* add impl for odometry settings
* add pimpl to odometry
* linux warning fix
* linux warning fix 1
* linux warning fix 2
* linux error fix
* linux warning fix 3
* linux warning fix 4
* linux error fix 2
* fix test warnings
* python build fix
* doxygen fix
* docs fix
* change normal tests for 4channel point
* all Normal tests pass
* plane works
* add warp frame body
* minor fix
* warning fixes
* try to fix
* try to fix 1
* review fix
* lvls fix
* createOdometryFrame fix
* add comment
* const reference
* OPENCV_3D_ prefix
* const methods
* OdometryFramePyramidType ifx
* add assert
* precomp moved upper
* delete types_c
* add assert for get and set functions
* minor fixes
* remove core.hpp from header
* ocl_run add
* warning fix
* delete extra comment
* minor fix
* setDepth fix
* delete underscore
* odometry settings fix
* show debug image fix
* build error fix
* other minor fix
* add const to signatures
* fix
* conflict fix
* getter fix
Commit 6ab4659840 (parent 0cf0a5e9d4)
@@ -9,6 +9,9 @@
#include "opencv2/core/types_c.h"

#include "opencv2/3d/depth.hpp"
#include "opencv2/3d/odometry.hpp"
#include "opencv2/3d/odometry_frame.hpp"
#include "opencv2/3d/odometry_settings.hpp"
#include "opencv2/3d/volume.hpp"
#include "opencv2/3d/ptcloud.hpp"
@@ -98,7 +98,7 @@ CV_EXPORTS_W void registerDepth(InputArray unregisteredCameraMatrix, InputArray
* @param depth the depth image
* @param in_K
* @param in_points the list of xy coordinates
* @param points3d the resulting 3d points
* @param points3d the resulting 3d points (a point is represented by a 4-channel value [x, y, z, 0])
*/
CV_EXPORTS_W void depthTo3dSparse(InputArray depth, InputArray in_K, InputArray in_points, OutputArray points3d);

@@ -107,7 +107,7 @@ CV_EXPORTS_W void depthTo3dSparse(InputArray depth, InputArray in_K, InputArray
* @param depth the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters
* (as done with the Microsoft Kinect), otherwise, if given as CV_32F or CV_64F, it is assumed in meters)
* @param K The calibration matrix
* @param points3d the resulting 3d points. They are of depth the same as `depth` if it is CV_32F or CV_64F, and the
* @param points3d the resulting 3d points (a point is represented by a 4-channel value [x, y, z, 0]). They are of depth the same as `depth` if it is CV_32F or CV_64F, and the
* depth of `K` if `depth` is of depth CV_16U
* @param mask the mask of the points to consider (can be empty)
*/
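For illustration, here is a minimal sketch of how the new 4-channel output can be consumed. It assumes the depthTo3d() declaration from this header; the umbrella header name opencv2/3d.hpp and the Kinect-style camera matrix and depth values are made up for the example.

#include <opencv2/core.hpp>
#include <opencv2/3d.hpp>

int main()
{
    // synthetic 640x480 depth map, 1000 mm everywhere (CV_16U is interpreted as millimeters)
    cv::Mat depth(480, 640, CV_16UC1, cv::Scalar(1000));
    cv::Matx33f K(525.f, 0.f, 319.5f,
                  0.f, 525.f, 239.5f,
                  0.f, 0.f, 1.f);

    cv::Mat points3d;
    cv::depthTo3d(depth, K, points3d);

    // after this change every point is a 4-channel value [x, y, z, 0]
    CV_Assert(points3d.type() == CV_32FC4);
    cv::Vec4f p = points3d.at<cv::Vec4f>(240, 320); // roughly [0, 0, 1, 0] meters at the image center
    return p[2] > 0.f ? 0 : 1;
}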
@ -166,331 +166,7 @@ CV_EXPORTS_W void findPlanes(InputArray points3d, InputArray normals, OutputArra
|
||||
double sensor_error_c = 0,
|
||||
RgbdPlaneMethod method = RGBD_PLANE_METHOD_DEFAULT);
|
||||
|
||||
/** Object that contains frame data possibly needed for the Odometry.
* It's used for efficiency (to pass precomputed/cached data of a frame that participates
* in the Odometry processing several times).
|
||||
*/
|
||||
struct CV_EXPORTS_W OdometryFrame
|
||||
{
|
||||
public:
|
||||
/** These constants are used to set a type of cache which has to be prepared depending on the frame role:
|
||||
* srcFrame or dstFrame (see compute method of the Odometry class). For the srcFrame and dstFrame different cache data may be required,
|
||||
* some part of a cache may be common for both frame roles.
|
||||
* @param CACHE_SRC The cache data for the srcFrame will be prepared.
|
||||
* @param CACHE_DST The cache data for the dstFrame will be prepared.
|
||||
* @param CACHE_ALL The cache data for both srcFrame and dstFrame roles will be computed.
|
||||
* @param CACHE_DEPTH The frame will be generated from depth image
|
||||
* @param CACHE_PTS The frame will be built from point cloud
|
||||
*/
|
||||
enum OdometryFrameCacheType
|
||||
{
|
||||
CACHE_SRC = 1, CACHE_DST = 2, CACHE_ALL = CACHE_SRC + CACHE_DST
|
||||
};
|
||||
|
||||
/** Indicates which pyramid the get/setPyramid... methods access:
|
||||
* @param PYR_IMAGE The pyramid of RGB images
|
||||
* @param PYR_DEPTH The pyramid of depth images
|
||||
* @param PYR_MASK The pyramid of masks
|
||||
* @param PYR_CLOUD The pyramid of point clouds, produced from the pyramid of depths
|
||||
* @param PYR_DIX The pyramid of dI/dx derivative images
|
||||
* @param PYR_DIY The pyramid of dI/dy derivative images
|
||||
* @param PYR_TEXMASK The pyramid of textured masks
|
||||
* @param PYR_NORM The pyramid of normals
|
||||
* @param PYR_NORMMASK The pyramid of normals masks
|
||||
*/
|
||||
enum OdometryFramePyramidType
|
||||
{
|
||||
PYR_IMAGE = 0, PYR_DEPTH = 1, PYR_MASK = 2, PYR_CLOUD = 3, PYR_DIX = 4, PYR_DIY = 5, PYR_TEXMASK = 6, PYR_NORM = 7, PYR_NORMMASK = 8,
|
||||
N_PYRAMIDS
|
||||
};
|
||||
|
||||
OdometryFrame() : ID(-1) { }
|
||||
virtual ~OdometryFrame() { }
|
||||
|
||||
CV_WRAP static Ptr<OdometryFrame> create(InputArray image = noArray(), InputArray depth = noArray(),
|
||||
InputArray mask = noArray(), InputArray normals = noArray(), int ID = -1);
|
||||
|
||||
CV_WRAP virtual void setImage(InputArray image) = 0;
|
||||
CV_WRAP virtual void getImage(OutputArray image) = 0;
|
||||
CV_WRAP virtual void setDepth(InputArray depth) = 0;
|
||||
CV_WRAP virtual void getDepth(OutputArray depth) = 0;
|
||||
CV_WRAP virtual void setMask(InputArray mask) = 0;
|
||||
CV_WRAP virtual void getMask(OutputArray mask) = 0;
|
||||
CV_WRAP virtual void setNormals(InputArray normals) = 0;
|
||||
CV_WRAP virtual void getNormals(OutputArray normals) = 0;
|
||||
|
||||
CV_WRAP virtual void setPyramidLevels(size_t nLevels) = 0;
|
||||
CV_WRAP virtual size_t getPyramidLevels(OdometryFrame::OdometryFramePyramidType pyrType) = 0;
|
||||
|
||||
CV_WRAP virtual void setPyramidAt(InputArray pyrImage, OdometryFrame::OdometryFramePyramidType pyrType, size_t level) = 0;
|
||||
CV_WRAP virtual void getPyramidAt(OutputArray pyrImage, OdometryFrame::OdometryFramePyramidType pyrType, size_t level) = 0;
|
||||
|
||||
CV_PROP int ID;
|
||||
};
|
||||
|
||||
|
||||
/** Base class for computation of odometry.
|
||||
*/
|
||||
class CV_EXPORTS_W Odometry
|
||||
{
|
||||
public:
|
||||
|
||||
/** A class of transformation*/
|
||||
enum OdometryTransformType
|
||||
{
|
||||
ROTATION = 1, TRANSLATION = 2, RIGID_BODY_MOTION = 4
|
||||
};
|
||||
|
||||
virtual ~Odometry() { }
|
||||
|
||||
/** Create a new Odometry object based on its name. Currently supported names are:
|
||||
* "RgbdOdometry", "ICPOdometry", "RgbdICPOdometry", "FastICPOdometry".
|
||||
* @param odometryType algorithm's name
|
||||
*/
|
||||
CV_WRAP static Ptr<Odometry> createFromName(const std::string& odometryType);
|
||||
|
||||
/** Method to compute a transformation from the source frame to the destination one.
|
||||
* Some odometry algorithms do not use some of the frame data (e.g. ICP does not use images).
* In such cases the corresponding arguments can be passed as empty Mat.
* The method returns true if all internal computations were possible (e.g. there were enough correspondences,
* the system of equations had a solution, etc.) and the resulting transformation passes the validity test, if one
* is provided by the Odometry subclass (e.g. thresholds on maximum translation and rotation).
|
||||
* @param srcImage Image data of the source frame (CV_8UC1)
|
||||
* @param srcDepth Depth data of the source frame (CV_32FC1, in meters)
|
||||
* @param srcMask Mask that sets which pixels have to be used from the source frame (CV_8UC1)
|
||||
* @param dstImage Image data of the destination frame (CV_8UC1)
|
||||
* @param dstDepth Depth data of the destination frame (CV_32FC1, in meters)
|
||||
* @param dstMask Mask that sets which pixels have to be used from the destination frame (CV_8UC1)
|
||||
* @param Rt Resulting transformation from the source frame to the destination one (rigid body motion):
|
||||
dst_p = Rt * src_p, where dst_p is a homogeneous point in the destination frame and src_p is a
homogeneous point in the source frame,
Rt is a 4x4 matrix of CV_64FC1 type.
|
||||
* @param initRt Initial transformation from the source frame to the destination one (optional)
|
||||
*/
|
||||
CV_WRAP virtual bool compute(InputArray srcImage, InputArray srcDepth, InputArray srcMask, InputArray dstImage, InputArray dstDepth,
|
||||
InputArray dstMask, OutputArray Rt, InputArray initRt = noArray()) const = 0;
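As a concrete illustration of the Rt convention documented above, here is a short sketch (not part of the API; the helper names are made up) showing how the returned transformation is applied to a point and chained into a trajectory, as the odometry_evaluation sample further below does with prevRt * Rt.

#include <opencv2/core.hpp>

// dst_p = Rt * src_p, with src_p promoted to homogeneous coordinates
static cv::Vec4d transformToDst(const cv::Matx44d& Rt, const cv::Vec3d& srcP)
{
    return Rt * cv::Vec4d(srcP[0], srcP[1], srcP[2], 1.0);
}

// chaining per-frame delta transforms into a camera trajectory
static cv::Matx44d accumulatePose(const cv::Matx44d& prevPose, const cv::Matx44d& Rt)
{
    return prevPose * Rt;
}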
|
||||
|
||||
/** One more method to compute a transformation from the source frame to the destination one.
|
||||
* It is designed to save on computing the frame data (image pyramids, normals, etc.).
|
||||
*/
|
||||
CV_WRAP_AS(compute2) virtual bool compute(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame,
|
||||
OutputArray Rt, InputArray initRt = noArray()) const = 0;
|
||||
|
||||
/** Prepare a cache for the frame. The function checks the precomputed/passed data (and throws an error if the data
* does not satisfy the requirements) and computes all remaining cache data needed for the frame. The returned size
* is the resolution of the prepared frame.
* @param frame The frame whose cache will be prepared.
* @param cacheType The cache type: CACHE_SRC, CACHE_DST or CACHE_ALL.
|
||||
*/
|
||||
CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame> frame, OdometryFrame::OdometryFrameCacheType cacheType) const = 0;
|
||||
|
||||
/** Create odometry frame for current Odometry implementation
|
||||
* @param image Image data of the frame (CV_8UC1)
|
||||
* @param depth Depth data of the frame (CV_32FC1, in meters)
|
||||
* @param mask Mask that sets which pixels have to be used from the frame (CV_8UC1)
|
||||
*/
|
||||
CV_WRAP virtual Ptr<OdometryFrame> makeOdometryFrame(InputArray image, InputArray depth, InputArray mask) const = 0;
|
||||
|
||||
CV_WRAP virtual void getCameraMatrix(OutputArray val) const = 0;
|
||||
CV_WRAP virtual void setCameraMatrix(InputArray val) = 0;
|
||||
CV_WRAP virtual Odometry::OdometryTransformType getTransformType() const = 0;
|
||||
CV_WRAP virtual void setTransformType(Odometry::OdometryTransformType val) = 0;
|
||||
CV_WRAP virtual void getIterationCounts(OutputArray val) const = 0;
|
||||
CV_WRAP virtual void setIterationCounts(InputArray val) = 0;
|
||||
/** For each pyramid level the pixels will be filtered out if they have gradient magnitude less than minGradientMagnitudes[level].
|
||||
* Makes sense for RGB-based algorithms only.
|
||||
*/
|
||||
CV_WRAP virtual void getMinGradientMagnitudes(OutputArray val) const = 0;
|
||||
CV_WRAP virtual void setMinGradientMagnitudes(InputArray val) = 0;
|
||||
|
||||
/** Get max allowed translation in meters, default is 0.15 meters
|
||||
Found delta transform is considered successful only if the translation is in given limits. */
|
||||
CV_WRAP virtual double getMaxTranslation() const = 0;
|
||||
/** Set max allowed translation in meters.
|
||||
* Found delta transform is considered successful only if the translation is in given limits. */
|
||||
CV_WRAP virtual void setMaxTranslation(double val) = 0;
|
||||
/** Get max allowed rotation in degrees, default is 15 degrees.
|
||||
* Found delta transform is considered successful only if the rotation is in given limits. */
|
||||
CV_WRAP virtual double getMaxRotation() const = 0;
|
||||
/** Set max allowed rotation in degrees.
|
||||
* Found delta transform is considered successful only if the rotation is in given limits. */
|
||||
CV_WRAP virtual void setMaxRotation(double val) = 0;
|
||||
};
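The translation and rotation limits above act as a sanity check on the estimated delta transform. A hedged sketch of such a check follows; it is an illustration of the documented rule, not the library's internal code.

#include <opencv2/core.hpp>
#include <algorithm>
#include <cmath>

static bool deltaTransformWithinLimits(const cv::Matx44d& Rt, double maxTranslationMeters, double maxRotationDegrees)
{
    // translation magnitude taken from the last column of the 4x4 matrix
    double tnorm = std::sqrt(Rt(0,3)*Rt(0,3) + Rt(1,3)*Rt(1,3) + Rt(2,3)*Rt(2,3));

    // rotation angle recovered from the trace of the rotation block: trace(R) = 1 + 2*cos(angle)
    double c = std::max(-1.0, std::min(1.0, (Rt(0,0) + Rt(1,1) + Rt(2,2) - 1.0) * 0.5));
    double angleDeg = std::acos(c) * 180.0 / CV_PI;

    return tnorm <= maxTranslationMeters && angleDeg <= maxRotationDegrees;
}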
|
||||
|
||||
/** Odometry based on the paper "Real-Time Visual Odometry from Dense RGB-D Images",
|
||||
* F. Steinbrücker, J. Sturm, D. Cremers, ICCV, 2011.
|
||||
*/
|
||||
class CV_EXPORTS_W RgbdOdometry: public Odometry
|
||||
{
|
||||
public:
|
||||
RgbdOdometry() { }
|
||||
|
||||
/** Creates RgbdOdometry object
|
||||
* @param cameraMatrix Camera matrix
|
||||
* @param minDepth Pixels with depth less than minDepth will not be used (in meters)
|
||||
* @param maxDepth Pixels with depth larger than maxDepth will not be used (in meters)
|
||||
* @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out
|
||||
* if their depth difference is larger than maxDepthDiff (in meters)
|
||||
* @param iterCounts Count of iterations on each pyramid level, defaults are [7, 7, 7, 10]
|
||||
* @param minGradientMagnitudes For each pyramid level the pixels will be filtered out
* if they have gradient magnitude less than minGradientMagnitudes[level]; the default is 10 for each level
|
||||
* @param maxPointsPart The method uses a random pixels subset of size frameWidth x frameHeight x pointsPart (from 0 to 1)
|
||||
* @param transformType Class of transformation
|
||||
*/
|
||||
CV_WRAP static Ptr<RgbdOdometry> create(InputArray cameraMatrix = noArray(),
|
||||
float minDepth = 0.f,
|
||||
float maxDepth = 4.f,
|
||||
float maxDepthDiff = 0.07f,
|
||||
InputArray iterCounts = noArray(),
|
||||
InputArray minGradientMagnitudes = noArray(),
|
||||
float maxPointsPart = 0.07f,
|
||||
Odometry::OdometryTransformType transformType = Odometry::RIGID_BODY_MOTION);
|
||||
|
||||
CV_WRAP virtual double getMinDepth() const = 0;
|
||||
CV_WRAP virtual void setMinDepth(double val) = 0;
|
||||
CV_WRAP virtual double getMaxDepth() const = 0;
|
||||
CV_WRAP virtual void setMaxDepth(double val) = 0;
|
||||
CV_WRAP virtual double getMaxDepthDiff() const = 0;
|
||||
CV_WRAP virtual void setMaxDepthDiff(double val) = 0;
|
||||
CV_WRAP virtual double getMaxPointsPart() const = 0;
|
||||
CV_WRAP virtual void setMaxPointsPart(double val) = 0;
|
||||
|
||||
};
|
||||
|
||||
/** Odometry based on the paper "KinectFusion: Real-Time Dense Surface Mapping and Tracking",
|
||||
* Richard A. Newcombe, Andrew Fitzgibbon, et al., SIGGRAPH, 2011.
|
||||
*/
|
||||
class CV_EXPORTS_W ICPOdometry: public Odometry
|
||||
{
|
||||
public:
|
||||
ICPOdometry() { }
|
||||
|
||||
/** Creates new ICPOdometry object
|
||||
* @param cameraMatrix Camera matrix
|
||||
* @param minDepth Pixels with depth less than minDepth will not be used (in meters)
|
||||
* @param maxDepth Pixels with depth larger than maxDepth will not be used (in meters)
|
||||
* @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out
|
||||
* if their depth difference is larger than maxDepthDiff (in meters)
|
||||
* @param maxPointsPart The method uses a random pixels subset of size frameWidth x frameHeight x pointsPart (from 0 to 1)
|
||||
* @param iterCounts Count of iterations on each pyramid level, defaults are [7, 7, 7, 10]
|
||||
* @param transformType Class of transformation
|
||||
*/
|
||||
CV_WRAP static Ptr<ICPOdometry> create(InputArray cameraMatrix = noArray(),
|
||||
float minDepth = 0.f,
|
||||
float maxDepth = 4.f,
|
||||
float maxDepthDiff = 0.07f,
|
||||
float maxPointsPart = 0.07f,
|
||||
InputArray iterCounts = noArray(),
|
||||
Odometry::OdometryTransformType transformType = Odometry::RIGID_BODY_MOTION);
|
||||
|
||||
CV_WRAP virtual double getMinDepth() const = 0;
|
||||
CV_WRAP virtual void setMinDepth(double val) = 0;
|
||||
CV_WRAP virtual double getMaxDepth() const = 0;
|
||||
CV_WRAP virtual void setMaxDepth(double val) = 0;
|
||||
CV_WRAP virtual double getMaxDepthDiff() const = 0;
|
||||
CV_WRAP virtual void setMaxDepthDiff(double val) = 0;
|
||||
CV_WRAP virtual double getMaxPointsPart() const = 0;
|
||||
CV_WRAP virtual void setMaxPointsPart(double val) = 0;
|
||||
CV_WRAP virtual Ptr<RgbdNormals> getNormalsComputer() const = 0;
|
||||
};
|
||||
|
||||
/** Odometry that merges RgbdOdometry and ICPOdometry by minimizing the sum of their energy functions.
|
||||
*/
|
||||
class CV_EXPORTS_W RgbdICPOdometry: public Odometry
|
||||
{
|
||||
public:
|
||||
RgbdICPOdometry() { }
|
||||
|
||||
/** Creates RgbdICPOdometry object
|
||||
* @param cameraMatrix Camera matrix
|
||||
* @param minDepth Pixels with depth less than minDepth will not be used (in meters)
|
||||
* @param maxDepth Pixels with depth larger than maxDepth will not be used (in meters)
|
||||
* @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out
|
||||
* if their depth difference is larger than maxDepthDiff (in meters)
|
||||
* @param maxPointsPart The method uses a random pixels subset of size frameWidth x frameHeight x pointsPart (from 0 to 1)
|
||||
* @param iterCounts Count of iterations on each pyramid level, defaults are [7, 7, 7, 10]
|
||||
* @param minGradientMagnitudes For each pyramid level the pixels will be filtered out
|
||||
* if they have gradient magnitude less than minGradientMagnitudes[level]; the default is 10 for each level
* @param transformType Class of transformation
|
||||
*/
|
||||
CV_WRAP static Ptr<RgbdICPOdometry> create(InputArray cameraMatrix = noArray(),
|
||||
float minDepth = 0.f,
|
||||
float maxDepth = 4.f,
|
||||
float maxDepthDiff = 0.07f,
|
||||
float maxPointsPart = 0.07f,
|
||||
InputArray iterCounts = noArray(),
|
||||
InputArray minGradientMagnitudes = noArray(),
|
||||
Odometry::OdometryTransformType transformType = Odometry::RIGID_BODY_MOTION);
|
||||
|
||||
CV_WRAP virtual double getMinDepth() const = 0;
|
||||
CV_WRAP virtual void setMinDepth(double val) = 0;
|
||||
CV_WRAP virtual double getMaxDepth() const = 0;
|
||||
CV_WRAP virtual void setMaxDepth(double val) = 0;
|
||||
CV_WRAP virtual double getMaxDepthDiff() const = 0;
|
||||
CV_WRAP virtual void setMaxDepthDiff(double val) = 0;
|
||||
CV_WRAP virtual double getMaxPointsPart() const = 0;
|
||||
CV_WRAP virtual void setMaxPointsPart(double val) = 0;
|
||||
|
||||
CV_WRAP virtual Ptr<RgbdNormals> getNormalsComputer() const = 0;
|
||||
};
|
||||
|
||||
/** A faster version of ICPOdometry which is used in the KinectFusion implementation
|
||||
* Partial list of differences:
|
||||
* - UMats are processed using OpenCL
|
||||
* - CPU version is written in universal intrinsics
|
||||
* - Filters points by angle
|
||||
* - Interpolates points and normals
|
||||
* - Doesn't use masks or min/max depth filtering
|
||||
* - Doesn't use random subsets of points
|
||||
* - Supports only Rt transform type
|
||||
* - Initial transform is not supported and always ignored
|
||||
* - Supports only 4-float vectors as input type
|
||||
*/
|
||||
class CV_EXPORTS_W FastICPOdometry: public Odometry
|
||||
{
|
||||
public:
|
||||
FastICPOdometry() { }
|
||||
|
||||
/** Creates FastICPOdometry object
|
||||
* @param cameraMatrix Camera matrix
|
||||
* @param maxDistDiff Correspondences between pixels of two given frames will be filtered out
|
||||
* if their depth difference is larger than maxDistDiff (in meters)
|
||||
* @param angleThreshold Correspondences will be filtered out
* if the angle between their normals is bigger than the threshold
|
||||
* @param sigmaDepth Depth sigma in meters for bilateral smooth
|
||||
* @param sigmaSpatial Spatial sigma in pixels for bilateral smooth
|
||||
* @param kernelSize Kernel size in pixels for bilateral smooth
|
||||
* @param iterCounts Count of iterations on each pyramid level, defaults are [7, 7, 7, 10]
|
||||
* @param depthFactor pre-scale per 1 meter for input values
|
||||
* @param truncateThreshold Threshold for depth truncation in meters
|
||||
* All depth values beyond this threshold will be set to zero
|
||||
*/
|
||||
CV_WRAP static Ptr<FastICPOdometry> create(InputArray cameraMatrix = noArray(),
|
||||
float maxDistDiff = 0.07f,
|
||||
float angleThreshold = (float)(30. * CV_PI / 180.),
|
||||
float sigmaDepth = 0.04f,
|
||||
float sigmaSpatial = 4.5f,
|
||||
int kernelSize = 7,
|
||||
InputArray iterCounts = noArray(),
|
||||
float depthFactor = 1.f,
|
||||
float truncateThreshold = 0.f);
|
||||
|
||||
CV_WRAP virtual double getMaxDistDiff() const = 0;
|
||||
CV_WRAP virtual void setMaxDistDiff(float val) = 0;
|
||||
CV_WRAP virtual float getAngleThreshold() const = 0;
|
||||
CV_WRAP virtual void setAngleThreshold(float f) = 0;
|
||||
CV_WRAP virtual float getSigmaDepth() const = 0;
|
||||
CV_WRAP virtual void setSigmaDepth(float f) = 0;
|
||||
CV_WRAP virtual float getSigmaSpatial() const = 0;
|
||||
CV_WRAP virtual void setSigmaSpatial(float f) = 0;
|
||||
CV_WRAP virtual int getKernelSize() const = 0;
|
||||
CV_WRAP virtual void setKernelSize(int f) = 0;
|
||||
CV_WRAP virtual float getDepthFactor() const = 0;
|
||||
CV_WRAP virtual void setDepthFactor(float depthFactor) = 0;
|
||||
CV_WRAP virtual float getTruncateThreshold() const = 0;
|
||||
CV_WRAP virtual void setTruncateThreshold(float truncateThreshold) = 0;
|
||||
|
||||
};
|
||||
|
||||
// TODO Depth interpolation
|
||||
// Curvature
|
||||
|
@ -52,7 +52,7 @@ public:
|
||||
virtual ~Submap() = default;
|
||||
|
||||
virtual void integrate(InputArray _depth, float depthFactor, const cv::Matx33f& intrinsics, const int currframeId);
|
||||
virtual void raycast(const cv::Affine3f& cameraPose, const cv::Matx33f& intrinsics, cv::Size frameSize,
|
||||
virtual void raycast(const Odometry& icp, const cv::Affine3f& cameraPose, const cv::Matx33f& intrinsics, cv::Size frameSize,
|
||||
OutputArray points = noArray(), OutputArray normals = noArray());
|
||||
|
||||
virtual int getTotalAllocatedBlocks() const { return int(volume->getTotalVolumeUnits()); };
|
||||
@ -94,7 +94,8 @@ public:
|
||||
static constexpr int FRAME_VISIBILITY_THRESHOLD = 5;
|
||||
|
||||
//! TODO: Add support for GPU arrays (UMat)
|
||||
Ptr<OdometryFrame> frame;
|
||||
OdometryFrame frame;
|
||||
OdometryFrame renderFrame;
|
||||
|
||||
std::shared_ptr<Volume> volume;
|
||||
};
|
||||
@ -109,17 +110,25 @@ void Submap<MatType>::integrate(InputArray _depth, float depthFactor, const cv::
|
||||
}
|
||||
|
||||
template<typename MatType>
|
||||
void Submap<MatType>::raycast(const cv::Affine3f& _cameraPose, const cv::Matx33f& intrinsics, cv::Size frameSize,
|
||||
void Submap<MatType>::raycast(const Odometry& icp, const cv::Affine3f& _cameraPose, const cv::Matx33f& intrinsics, cv::Size frameSize,
|
||||
OutputArray points, OutputArray normals)
|
||||
{
|
||||
if (!points.needed() && !normals.needed())
|
||||
{
|
||||
MatType pts, nrm;
|
||||
frame->getPyramidAt(pts, OdometryFrame::PYR_CLOUD, 0);
|
||||
frame->getPyramidAt(nrm, OdometryFrame::PYR_NORM, 0);
|
||||
|
||||
frame.getPyramidAt(pts, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
frame.getPyramidAt(nrm, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
volume->raycast(_cameraPose.matrix, intrinsics, frameSize, pts, nrm);
|
||||
frame->setPyramidAt(pts, OdometryFrame::PYR_CLOUD, 0);
|
||||
frame->setPyramidAt(nrm, OdometryFrame::PYR_NORM, 0);
|
||||
frame.setPyramidAt(pts, OdometryFramePyramidType::PYR_CLOUD, 0);
|
||||
frame.setPyramidAt(nrm, OdometryFramePyramidType::PYR_NORM, 0);
|
||||
|
||||
renderFrame = frame;
|
||||
|
||||
Mat depth;
|
||||
frame.getDepth(depth);
|
||||
frame = icp.createOdometryFrame();
|
||||
frame.setDepth(depth);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -195,7 +204,7 @@ public:
|
||||
Ptr<SubmapT> getCurrentSubmap(void) const;
|
||||
|
||||
int estimateConstraint(int fromSubmapId, int toSubmapId, int& inliers, Affine3f& inlierPose);
|
||||
bool updateMap(int _frameId, Ptr<OdometryFrame> _frame);
|
||||
bool updateMap(int frameId, const OdometryFrame& frame);
|
||||
|
||||
bool addEdgeToCurrentSubmap(const int currentSubmapID, const int tarSubmapID);
|
||||
|
||||
@ -418,7 +427,7 @@ bool SubmapManager<MatType>::addEdgeToCurrentSubmap(const int currentSubmapID, c
|
||||
}
|
||||
|
||||
template<typename MatType>
|
||||
bool SubmapManager<MatType>::updateMap(int _frameId, Ptr<OdometryFrame> _frame)
|
||||
bool SubmapManager<MatType>::updateMap(int _frameId, const OdometryFrame& _frame)
|
||||
{
|
||||
bool mapUpdated = false;
|
||||
int changedCurrentMapId = -1;
|
||||
|
modules/3d/include/opencv2/3d/odometry.hpp (new file, 81 lines)
@@ -0,0 +1,81 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef OPENCV_3D_ODOMETRY_HPP
#define OPENCV_3D_ODOMETRY_HPP

#include <opencv2/core.hpp>

#include "odometry_frame.hpp"
#include "odometry_settings.hpp"

namespace cv
{

/** These constants specify the type of data the odometry uses
 * @param DEPTH use depth data only
 * @param RGB use RGB image only
 * @param RGB_DEPTH use depth and RGB data simultaneously
 */
enum class OdometryType
{
    DEPTH = 0,
    RGB = 1,
    RGB_DEPTH = 2
};

/** These constants set the trade-off between the speed and accuracy of the odometry
 * @param COMMON more accurate but slower
 * @param FAST less accurate but faster
 */
enum class OdometryAlgoType
{
    COMMON = 0,
    FAST = 1
};

class CV_EXPORTS_W Odometry
{
public:
    Odometry();
    Odometry(OdometryType otype, const OdometrySettings settings, OdometryAlgoType algtype);
    ~Odometry();

    /** Create a new odometry frame
     * The frame type (Mat or UMat) depends on the odometry type
     */
    OdometryFrame createOdometryFrame() const;

    // Deprecated
    OdometryFrame createOdometryFrame(OdometryFrameStoreType matType) const;

    /** Prepare a frame for odometry calculation
     * @param frame the frame is prepared as the src frame and the dst frame simultaneously
     */
    void prepareFrame(OdometryFrame& frame);

    /** Prepare frames for odometry calculation
     * @param srcFrame frame to be prepared as the src frame ("original" image)
     * @param dstFrame frame to be prepared as the dst frame ("rotated" image)
     */
    void prepareFrames(OdometryFrame& srcFrame, OdometryFrame& dstFrame);

    /** Compute the rigid transformation between two prepared frames
     * @param srcFrame src frame ("original" image)
     * @param dstFrame dst frame ("rotated" image)
     * @param Rt rigid body transformation to be computed, in the form:
     * { R_11 R_12 R_13 t_1
     *   R_21 R_22 R_23 t_2
     *   R_31 R_32 R_33 t_3
     *   0    0    0    1 }
     */
    bool compute(const OdometryFrame& srcFrame, const OdometryFrame& dstFrame, OutputArray Rt);

    class Impl;
private:
    Ptr<Impl> impl;
};

}
#endif //OPENCV_3D_ODOMETRY_HPP
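To show how these pieces fit together, here is a compact sketch of the new pipeline, modeled on the odometry_evaluation sample changes further below. The header name, the camera matrix handling, and the RGB_DEPTH/COMMON choice are illustrative, not mandated defaults.

#include <opencv2/core.hpp>
#include <opencv2/3d.hpp>

bool trackPair(const cv::Mat& grayPrev, const cv::Mat& depthPrev,
               const cv::Mat& grayCurr, const cv::Mat& depthCurr,
               const cv::Mat& cameraMatrix, cv::Mat& Rt)
{
    cv::OdometrySettings ods;
    ods.setCameraMatrix(cameraMatrix);

    // RGB_DEPTH + COMMON corresponds to the former RgbdICPOdometry behaviour in the sample
    cv::Odometry odometry(cv::OdometryType::RGB_DEPTH, ods, cv::OdometryAlgoType::COMMON);

    cv::OdometryFrame framePrev = odometry.createOdometryFrame();
    cv::OdometryFrame frameCurr = odometry.createOdometryFrame();
    framePrev.setImage(grayPrev); framePrev.setDepth(depthPrev);
    frameCurr.setImage(grayCurr); frameCurr.setDepth(depthCurr);

    // fills pyramids, normals and other cached data for both frame roles
    odometry.prepareFrames(frameCurr, framePrev);
    return odometry.compute(frameCurr, framePrev, Rt);  // Rt is a 4x4 CV_64FC1 matrix
}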
modules/3d/include/opencv2/3d/odometry_frame.hpp (new file, 70 lines)
@@ -0,0 +1,70 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef OPENCV_3D_ODOMETRY_FRAME_HPP
#define OPENCV_3D_ODOMETRY_FRAME_HPP

#include <opencv2/core.hpp>

namespace cv
{
/** Indicates which pyramid the get/setPyramid... methods access:
 * @param PYR_IMAGE The pyramid of RGB images
 * @param PYR_DEPTH The pyramid of depth images
 * @param PYR_MASK The pyramid of masks
 * @param PYR_CLOUD The pyramid of point clouds, produced from the pyramid of depths
 * @param PYR_DIX The pyramid of dI/dx derivative images
 * @param PYR_DIY The pyramid of dI/dy derivative images
 * @param PYR_TEXMASK The pyramid of textured masks
 * @param PYR_NORM The pyramid of normals
 * @param PYR_NORMMASK The pyramid of normals masks
 */
enum OdometryFramePyramidType
{
    PYR_IMAGE = 0,
    PYR_DEPTH = 1,
    PYR_MASK = 2,
    PYR_CLOUD = 3,
    PYR_DIX = 4,
    PYR_DIY = 5,
    PYR_TEXMASK = 6,
    PYR_NORM = 7,
    PYR_NORMMASK = 8,
    N_PYRAMIDS
};

enum class OdometryFrameStoreType
{
    MAT = 0,
    UMAT = 1
};

class CV_EXPORTS_W OdometryFrame
{
public:
    OdometryFrame();
    OdometryFrame(OdometryFrameStoreType matType);
    ~OdometryFrame() {};
    void setImage(InputArray image);
    void getImage(OutputArray image) const;
    void getGrayImage(OutputArray image) const;
    void setDepth(InputArray depth);
    void getDepth(OutputArray depth) const;
    void setMask(InputArray mask);
    void getMask(OutputArray mask) const;
    void setNormals(InputArray normals);
    void getNormals(OutputArray normals) const;
    void setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype);
    void setPyramidLevels(size_t _nLevels);
    size_t getPyramidLevels(OdometryFramePyramidType oftype) const;
    void setPyramidAt(InputArray img, OdometryFramePyramidType pyrType, size_t level);
    void getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const;

    class Impl;
private:
    Ptr<Impl> impl;
};
}
#endif // !OPENCV_3D_ODOMETRY_FRAME_HPP
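A small sketch of reading cached pyramid data back from a frame. It assumes the frame has already been passed through Odometry::prepareFrame; the umbrella header name is an assumption.

#include <opencv2/core.hpp>
#include <opencv2/3d.hpp>
#include <iostream>

void printDepthPyramid(const cv::OdometryFrame& frame)
{
    size_t nLevels = frame.getPyramidLevels(cv::OdometryFramePyramidType::PYR_DEPTH);
    for (size_t lvl = 0; lvl < nLevels; lvl++)
    {
        cv::Mat depthLevel;
        frame.getPyramidAt(depthLevel, cv::OdometryFramePyramidType::PYR_DEPTH, lvl);
        std::cout << "level " << lvl << ": " << depthLevel.cols << "x" << depthLevel.rows << std::endl;
    }
}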
modules/3d/include/opencv2/3d/odometry_settings.hpp (new file, 58 lines)
@@ -0,0 +1,58 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef OPENCV_3D_ODOMETRY_SETTINGS_HPP
#define OPENCV_3D_ODOMETRY_SETTINGS_HPP

#include <opencv2/core.hpp>

namespace cv
{

class CV_EXPORTS_W OdometrySettings
{
public:
    OdometrySettings();
    ~OdometrySettings() {};
    void setCameraMatrix(InputArray val);
    void getCameraMatrix(OutputArray val) const;
    void setIterCounts(InputArray val);
    void getIterCounts(OutputArray val) const;

    void setMinDepth(float val);
    float getMinDepth() const;
    void setMaxDepth(float val);
    float getMaxDepth() const;
    void setMaxDepthDiff(float val);
    float getMaxDepthDiff() const;
    void setMaxPointsPart(float val);
    float getMaxPointsPart() const;

    void setSobelSize(int val);
    int getSobelSize() const;
    void setSobelScale(double val);
    double getSobelScale() const;
    void setNormalWinSize(int val);
    int getNormalWinSize() const;

    void setAngleThreshold(float val);
    float getAngleThreshold() const;
    void setMaxTranslation(float val);
    float getMaxTranslation() const;
    void setMaxRotation(float val);
    float getMaxRotation() const;

    void setMinGradientMagnitude(float val);
    float getMinGradientMagnitude() const;
    void setMinGradientMagnitudes(InputArray val);
    void getMinGradientMagnitudes(OutputArray val) const;

    class Impl;

private:
    Ptr<Impl> impl;
};

}
#endif //OPENCV_3D_ODOMETRY_SETTINGS_HPP
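For illustration, a hedged sketch of tuning a few of these settings before constructing an Odometry. The numeric values simply echo the defaults documented for the old classes above and are not prescribed by this header.

#include <opencv2/core.hpp>
#include <opencv2/3d.hpp>

cv::OdometrySettings makeSettings(const cv::Mat& cameraMatrix)
{
    cv::OdometrySettings ods;
    ods.setCameraMatrix(cameraMatrix);
    ods.setMinDepth(0.f);           // meters
    ods.setMaxDepth(4.f);           // meters
    ods.setMaxTranslation(0.15f);   // meters per frame pair
    ods.setMaxRotation(15.f);       // degrees per frame pair
    return ods;
}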
@ -80,8 +80,14 @@ int main(int argc, char** argv)
|
||||
if( !file.is_open() )
|
||||
return -1;
|
||||
|
||||
char dlmrt = '/';
|
||||
size_t pos = filename.rfind(dlmrt);
|
||||
char dlmrt1 = '/';
|
||||
char dlmrt2 = '\\';
|
||||
|
||||
size_t pos1 = filename.rfind(dlmrt1);
|
||||
size_t pos2 = filename.rfind(dlmrt2);
|
||||
size_t pos = pos1 < pos2 ? pos1 : pos2;
|
||||
char dlmrt = pos1 < pos2 ? dlmrt1 : dlmrt2;
|
||||
|
||||
string dirname = pos == string::npos ? "" : filename.substr(0, pos) + dlmrt;
|
||||
|
||||
const int timestampLength = 17;
|
||||
@ -104,15 +110,26 @@ int main(int argc, char** argv)
|
||||
cameraMatrix.at<float>(1,2) = cy;
|
||||
}
|
||||
|
||||
Ptr<OdometryFrame> frame_prev = OdometryFrame::create(),
|
||||
frame_curr = OdometryFrame::create();
|
||||
Ptr<Odometry> odometry = Odometry::createFromName(string(argv[3]) + "Odometry");
|
||||
if(odometry.empty())
|
||||
OdometrySettings ods;
|
||||
ods.setCameraMatrix(cameraMatrix);
|
||||
Odometry odometry;
|
||||
String odname = string(argv[3]);
|
||||
if (odname == "Rgbd")
|
||||
odometry = Odometry(OdometryType::RGB, ods, OdometryAlgoType::COMMON);
|
||||
else if (odname == "ICP")
|
||||
odometry = Odometry(OdometryType::DEPTH, ods, OdometryAlgoType::COMMON);
|
||||
else if (odname == "RgbdICP")
|
||||
odometry = Odometry(OdometryType::RGB_DEPTH, ods, OdometryAlgoType::COMMON);
|
||||
else if (odname == "FastICP")
|
||||
odometry = Odometry(OdometryType::DEPTH, ods, OdometryAlgoType::FAST);
|
||||
else
|
||||
{
|
||||
cout << "Can not create Odometry algorithm. Check the passed odometry name." << endl;
|
||||
std::cout << "Can not create Odometry algorithm. Check the passed odometry name." << std::endl;
|
||||
return -1;
|
||||
}
|
||||
odometry->setCameraMatrix(cameraMatrix);
|
||||
|
||||
OdometryFrame frame_prev = odometry.createOdometryFrame(),
|
||||
frame_curr = odometry.createOdometryFrame();
|
||||
|
||||
TickMeter gtm;
|
||||
int count = 0;
|
||||
@ -141,8 +158,6 @@ int main(int argc, char** argv)
|
||||
CV_Assert(!depth.empty());
|
||||
CV_Assert(depth.type() == CV_16UC1);
|
||||
|
||||
cout << i << " " << rgbFilename << " " << depthFilename << endl;
|
||||
|
||||
// scale depth
|
||||
Mat depth_flt;
|
||||
depth.convertTo(depth_flt, CV_32FC1, 1.f/5000.f);
|
||||
@ -167,8 +182,8 @@ int main(int argc, char** argv)
|
||||
{
|
||||
Mat gray;
|
||||
cvtColor(image, gray, COLOR_BGR2GRAY);
|
||||
frame_curr->setImage(gray);
|
||||
frame_curr->setDepth(depth);
|
||||
frame_curr.setImage(gray);
|
||||
frame_curr.setDepth(depth);
|
||||
|
||||
Mat Rt;
|
||||
if(!Rts.empty())
|
||||
@ -176,7 +191,8 @@ int main(int argc, char** argv)
|
||||
TickMeter tm;
|
||||
tm.start();
|
||||
gtm.start();
|
||||
bool res = odometry->compute(frame_curr, frame_prev, Rt);
|
||||
odometry.prepareFrames(frame_curr, frame_prev);
|
||||
bool res = odometry.compute(frame_curr, frame_prev, Rt);
|
||||
gtm.stop();
|
||||
tm.stop();
|
||||
count++;
|
||||
@ -197,9 +213,11 @@ int main(int argc, char** argv)
|
||||
Rts.push_back( prevRt * Rt );
|
||||
}
|
||||
|
||||
if (!frame_prev.empty())
|
||||
frame_prev.release();
|
||||
std::swap(frame_prev, frame_curr);
|
||||
//if (!frame_prev.empty())
|
||||
// frame_prev.release();
|
||||
frame_prev = frame_curr;
|
||||
frame_curr = odometry.createOdometryFrame();
|
||||
//std::swap(frame_prev, frame_curr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -36,7 +36,7 @@ static void depthTo3d_from_uvz(const cv::Mat& in_K, const cv::Mat& u_mat, const
|
||||
float cx = K(0, 2);
|
||||
float cy = K(1, 2);
|
||||
|
||||
std::vector<cv::Mat> coordinates(3);
|
||||
std::vector<cv::Mat> coordinates(4);
|
||||
|
||||
coordinates[0] = (u_mat - cx) / fx;
|
||||
|
||||
@ -46,6 +46,7 @@ static void depthTo3d_from_uvz(const cv::Mat& in_K, const cv::Mat& u_mat, const
|
||||
coordinates[0] = coordinates[0].mul(z_mat);
|
||||
coordinates[1] = (v_mat - cy).mul(z_mat) * (1. / fy);
|
||||
coordinates[2] = z_mat;
|
||||
coordinates[3] = 0;
|
||||
cv::merge(coordinates, points3d);
|
||||
}
|
||||
|
||||
@ -86,7 +87,7 @@ static void depthTo3dMask(const cv::Mat& depth, const cv::Mat& K, const cv::Mat&
|
||||
z_mat.resize(n_points);
|
||||
|
||||
depthTo3d_from_uvz(K, u_mat, v_mat, z_mat, points3d);
|
||||
points3d = points3d.reshape(3, 1);
|
||||
points3d = points3d.reshape(4, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -120,7 +121,7 @@ void depthTo3dNoMask(const cv::Mat& in_depth, const cv::Mat_<T>& K, cv::Mat& poi
|
||||
y_cache_ptr = y_cache[0];
|
||||
for (int y = 0; y < in_depth.rows; ++y, ++y_cache_ptr)
|
||||
{
|
||||
cv::Vec<T, 3>* point = points3d.ptr<cv::Vec<T, 3> >(y);
|
||||
cv::Vec<T, 4>* point = points3d.ptr<cv::Vec<T, 4> >(y);
|
||||
const T* x_cache_ptr_end = x_cache[0] + in_depth.cols;
|
||||
const T* depth = z_mat[y];
|
||||
for (x_cache_ptr = x_cache[0]; x_cache_ptr != x_cache_ptr_end; ++x_cache_ptr, ++point, ++depth)
|
||||
@ -129,6 +130,7 @@ void depthTo3dNoMask(const cv::Mat& in_depth, const cv::Mat_<T>& K, cv::Mat& poi
|
||||
(*point)[0] = (*x_cache_ptr) * z;
|
||||
(*point)[1] = (*y_cache_ptr) * z;
|
||||
(*point)[2] = z;
|
||||
(*point)[3] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -170,7 +172,7 @@ void depthTo3dSparse(InputArray depth_in, InputArray K_in, InputArray points_in,
|
||||
std::vector<cv::Mat> channels(2);
|
||||
cv::split(points_float, channels);
|
||||
|
||||
points3d_out.create(channels[0].rows, channels[0].cols, CV_32FC3);
|
||||
points3d_out.create(channels[0].rows, channels[0].cols, CV_32FC4);
|
||||
cv::Mat points3d = points3d_out.getMat();
|
||||
depthTo3d_from_uvz(K_in.getMat(), channels[0], channels[1], z_mat, points3d);
|
||||
}
|
||||
@ -200,12 +202,12 @@ void depthTo3d(InputArray depth_in, InputArray K_in, OutputArray points3d_out, I
|
||||
{
|
||||
cv::Mat points3d;
|
||||
depthTo3dMask(depth, K_new, mask, points3d);
|
||||
points3d_out.create(points3d.size(), CV_MAKETYPE(K_new.depth(), 3));
|
||||
points3d_out.create(points3d.size(), CV_MAKETYPE(K_new.depth(), 4));
|
||||
points3d.copyTo(points3d_out.getMat());
|
||||
}
|
||||
else
|
||||
{
|
||||
points3d_out.create(depth.size(), CV_MAKETYPE(K_new.depth(), 3));
|
||||
points3d_out.create(depth.size(), CV_MAKETYPE(K_new.depth(), 4));
|
||||
cv::Mat points3d = points3d_out.getMat();
|
||||
if (K_new.depth() == CV_64F)
|
||||
depthTo3dNoMask<double>(depth, K_new, points3d);
|
||||
|
@ -1,656 +0,0 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
// Partially rewritten from https://github.com/Nerei/kinfu_remake
|
||||
// Copyright(c) 2012, Anatoly Baksheev. All rights reserved.
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "fast_icp.hpp"
|
||||
#include "opencl_kernels_3d.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace cv {
|
||||
namespace kinfu {
|
||||
|
||||
enum
|
||||
{
|
||||
UTSIZE = 27
|
||||
};
|
||||
|
||||
ICP::ICP(const Matx33f _intrinsics, const std::vector<int>& _iterations, float _angleThreshold, float _distanceThreshold) :
|
||||
iterations(_iterations), angleThreshold(_angleThreshold), distanceThreshold(_distanceThreshold),
|
||||
intrinsics(_intrinsics)
|
||||
{ }
|
||||
|
||||
class ICPImpl : public ICP
|
||||
{
|
||||
public:
|
||||
ICPImpl(const cv::Matx33f _intrinsics, const std::vector<int> &_iterations, float _angleThreshold, float _distanceThreshold);
|
||||
|
||||
virtual bool estimateTransform(cv::Affine3f& transform,
|
||||
InputArray oldPoints, InputArray oldNormals,
|
||||
InputArray newPoints, InputArray newNormals
|
||||
) const override;
|
||||
template < typename T >
|
||||
bool estimateTransformT(cv::Affine3f& transform,
|
||||
const vector<T>& oldPoints, const vector<T>& oldNormals,
|
||||
const vector<T>& newPoints, const vector<T>& newNormals
|
||||
) const;
|
||||
|
||||
virtual ~ICPImpl() { }
|
||||
|
||||
template < typename T >
|
||||
void getAb(const T& oldPts, const T& oldNrm, const T& newPts, const T& newNrm,
|
||||
cv::Affine3f pose, int level, cv::Matx66f& A, cv::Vec6f& b) const;
|
||||
|
||||
private:
|
||||
|
||||
mutable vector<UMat> groupedSumBuffers;
|
||||
|
||||
};
|
||||
|
||||
ICPImpl::ICPImpl(const Matx33f _intrinsics, const std::vector<int> &_iterations, float _angleThreshold, float _distanceThreshold) :
|
||||
ICP(_intrinsics, _iterations, _angleThreshold, _distanceThreshold),
|
||||
groupedSumBuffers(_iterations.size())
|
||||
{ }
|
||||
|
||||
|
||||
bool ICPImpl::estimateTransform(cv::Affine3f& transform,
|
||||
InputArray _oldPoints, InputArray _oldNormals,
|
||||
InputArray _newPoints, InputArray _newNormals
|
||||
) const
|
||||
{
|
||||
CV_TRACE_FUNCTION();
|
||||
|
||||
CV_Assert(_oldPoints.size() == _oldNormals.size());
|
||||
CV_Assert(_newPoints.size() == _newNormals.size());
|
||||
CV_Assert(_oldPoints.size() == _newPoints.size());
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
if(cv::ocl::isOpenCLActivated() &&
|
||||
_oldPoints.isUMatVector() && _oldNormals.isUMatVector() &&
|
||||
_newPoints.isUMatVector() && _newNormals.isUMatVector())
|
||||
{
|
||||
std::vector<UMat> op, np, on, nn;
|
||||
_oldPoints.getUMatVector(op);
|
||||
_newPoints.getUMatVector(np);
|
||||
_oldNormals.getUMatVector(on);
|
||||
_newNormals.getUMatVector(nn);
|
||||
return estimateTransformT<UMat>(transform, op, on, np, nn);
|
||||
}
|
||||
#endif
|
||||
|
||||
std::vector<Mat> op, on, np, nn;
|
||||
_oldPoints.getMatVector(op);
|
||||
_newPoints.getMatVector(np);
|
||||
_oldNormals.getMatVector(on);
|
||||
_newNormals.getMatVector(nn);
|
||||
return estimateTransformT<Mat>(transform, op, on, np, nn);
|
||||
}
|
||||
|
||||
template < typename T >
|
||||
bool ICPImpl::estimateTransformT(cv::Affine3f& transform,
|
||||
const vector<T>& oldPoints, const vector<T>& oldNormals,
|
||||
const vector<T>& newPoints, const vector<T>& newNormals
|
||||
) const
|
||||
{
|
||||
CV_TRACE_FUNCTION();
|
||||
|
||||
transform = Affine3f::Identity();
|
||||
for(size_t l = 0; l < iterations.size(); l++)
|
||||
{
|
||||
size_t level = iterations.size() - 1 - l;
|
||||
|
||||
const T& oldPts = oldPoints [level], newPts = newPoints [level];
|
||||
const T& oldNrm = oldNormals[level], newNrm = newNormals[level];
|
||||
|
||||
for(int iter = 0; iter < iterations[level]; iter++)
|
||||
{
|
||||
Matx66f A;
|
||||
Vec6f b;
|
||||
|
||||
getAb(oldPts, oldNrm, newPts, newNrm, transform, (int)level, A, b);
|
||||
|
||||
double det = cv::determinant(A);
|
||||
|
||||
if (abs (det) < 1e-15 || cvIsNaN(det))
|
||||
return false;
|
||||
|
||||
Vec6f x;
|
||||
// theoretically, any method of solving is applicable
|
||||
// since there are usual least square matrices
|
||||
solve(A, b, x, DECOMP_SVD);
|
||||
Affine3f tinc(Vec3f(x.val), Vec3f(x.val+3));
|
||||
transform = tinc * transform;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
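For reference, the linear system assembled by this loop is the standard point-to-plane ICP linearization; the summary below is mine, not text from the source. Here p_i is the new point transformed into the old coordinate system, q_i the matched old point, and n_i the old normal.

E(\xi) = \sum_i \big( (R\,p_i + t - q_i) \cdot n_i \big)^2, \qquad R \approx I + [\omega]_\times,\ \ \xi = (\omega, t) \in \mathbb{R}^6
a_i = \big[\, p_i \times n_i \;\big|\; n_i \,\big] \in \mathbb{R}^{1 \times 6}, \qquad b_i = n_i \cdot (q_i - p_i)
\Big(\sum_i a_i^\top a_i\Big)\,\xi \;=\; \sum_i a_i^\top b_i

These sums are exactly the 6x6 matrix A and vector b that getAb() accumulates as the upper triangle of [A | b] before solve(A, b, x, DECOMP_SVD) is called.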
|
||||
|
||||
|
||||
///////// CPU implementation /////////
|
||||
|
||||
// 1 any coord to check is enough since we know the generation
|
||||
|
||||
|
||||
#if USE_INTRINSICS
|
||||
static inline bool fastCheck(const v_float32x4& p0, const v_float32x4& p1)
|
||||
{
|
||||
float check = (p0.get0() + p1.get0());
|
||||
return !cvIsNaN(check);
|
||||
}
|
||||
|
||||
static inline void getCrossPerm(const v_float32x4& a, v_float32x4& yzx, v_float32x4& zxy)
|
||||
{
|
||||
v_uint32x4 aa = v_reinterpret_as_u32(a);
|
||||
v_uint32x4 yz00 = v_extract<1>(aa, v_setzero_u32());
|
||||
v_uint32x4 x0y0, tmp;
|
||||
v_zip(aa, v_setzero_u32(), x0y0, tmp);
|
||||
v_uint32x4 yzx0 = v_combine_low(yz00, x0y0);
|
||||
v_uint32x4 y000 = v_extract<2>(x0y0, v_setzero_u32());
|
||||
v_uint32x4 zx00 = v_extract<1>(yzx0, v_setzero_u32());
|
||||
zxy = v_reinterpret_as_f32(v_combine_low(zx00, y000));
|
||||
yzx = v_reinterpret_as_f32(yzx0);
|
||||
}
|
||||
|
||||
static inline v_float32x4 crossProduct(const v_float32x4& a, const v_float32x4& b)
|
||||
{
|
||||
v_float32x4 ayzx, azxy, byzx, bzxy;
|
||||
getCrossPerm(a, ayzx, azxy);
|
||||
getCrossPerm(b, byzx, bzxy);
|
||||
return ayzx*bzxy - azxy*byzx;
|
||||
}
|
||||
#else
|
||||
static inline bool fastCheck(const Point3f& p)
|
||||
{
|
||||
return !cvIsNaN(p.x);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
typedef Matx<float, 6, 7> ABtype;
|
||||
|
||||
struct GetAbInvoker : ParallelLoopBody
|
||||
{
|
||||
GetAbInvoker(ABtype& _globalAb, Mutex& _mtx,
|
||||
const Points& _oldPts, const Normals& _oldNrm, const Points& _newPts, const Normals& _newNrm,
|
||||
Affine3f _pose, Intr::Projector _proj, float _sqDistanceThresh, float _minCos) :
|
||||
ParallelLoopBody(),
|
||||
globalSumAb(_globalAb), mtx(_mtx),
|
||||
oldPts(_oldPts), oldNrm(_oldNrm), newPts(_newPts), newNrm(_newNrm), pose(_pose),
|
||||
proj(_proj), sqDistanceThresh(_sqDistanceThresh), minCos(_minCos)
|
||||
{ }
|
||||
|
||||
virtual void operator ()(const Range& range) const override
|
||||
{
|
||||
#if USE_INTRINSICS
|
||||
CV_Assert(ptype::channels == 4);
|
||||
|
||||
const size_t utBufferSize = 9;
|
||||
float CV_DECL_ALIGNED(16) upperTriangle[utBufferSize*4];
|
||||
for(size_t i = 0; i < utBufferSize*4; i++)
|
||||
upperTriangle[i] = 0;
|
||||
// how values are kept in upperTriangle
|
||||
const int NA = 0;
|
||||
const size_t utPos[] =
|
||||
{
|
||||
0, 1, 2, 4, 5, 6, 3,
|
||||
NA, 9, 10, 12, 13, 14, 11,
|
||||
NA, NA, 18, 20, 21, 22, 19,
|
||||
NA, NA, NA, 24, 28, 30, 32,
|
||||
NA, NA, NA, NA, 25, 29, 33,
|
||||
NA, NA, NA, NA, NA, 26, 34
|
||||
};
|
||||
|
||||
const float (&pm)[16] = pose.matrix.val;
|
||||
v_float32x4 poseRot0(pm[0], pm[4], pm[ 8], 0);
|
||||
v_float32x4 poseRot1(pm[1], pm[5], pm[ 9], 0);
|
||||
v_float32x4 poseRot2(pm[2], pm[6], pm[10], 0);
|
||||
v_float32x4 poseTrans(pm[3], pm[7], pm[11], 0);
|
||||
|
||||
v_float32x4 vfxy(proj.fx, proj.fy, 0, 0), vcxy(proj.cx, proj.cy, 0, 0);
|
||||
v_float32x4 vframe((float)(oldPts.cols - 1), (float)(oldPts.rows - 1), 1.f, 1.f);
|
||||
|
||||
float sqThresh = sqDistanceThresh;
|
||||
float cosThresh = minCos;
|
||||
|
||||
for(int y = range.start; y < range.end; y++)
|
||||
{
|
||||
const CV_DECL_ALIGNED(16) float* newPtsRow = (const float*)newPts[y];
|
||||
const CV_DECL_ALIGNED(16) float* newNrmRow = (const float*)newNrm[y];
|
||||
|
||||
for(int x = 0; x < newPts.cols; x++)
|
||||
{
|
||||
v_float32x4 newP = v_load_aligned(newPtsRow + x*4);
|
||||
v_float32x4 newN = v_load_aligned(newNrmRow + x*4);
|
||||
|
||||
if(!fastCheck(newP, newN))
|
||||
continue;
|
||||
|
||||
//transform to old coord system
|
||||
newP = v_matmuladd(newP, poseRot0, poseRot1, poseRot2, poseTrans);
|
||||
newN = v_matmuladd(newN, poseRot0, poseRot1, poseRot2, v_setzero_f32());
|
||||
|
||||
//find correspondence by projecting the point
|
||||
v_float32x4 oldCoords;
|
||||
float pz = (v_reinterpret_as_f32(v_rotate_right<2>(v_reinterpret_as_u32(newP))).get0());
|
||||
// x, y, 0, 0
|
||||
oldCoords = v_muladd(newP/v_setall_f32(pz), vfxy, vcxy);
|
||||
|
||||
if(!v_check_all((oldCoords >= v_setzero_f32()) & (oldCoords < vframe)))
|
||||
continue;
|
||||
|
||||
// bilinearly interpolate oldPts and oldNrm under oldCoords point
|
||||
v_float32x4 oldP;
|
||||
v_float32x4 oldN;
|
||||
{
|
||||
v_int32x4 ixy = v_floor(oldCoords);
|
||||
v_float32x4 txy = oldCoords - v_cvt_f32(ixy);
|
||||
int xi = ixy.get0();
|
||||
int yi = v_rotate_right<1>(ixy).get0();
|
||||
v_float32x4 tx = v_setall_f32(txy.get0());
|
||||
txy = v_reinterpret_as_f32(v_rotate_right<1>(v_reinterpret_as_u32(txy)));
|
||||
v_float32x4 ty = v_setall_f32(txy.get0());
|
||||
|
||||
const float* prow0 = (const float*)oldPts[yi+0];
|
||||
const float* prow1 = (const float*)oldPts[yi+1];
|
||||
|
||||
v_float32x4 p00 = v_load(prow0 + (xi+0)*4);
|
||||
v_float32x4 p01 = v_load(prow0 + (xi+1)*4);
|
||||
v_float32x4 p10 = v_load(prow1 + (xi+0)*4);
|
||||
v_float32x4 p11 = v_load(prow1 + (xi+1)*4);
|
||||
|
||||
// do not fix missing data
|
||||
// NaN check is done later
|
||||
|
||||
const float* nrow0 = (const float*)oldNrm[yi+0];
|
||||
const float* nrow1 = (const float*)oldNrm[yi+1];
|
||||
|
||||
v_float32x4 n00 = v_load(nrow0 + (xi+0)*4);
|
||||
v_float32x4 n01 = v_load(nrow0 + (xi+1)*4);
|
||||
v_float32x4 n10 = v_load(nrow1 + (xi+0)*4);
|
||||
v_float32x4 n11 = v_load(nrow1 + (xi+1)*4);
|
||||
|
||||
// NaN check is done later
|
||||
|
||||
v_float32x4 p0 = p00 + tx*(p01 - p00);
|
||||
v_float32x4 p1 = p10 + tx*(p11 - p10);
|
||||
oldP = p0 + ty*(p1 - p0);
|
||||
|
||||
v_float32x4 n0 = n00 + tx*(n01 - n00);
|
||||
v_float32x4 n1 = n10 + tx*(n11 - n10);
|
||||
oldN = n0 + ty*(n1 - n0);
|
||||
}
|
||||
|
||||
bool oldPNcheck = fastCheck(oldP, oldN);
|
||||
|
||||
//filter by distance
|
||||
v_float32x4 diff = newP - oldP;
|
||||
bool distCheck = !(v_reduce_sum(diff*diff) > sqThresh);
|
||||
|
||||
//filter by angle
|
||||
bool angleCheck = !(abs(v_reduce_sum(newN*oldN)) < cosThresh);
|
||||
|
||||
if(!(oldPNcheck && distCheck && angleCheck))
|
||||
continue;
|
||||
|
||||
// build point-wise vector ab = [ A | b ]
|
||||
v_float32x4 VxNv = crossProduct(newP, oldN);
|
||||
Point3f VxN;
|
||||
VxN.x = VxNv.get0();
|
||||
VxN.y = v_reinterpret_as_f32(v_extract<1>(v_reinterpret_as_u32(VxNv), v_setzero_u32())).get0();
|
||||
VxN.z = v_reinterpret_as_f32(v_extract<2>(v_reinterpret_as_u32(VxNv), v_setzero_u32())).get0();
|
||||
|
||||
float dotp = -v_reduce_sum(oldN*diff);
|
||||
|
||||
// build point-wise upper-triangle matrix [ab^T * ab] w/o last row
|
||||
// which is [A^T*A | A^T*b]
|
||||
// and gather sum
|
||||
|
||||
v_float32x4 vd = VxNv | v_float32x4(0, 0, 0, dotp);
|
||||
v_float32x4 n = oldN;
|
||||
v_float32x4 nyzx;
|
||||
{
|
||||
v_uint32x4 aa = v_reinterpret_as_u32(n);
|
||||
v_uint32x4 yz00 = v_extract<1>(aa, v_setzero_u32());
|
||||
v_uint32x4 x0y0, tmp;
|
||||
v_zip(aa, v_setzero_u32(), x0y0, tmp);
|
||||
nyzx = v_reinterpret_as_f32(v_combine_low(yz00, x0y0));
|
||||
}
|
||||
|
||||
v_float32x4 vutg[utBufferSize];
|
||||
for(size_t i = 0; i < utBufferSize; i++)
|
||||
vutg[i] = v_load_aligned(upperTriangle + i*4);
|
||||
|
||||
int p = 0;
|
||||
v_float32x4 v;
|
||||
// vx * vd, vx * n
|
||||
v = v_setall_f32(VxN.x);
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(v, vd, vutg[p])); p++;
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(v, n, vutg[p])); p++;
|
||||
// vy * vd, vy * n
|
||||
v = v_setall_f32(VxN.y);
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(v, vd, vutg[p])); p++;
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(v, n, vutg[p])); p++;
|
||||
// vz * vd, vz * n
|
||||
v = v_setall_f32(VxN.z);
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(v, vd, vutg[p])); p++;
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(v, n, vutg[p])); p++;
|
||||
// nx^2, ny^2, nz^2
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(n, n, vutg[p])); p++;
|
||||
// nx*ny, ny*nz, nx*nz
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(n, nyzx, vutg[p])); p++;
|
||||
// nx*d, ny*d, nz*d
|
||||
v = v_setall_f32(dotp);
|
||||
v_store_aligned(upperTriangle + p*4, v_muladd(n, v, vutg[p])); p++;
|
||||
}
|
||||
}
|
||||
|
||||
ABtype sumAB = ABtype::zeros();
|
||||
for(int i = 0; i < 6; i++)
|
||||
{
|
||||
for(int j = i; j < 7; j++)
|
||||
{
|
||||
size_t p = utPos[i*7+j];
|
||||
sumAB(i, j) = upperTriangle[p];
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
float upperTriangle[UTSIZE];
|
||||
for(int i = 0; i < UTSIZE; i++)
|
||||
upperTriangle[i] = 0;
|
||||
|
||||
for(int y = range.start; y < range.end; y++)
|
||||
{
|
||||
const ptype* newPtsRow = newPts[y];
|
||||
const ptype* newNrmRow = newNrm[y];
|
||||
|
||||
for(int x = 0; x < newPts.cols; x++)
|
||||
{
|
||||
Point3f newP = fromPtype(newPtsRow[x]);
|
||||
Point3f newN = fromPtype(newNrmRow[x]);
|
||||
|
||||
Point3f oldP(nan3), oldN(nan3);
|
||||
|
||||
if(!(fastCheck(newP) && fastCheck(newN)))
|
||||
continue;
|
||||
|
||||
//transform to old coord system
|
||||
newP = pose * newP;
|
||||
newN = pose.rotation() * newN;
|
||||
|
||||
//find correspondence by projecting the point
|
||||
Point2f oldCoords = proj(newP);
|
||||
if(!(oldCoords.x >= 0 && oldCoords.x < oldPts.cols - 1 &&
|
||||
oldCoords.y >= 0 && oldCoords.y < oldPts.rows - 1))
|
||||
continue;
|
||||
|
||||
// bilinearly interpolate oldPts and oldNrm under oldCoords point
|
||||
int xi = cvFloor(oldCoords.x), yi = cvFloor(oldCoords.y);
|
||||
float tx = oldCoords.x - xi, ty = oldCoords.y - yi;
|
||||
|
||||
const ptype* prow0 = oldPts[yi+0];
|
||||
const ptype* prow1 = oldPts[yi+1];
|
||||
|
||||
Point3f p00 = fromPtype(prow0[xi+0]);
|
||||
Point3f p01 = fromPtype(prow0[xi+1]);
|
||||
Point3f p10 = fromPtype(prow1[xi+0]);
|
||||
Point3f p11 = fromPtype(prow1[xi+1]);
|
||||
|
||||
//do not fix missing data
|
||||
if(!(fastCheck(p00) && fastCheck(p01) &&
|
||||
fastCheck(p10) && fastCheck(p11)))
|
||||
continue;
|
||||
|
||||
const ptype* nrow0 = oldNrm[yi+0];
|
||||
const ptype* nrow1 = oldNrm[yi+1];
|
||||
|
||||
Point3f n00 = fromPtype(nrow0[xi+0]);
|
||||
Point3f n01 = fromPtype(nrow0[xi+1]);
|
||||
Point3f n10 = fromPtype(nrow1[xi+0]);
|
||||
Point3f n11 = fromPtype(nrow1[xi+1]);
|
||||
|
||||
if(!(fastCheck(n00) && fastCheck(n01) &&
|
||||
fastCheck(n10) && fastCheck(n11)))
|
||||
continue;
|
||||
|
||||
Point3f p0 = p00 + tx*(p01 - p00);
|
||||
Point3f p1 = p10 + tx*(p11 - p10);
|
||||
oldP = p0 + ty*(p1 - p0);
|
||||
|
||||
Point3f n0 = n00 + tx*(n01 - n00);
|
||||
Point3f n1 = n10 + tx*(n11 - n10);
|
||||
oldN = n0 + ty*(n1 - n0);
|
||||
|
||||
if(!(fastCheck(oldP) && fastCheck(oldN)))
|
||||
continue;
|
||||
|
||||
//filter by distance
|
||||
Point3f diff = newP - oldP;
|
||||
if(diff.dot(diff) > sqDistanceThresh)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
//filter by angle
|
||||
if(abs(newN.dot(oldN)) < minCos)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// build point-wise vector ab = [ A | b ]
|
||||
|
||||
//try to optimize
|
||||
Point3f VxN = newP.cross(oldN);
|
||||
float ab[7] = {VxN.x, VxN.y, VxN.z, oldN.x, oldN.y, oldN.z, oldN.dot(-diff)};
|
||||
// build point-wise upper-triangle matrix [ab^T * ab] w/o last row
|
||||
// which is [A^T*A | A^T*b]
|
||||
// and gather sum
|
||||
int pos = 0;
|
||||
for(int i = 0; i < 6; i++)
|
||||
{
|
||||
for(int j = i; j < 7; j++)
|
||||
{
|
||||
upperTriangle[pos++] += ab[i]*ab[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ABtype sumAB = ABtype::zeros();
|
||||
int pos = 0;
|
||||
for(int i = 0; i < 6; i++)
|
||||
{
|
||||
for(int j = i; j < 7; j++)
|
||||
{
|
||||
sumAB(i, j) = upperTriangle[pos++];
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
AutoLock al(mtx);
|
||||
globalSumAb += sumAB;
|
||||
}
|
||||
|
||||
ABtype& globalSumAb;
|
||||
Mutex& mtx;
|
||||
const Points& oldPts;
|
||||
const Normals& oldNrm;
|
||||
const Points& newPts;
|
||||
const Normals& newNrm;
|
||||
Affine3f pose;
|
||||
const Intr::Projector proj;
|
||||
float sqDistanceThresh;
|
||||
float minCos;
|
||||
};
|
||||
|
||||
|
||||
template <>
|
||||
void ICPImpl::getAb<Mat>(const Mat& oldPts, const Mat& oldNrm, const Mat& newPts, const Mat& newNrm,
|
||||
cv::Affine3f pose, int level, cv::Matx66f& A, cv::Vec6f& b) const
|
||||
{
|
||||
CV_TRACE_FUNCTION();
|
||||
|
||||
CV_Assert(oldPts.size() == oldNrm.size());
|
||||
CV_Assert(newPts.size() == newNrm.size());
|
||||
|
||||
ABtype sumAB = ABtype::zeros();
|
||||
Mutex mutex;
|
||||
const Points op(oldPts), on(oldNrm);
|
||||
const Normals np(newPts), nn(newNrm);
|
||||
GetAbInvoker invoker(sumAB, mutex, op, on, np, nn, pose,
|
||||
intrinsics.scale(level).makeProjector(),
|
||||
distanceThreshold*distanceThreshold, std::cos(angleThreshold));
|
||||
Range range(0, newPts.rows);
|
||||
const int nstripes = -1;
|
||||
parallel_for_(range, invoker, nstripes);
|
||||
|
||||
// splitting AB matrix to A and b
|
||||
for(int i = 0; i < 6; i++)
|
||||
{
|
||||
// augment lower triangle of A by symmetry
|
||||
for(int j = i; j < 6; j++)
|
||||
{
|
||||
A(i, j) = A(j, i) = sumAB(i, j);
|
||||
}
|
||||
|
||||
b(i) = sumAB(i, 6);
|
||||
}
|
||||
}
|
||||
|
||||
///////// GPU implementation /////////
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
|
||||
template <>
|
||||
void ICPImpl::getAb<UMat>(const UMat& oldPts, const UMat& oldNrm, const UMat& newPts, const UMat& newNrm,
|
||||
Affine3f pose, int level, Matx66f &A, Vec6f &b) const
|
||||
{
|
||||
CV_TRACE_FUNCTION();
|
||||
|
||||
Size oldSize = oldPts.size(), newSize = newPts.size();
|
||||
CV_Assert(oldSize == oldNrm.size());
|
||||
CV_Assert(newSize == newNrm.size());
|
||||
|
||||
// calculate 1x7 vector ab to produce b and upper triangle of A:
|
||||
// [A|b] = ab*(ab^t)
|
||||
// and then reduce it across work groups
|
||||
|
||||
cv::String errorStr;
|
||||
ocl::ProgramSource source = ocl::_3d::icp_oclsrc;
|
||||
cv::String options = "-cl-mad-enable";
|
||||
ocl::Kernel k;
|
||||
k.create("getAb", source, options, &errorStr);
|
||||
|
||||
if(k.empty())
|
||||
throw std::runtime_error("Failed to create kernel: " + errorStr);
|
||||
|
||||
size_t globalSize[2];
|
||||
globalSize[0] = (size_t)newPts.cols;
|
||||
globalSize[1] = (size_t)newPts.rows;
|
||||
|
||||
const ocl::Device& device = ocl::Device::getDefault();
|
||||
size_t wgsLimit = device.maxWorkGroupSize();
|
||||
size_t memSize = device.localMemSize();
|
||||
// local memory should keep upperTriangles for all threads in group for reduce
|
||||
const size_t ltsz = UTSIZE*sizeof(float);
|
||||
const size_t lcols = 32;
|
||||
size_t lrows = min(memSize/ltsz, wgsLimit)/lcols;
|
||||
// round lrows down to 2^n
|
||||
lrows = roundDownPow2(lrows);
|
||||
size_t localSize[2] = {lcols, lrows};
|
||||
Size ngroups((int)divUp(globalSize[0], (unsigned int)localSize[0]),
|
||||
(int)divUp(globalSize[1], (unsigned int)localSize[1]));
|
||||
|
||||
// size of local buffer for group-wide reduce
|
||||
size_t lsz = localSize[0]*localSize[1]*ltsz;
|
||||
|
||||
Intr::Projector proj = intrinsics.scale(level).makeProjector();
|
||||
Vec2f fxy(proj.fx, proj.fy), cxy(proj.cx, proj.cy);
|
||||
|
||||
UMat& groupedSumGpu = groupedSumBuffers[level];
|
||||
groupedSumGpu.create(Size(ngroups.width*UTSIZE, ngroups.height),
|
||||
CV_32F);
|
||||
groupedSumGpu.setTo(0);
|
||||
|
||||
// TODO: optimization possible:
|
||||
// samplers instead of oldPts/oldNrm (mask needed)
|
||||
k.args(ocl::KernelArg::ReadOnlyNoSize(oldPts),
|
||||
ocl::KernelArg::ReadOnlyNoSize(oldNrm),
|
||||
oldSize,
|
||||
ocl::KernelArg::ReadOnlyNoSize(newPts),
|
||||
ocl::KernelArg::ReadOnlyNoSize(newNrm),
|
||||
newSize,
|
||||
ocl::KernelArg::Constant(pose.matrix.val,
|
||||
sizeof(pose.matrix.val)),
|
||||
fxy.val, cxy.val,
|
||||
distanceThreshold*distanceThreshold,
|
||||
std::cos(angleThreshold),
|
||||
ocl::KernelArg::Local(lsz),
|
||||
ocl::KernelArg::WriteOnlyNoSize(groupedSumGpu)
|
||||
);
|
||||
|
||||
if(!k.run(2, globalSize, localSize, true))
|
||||
throw std::runtime_error("Failed to run kernel");
|
||||
|
||||
float upperTriangle[UTSIZE];
|
||||
for(int i = 0; i < UTSIZE; i++)
|
||||
upperTriangle[i] = 0;
|
||||
|
||||
Mat groupedSumCpu = groupedSumGpu.getMat(ACCESS_READ);
|
||||
|
||||
for(int y = 0; y < ngroups.height; y++)
|
||||
{
|
||||
const float* rowr = groupedSumCpu.ptr<float>(y);
|
||||
for(int x = 0; x < ngroups.width; x++)
|
||||
{
|
||||
const float* p = rowr + x*UTSIZE;
|
||||
for(int j = 0; j < UTSIZE; j++)
|
||||
{
|
||||
upperTriangle[j] += p[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
groupedSumCpu.release();
|
||||
|
||||
ABtype sumAB = ABtype::zeros();
|
||||
int pos = 0;
|
||||
for(int i = 0; i < 6; i++)
|
||||
{
|
||||
for(int j = i; j < 7; j++)
|
||||
{
|
||||
sumAB(i, j) = upperTriangle[pos++];
|
||||
}
|
||||
}
|
||||
|
||||
// splitting AB matrix to A and b
|
||||
for(int i = 0; i < 6; i++)
|
||||
{
|
||||
// augment lower triangle of A by symmetry
|
||||
for(int j = i; j < 6; j++)
|
||||
{
|
||||
A(i, j) = A(j, i) = sumAB(i, j);
|
||||
}
|
||||
|
||||
b(i) = sumAB(i, 6);
|
||||
}
|
||||
}
|
||||
|
||||
#endif

///

cv::Ptr<ICP> makeICP(const cv::Intr _intrinsics, const std::vector<int> &_iterations,
float _angleThreshold, float _distanceThreshold)
{
return makePtr<ICPImpl>(_intrinsics.getMat(), _iterations, _angleThreshold, _distanceThreshold);
}

} // namespace kinfu
} // namespace cv
@ -1,42 +0,0 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
// Partially rewritten from https://github.com/Nerei/kinfu_remake
|
||||
// Copyright(c) 2012, Anatoly Baksheev. All rights reserved.
|
||||
|
||||
#ifndef OPENCV_3D_FAST_ICP_HPP
|
||||
#define OPENCV_3D_FAST_ICP_HPP
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "utils.hpp"
|
||||
|
||||
namespace cv {
|
||||
namespace kinfu {
|
||||
|
||||
class ICP
|
||||
{
|
||||
public:
|
||||
ICP(const cv::Matx33f _intrinsics, const std::vector<int> &_iterations, float _angleThreshold, float _distanceThreshold);
|
||||
|
||||
virtual bool estimateTransform(cv::Affine3f& transform,
|
||||
InputArray oldPoints, InputArray oldNormals,
|
||||
InputArray newPoints, InputArray newNormals
|
||||
) const = 0;
|
||||
virtual ~ICP() { }
|
||||
|
||||
protected:
|
||||
|
||||
std::vector<int> iterations;
|
||||
float angleThreshold;
|
||||
float distanceThreshold;
|
||||
cv::Intr intrinsics;
|
||||
};
|
||||
|
||||
cv::Ptr<ICP> makeICP(const cv::Intr _intrinsics, const std::vector<int> &_iterations,
|
||||
float _angleThreshold, float _distanceThreshold);
|
||||
|
||||
} // namespace kinfu
|
||||
} // namespace cv
|
||||
|
||||
#endif // include guard
|
@ -16,6 +16,11 @@ T inline norm_vec(const Vec<T, 3>& vec)
|
||||
{
|
||||
return std::sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);
|
||||
}
|
||||
template<typename T>
|
||||
T inline norm_vec(const Vec<T, 4>& vec)
|
||||
{
|
||||
return std::sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);
|
||||
}
|
||||
|
||||
/** Given 3d points, compute their distance to the origin
|
||||
* @param points
|
||||
@ -24,7 +29,7 @@ T inline norm_vec(const Vec<T, 3>& vec)
|
||||
template<typename T>
|
||||
Mat_<T> computeRadius(const Mat& points)
|
||||
{
|
||||
typedef Vec<T, 3> PointT;
|
||||
typedef Vec<T, 4> PointT;
|
||||
|
||||
// Compute the
|
||||
Size size(points.cols, points.rows);
|
||||
@ -54,7 +59,8 @@ void computeThetaPhi(int rows, int cols, const Matx<T, 3, 3>& K, Mat& cos_theta,
|
||||
Mat points3d;
|
||||
depthTo3d(depth_image, Mat(K), points3d);
|
||||
|
||||
typedef Vec<T, 3> Vec3T;
|
||||
//typedef Vec<T, 3> Vec3T;
|
||||
typedef Vec<T, 4> Vec4T;
|
||||
|
||||
cos_theta = Mat_<T>(rows, cols);
|
||||
sin_theta = Mat_<T>(rows, cols);
|
||||
@ -65,8 +71,8 @@ void computeThetaPhi(int rows, int cols, const Matx<T, 3, 3>& K, Mat& cos_theta,
|
||||
{
|
||||
T* row_cos_theta = cos_theta.ptr <T>(y), * row_sin_theta = sin_theta.ptr <T>(y);
|
||||
T* row_cos_phi = cos_phi.ptr <T>(y), * row_sin_phi = sin_phi.ptr <T>(y);
|
||||
const Vec3T* row_points = points3d.ptr <Vec3T>(y),
|
||||
* row_points_end = points3d.ptr <Vec3T>(y) + points3d.cols;
|
||||
const Vec4T* row_points = points3d.ptr <Vec4T>(y),
|
||||
* row_points_end = points3d.ptr <Vec4T>(y) + points3d.cols;
|
||||
const T* row_r = r.ptr < T >(y);
|
||||
for (; row_points < row_points_end;
|
||||
++row_cos_theta, ++row_sin_theta, ++row_cos_phi, ++row_sin_phi, ++row_points, ++row_r)
|
||||
@ -100,6 +106,20 @@ inline void signNormal(const Vec<T, 3>& normal_in, Vec<T, 3>& normal_out)
|
||||
normal_out[1] = res[1];
|
||||
normal_out[2] = res[2];
|
||||
}
|
||||
template<typename T>
|
||||
inline void signNormal(const Vec<T, 3>& normal_in, Vec<T, 4>& normal_out)
|
||||
{
|
||||
Vec<T, 3> res;
|
||||
if (normal_in[2] > 0)
|
||||
res = -normal_in / norm_vec(normal_in);
|
||||
else
|
||||
res = normal_in / norm_vec(normal_in);
|
||||
|
||||
normal_out[0] = res[0];
|
||||
normal_out[1] = res[1];
|
||||
normal_out[2] = res[2];
|
||||
normal_out[3] = 0;
|
||||
}
|
||||
|
||||
/** Modify normals to make sure they point towards the camera
|
||||
* @param normals
|
||||
@ -121,6 +141,25 @@ inline void signNormal(T a, T b, T c, Vec<T, 3>& normal)
|
||||
normal[2] = c * norm;
|
||||
}
|
||||
}
|
||||
template<typename T>
|
||||
inline void signNormal(T a, T b, T c, Vec<T, 4>& normal)
|
||||
{
|
||||
T norm = 1 / std::sqrt(a * a + b * b + c * c);
|
||||
if (c > 0)
|
||||
{
|
||||
normal[0] = -a * norm;
|
||||
normal[1] = -b * norm;
|
||||
normal[2] = -c * norm;
|
||||
normal[3] = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
normal[0] = a * norm;
|
||||
normal[1] = b * norm;
|
||||
normal[2] = c * norm;
|
||||
normal[3] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
@ -227,7 +266,7 @@ public:
|
||||
calcRadiusAnd3d(points3d_ori, points3d, radius);
|
||||
|
||||
// Get the normals
|
||||
normals_out.create(points3d_ori.size(), CV_MAKETYPE(dtype, 3));
|
||||
normals_out.create(points3d_ori.size(), CV_MAKETYPE(dtype, 4));
|
||||
if (points3d_in.empty())
|
||||
return;
|
||||
|
||||
@ -262,6 +301,7 @@ class FALS : public RgbdNormalsImpl<T>
|
||||
public:
|
||||
typedef Matx<T, 3, 3> Mat33T;
|
||||
typedef Vec<T, 9> Vec9T;
|
||||
typedef Vec<T, 4> Vec4T;
|
||||
typedef Vec<T, 3> Vec3T;
|
||||
|
||||
FALS(int _rows, int _cols, int _windowSize, const Mat& _K) :
|
||||
@ -345,13 +385,15 @@ public:
|
||||
row_r = r.ptr < T >(0);
|
||||
const Vec3T* B_vec = B[0];
|
||||
const Mat33T* M_inv = reinterpret_cast<const Mat33T*>(M_inv_.ptr(0));
|
||||
Vec3T* normal = normals.ptr<Vec3T>(0);
|
||||
//Vec3T* normal = normals.ptr<Vec3T>(0);
|
||||
Vec4T* normal = normals.ptr<Vec4T>(0);
|
||||
for (; row_r != row_r_end; ++row_r, ++B_vec, ++normal, ++M_inv)
|
||||
if (cvIsNaN(*row_r))
|
||||
{
|
||||
(*normal)[0] = *row_r;
|
||||
(*normal)[1] = *row_r;
|
||||
(*normal)[2] = *row_r;
|
||||
(*normal)[3] = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -366,7 +408,8 @@ public:
|
||||
|
||||
virtual void assertOnBadArg(const Mat& points3d_ori) const CV_OVERRIDE
|
||||
{
|
||||
CV_Assert(points3d_ori.channels() == 3);
|
||||
//CV_Assert(points3d_ori.channels() == 3);
|
||||
CV_Assert(points3d_ori.channels() == 4);
|
||||
CV_Assert(points3d_ori.depth() == CV_32F || points3d_ori.depth() == CV_64F);
|
||||
}
|
||||
|
||||
@ -401,6 +444,7 @@ template<typename T>
|
||||
class LINEMOD : public RgbdNormalsImpl<T>
|
||||
{
|
||||
public:
|
||||
typedef Vec<T, 4> Vec4T;
|
||||
typedef Vec<T, 3> Vec3T;
|
||||
typedef Matx<T, 3, 3> Mat33T;
|
||||
|
||||
@ -421,7 +465,8 @@ public:
|
||||
{
|
||||
// Only focus on the depth image for LINEMOD
|
||||
Mat depth_in;
|
||||
if (points3d.channels() == 3)
|
||||
//if (points3d.channels() == 3)
|
||||
if (points3d.channels() == 4)
|
||||
{
|
||||
std::vector<Mat> channels;
|
||||
split(points3d, channels);
|
||||
@ -496,7 +541,7 @@ public:
|
||||
for (int y = r; y < this->rows - r - 1; ++y)
|
||||
{
|
||||
const DepthDepth* p_line = reinterpret_cast<const DepthDepth*>(depthIn.ptr(y, r));
|
||||
Vec3T* normal = normals.ptr<Vec3T>(y, r);
|
||||
Vec4T* normal = normals.ptr<Vec4T>(y, r);
|
||||
|
||||
for (int x = r; x < this->cols - r - 1; ++x)
|
||||
{
|
||||
@ -548,7 +593,7 @@ public:
|
||||
|
||||
virtual void assertOnBadArg(const Mat& points3d_ori) const CV_OVERRIDE
|
||||
{
|
||||
CV_Assert(((points3d_ori.channels() == 3) && (points3d_ori.depth() == CV_32F || points3d_ori.depth() == CV_64F)) ||
|
||||
CV_Assert(((points3d_ori.channels() == 4) && (points3d_ori.depth() == CV_32F || points3d_ori.depth() == CV_64F)) ||
|
||||
((points3d_ori.channels() == 1) && (points3d_ori.depth() == CV_16U || points3d_ori.depth() == CV_32F || points3d_ori.depth() == CV_64F)));
|
||||
}
|
||||
|
||||
@ -569,6 +614,7 @@ class SRI : public RgbdNormalsImpl<T>
|
||||
public:
|
||||
typedef Matx<T, 3, 3> Mat33T;
|
||||
typedef Vec<T, 9> Vec9T;
|
||||
typedef Vec<T, 4> Vec4T;
|
||||
typedef Vec<T, 3> Vec3T;
|
||||
|
||||
SRI(int _rows, int _cols, int _windowSize, const Mat& _K) :
|
||||
@ -679,13 +725,13 @@ public:
|
||||
sepFilter2D(r, r_phi, r.depth(), kx_dy_, ky_dy_);
|
||||
|
||||
// Fill the result matrix
|
||||
Mat_<Vec3T> normals(this->rows, this->cols);
|
||||
Mat_<Vec4T> normals(this->rows, this->cols);
|
||||
|
||||
const T* r_theta_ptr = r_theta[0], * r_theta_ptr_end = r_theta_ptr + this->rows * this->cols;
|
||||
const T* r_phi_ptr = r_phi[0];
|
||||
const Mat33T* R = reinterpret_cast<const Mat33T*>(R_hat_[0]);
|
||||
const T* r_ptr = r[0];
|
||||
Vec3T* normal = normals[0];
|
||||
Vec4T* normal = normals[0];
|
||||
for (; r_theta_ptr != r_theta_ptr_end; ++r_theta_ptr, ++r_phi_ptr, ++R, ++r_ptr, ++normal)
|
||||
{
|
||||
if (cvIsNaN(*r_ptr))
|
||||
@ -693,6 +739,7 @@ public:
|
||||
(*normal)[0] = *r_ptr;
|
||||
(*normal)[1] = *r_ptr;
|
||||
(*normal)[2] = *r_ptr;
|
||||
(*normal)[3] = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -706,15 +753,15 @@ public:
|
||||
}
|
||||
|
||||
remap(normals, normals_out, invxy_, invfxy_, INTER_LINEAR);
|
||||
normal = normals_out.ptr<Vec3T>(0);
|
||||
Vec3T* normal_end = normal + this->rows * this->cols;
|
||||
normal = normals_out.ptr<Vec4T>(0);
|
||||
Vec4T* normal_end = normal + this->rows * this->cols;
|
||||
for (; normal != normal_end; ++normal)
|
||||
signNormal((*normal)[0], (*normal)[1], (*normal)[2], *normal);
|
||||
}
|
||||
|
||||
virtual void assertOnBadArg(const Mat& points3d_ori) const CV_OVERRIDE
|
||||
{
|
||||
CV_Assert(((points3d_ori.channels() == 3) && (points3d_ori.depth() == CV_32F || points3d_ori.depth() == CV_64F)));
|
||||
CV_Assert(((points3d_ori.channels() == 4) && (points3d_ori.depth() == CV_32F || points3d_ori.depth() == CV_64F)));
|
||||
}
|
||||
|
||||
// Cached data
|
||||

File diff suppressed because it is too large

modules/3d/src/rgbd/odometry_frame_impl.cpp  (new file, 251 lines)
@ -0,0 +1,251 @
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "../precomp.hpp"
|
||||
|
||||
#include <opencv2/core/ocl.hpp>
|
||||
|
||||
#include "utils.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class OdometryFrame::Impl
|
||||
{
|
||||
public:
|
||||
Impl() {}
|
||||
virtual ~Impl() {}
|
||||
virtual void setImage(InputArray image) = 0;
|
||||
virtual void getImage(OutputArray image) const = 0;
|
||||
virtual void getGrayImage(OutputArray image) const = 0;
|
||||
virtual void setDepth(InputArray depth) = 0;
|
||||
virtual void getDepth(OutputArray depth) const = 0;
|
||||
virtual void setMask(InputArray mask) = 0;
|
||||
virtual void getMask(OutputArray mask) const = 0;
|
||||
virtual void setNormals(InputArray normals) = 0;
|
||||
virtual void getNormals(OutputArray normals) const = 0;
|
||||
virtual void setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype) = 0;
|
||||
virtual void setPyramidLevels(size_t _nLevels) = 0;
|
||||
virtual size_t getPyramidLevels(OdometryFramePyramidType oftype) const = 0;
|
||||
virtual void setPyramidAt(InputArray img,
|
||||
OdometryFramePyramidType pyrType, size_t level) = 0;
|
||||
virtual void getPyramidAt(OutputArray img,
|
||||
OdometryFramePyramidType pyrType, size_t level) const = 0;
|
||||
};
|
||||
|
||||
template<typename TMat>
|
||||
class OdometryFrameImplTMat : public OdometryFrame::Impl
|
||||
{
|
||||
public:
|
||||
OdometryFrameImplTMat();
|
||||
~OdometryFrameImplTMat() {};
|
||||
|
||||
virtual void setImage(InputArray image) override;
|
||||
virtual void getImage(OutputArray image) const override;
|
||||
virtual void getGrayImage(OutputArray image) const override;
|
||||
virtual void setDepth(InputArray depth) override;
|
||||
virtual void getDepth(OutputArray depth) const override;
|
||||
virtual void setMask(InputArray mask) override;
|
||||
virtual void getMask(OutputArray mask) const override;
|
||||
virtual void setNormals(InputArray normals) override;
|
||||
virtual void getNormals(OutputArray normals) const override;
|
||||
virtual void setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype) override;
|
||||
virtual void setPyramidLevels(size_t _nLevels) override;
|
||||
virtual size_t getPyramidLevels(OdometryFramePyramidType oftype) const override;
|
||||
virtual void setPyramidAt(InputArray img,
|
||||
OdometryFramePyramidType pyrType, size_t level) override;
|
||||
virtual void getPyramidAt(OutputArray img,
|
||||
OdometryFramePyramidType pyrType, size_t level) const override;
|
||||
|
||||
private:
|
||||
void findMask(InputArray image);
|
||||
|
||||
TMat image;
|
||||
TMat imageGray;
|
||||
TMat depth;
|
||||
TMat mask;
|
||||
TMat normals;
|
||||
std::vector< std::vector<TMat> > pyramids;
|
||||
};

OdometryFrame::OdometryFrame()
{
this->impl = makePtr<OdometryFrameImplTMat<Mat>>();
};

OdometryFrame::OdometryFrame(OdometryFrameStoreType matType)
{
if (matType == OdometryFrameStoreType::UMAT)
this->impl = makePtr<OdometryFrameImplTMat<UMat>>();
else
this->impl = makePtr<OdometryFrameImplTMat<Mat>>();
};

void OdometryFrame::setImage(InputArray image) { this->impl->setImage(image); }
void OdometryFrame::getImage(OutputArray image) const { this->impl->getImage(image); }
void OdometryFrame::getGrayImage(OutputArray image) const { this->impl->getGrayImage(image); }
void OdometryFrame::setDepth(InputArray depth) { this->impl->setDepth(depth); }
void OdometryFrame::getDepth(OutputArray depth) const { this->impl->getDepth(depth); }
void OdometryFrame::setMask(InputArray mask) { this->impl->setMask(mask); }
void OdometryFrame::getMask(OutputArray mask) const { this->impl->getMask(mask); }
void OdometryFrame::setNormals(InputArray normals) { this->impl->setNormals(normals); }
void OdometryFrame::getNormals(OutputArray normals) const { this->impl->getNormals(normals); }
void OdometryFrame::setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype)
{
this->impl->setPyramidLevel(_nLevels, oftype);
}
void OdometryFrame::setPyramidLevels(size_t _nLevels) { this->impl->setPyramidLevels(_nLevels); }
size_t OdometryFrame::getPyramidLevels(OdometryFramePyramidType oftype) const { return this->impl->getPyramidLevels(oftype); }
void OdometryFrame::setPyramidAt(InputArray img, OdometryFramePyramidType pyrType, size_t level)
{
this->impl->setPyramidAt(img, pyrType, level);
}
void OdometryFrame::getPyramidAt(OutputArray img, OdometryFramePyramidType pyrType, size_t level) const
{
this->impl->getPyramidAt(img, pyrType, level);
}
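
A minimal usage sketch of the wrappers above (not part of the patch); the header path is assumed from this PR's layout and the input files are hypothetical:

#include <opencv2/3d/odometry_frame.hpp>   // assumption: public header added by this PR
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray  = cv::imread("rgb.png",   cv::IMREAD_GRAYSCALE);  // hypothetical inputs
    cv::Mat depth = cv::imread("depth.png", cv::IMREAD_ANYDEPTH);   // e.g. a 16-bit Kinect-style map

    cv::OdometryFrame frame;   // Mat-backed; construct with OdometryFrameStoreType::UMAT for the OpenCL path
    frame.setImage(gray);      // also builds the internal 8-bit grayscale copy
    frame.setDepth(depth);     // rescales raw depth and builds the validity mask (see setDepth below)

    cv::Mat scaledDepth, validMask;
    frame.getDepth(scaledDepth);
    frame.getMask(validMask);
    return 0;
}
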
|
||||
|
||||
template<typename TMat>
|
||||
OdometryFrameImplTMat<TMat>::OdometryFrameImplTMat()
|
||||
: pyramids(OdometryFramePyramidType::N_PYRAMIDS)
|
||||
{
|
||||
};
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setImage(InputArray _image)
|
||||
{
|
||||
this->image = getTMat<TMat>(_image);
|
||||
Mat gray;
|
||||
if (_image.channels() != 1)
|
||||
cvtColor(_image, gray, COLOR_BGR2GRAY, 1);
|
||||
else
|
||||
gray = getTMat<Mat>(_image);
|
||||
gray.convertTo(gray, CV_8UC1);
|
||||
this->imageGray = getTMat<TMat>(gray);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getImage(OutputArray _image) const
|
||||
{
|
||||
_image.assign(this->image);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getGrayImage(OutputArray _image) const
|
||||
{
|
||||
_image.assign(this->imageGray);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setDepth(InputArray _depth)
|
||||
{
|
||||
|
||||
TMat depth_tmp, depth_flt;
|
||||
depth_tmp = getTMat<TMat>(_depth);
|
||||
// Odometry works with depth values in range [0, 10)
// If the maximum depth value is greater than 10, the map is assumed to be in raw sensor units and is divided by 5000
|
||||
double max;
|
||||
cv::minMaxLoc(depth_tmp, NULL, &max);
|
||||
if (max > 10)
|
||||
{
|
||||
depth_tmp.convertTo(depth_flt, CV_32FC1, 1.f / 5000.f);
|
||||
depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), getTMat<Mat>(depth_flt) < FLT_EPSILON);
|
||||
depth_tmp = depth_flt;
|
||||
}
|
||||
this->depth = depth_tmp;
|
||||
this->findMask(_depth);
|
||||
}
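
The 1/5000 factor matches the scale commonly used by 16-bit RGB-D recordings (for example the TUM benchmark, where 5000 raw units correspond to one meter). A small sketch of the heuristic above (not part of the patch), using a float map so that the subsequent findMask() call sees the element type it expects:

// Sketch: effect of setDepth() on a depth map given in raw sensor units.
cv::Mat raw(1, 3, CV_32FC1);
raw.at<float>(0) = 0.f;        // invalid -> NaN after conversion, masked out by findMask()
raw.at<float>(1) = 5000.f;     // -> 1.0 after the 1/5000 conversion
raw.at<float>(2) = 20000.f;    // -> 4.0
cv::OdometryFrame f;
f.setDepth(raw);               // max() > 10, so the rescaling branch is taken
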
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getDepth(OutputArray _depth) const
|
||||
{
|
||||
_depth.assign(this->depth);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setMask(InputArray _mask)
|
||||
{
|
||||
this->mask = getTMat<TMat>(_mask);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getMask(OutputArray _mask) const
|
||||
{
|
||||
_mask.assign(this->mask);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setNormals(InputArray _normals)
|
||||
{
|
||||
this->normals = getTMat<TMat>(_normals);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getNormals(OutputArray _normals) const
|
||||
{
|
||||
_normals.assign(this->normals);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setPyramidLevel(size_t _nLevels, OdometryFramePyramidType oftype)
|
||||
{
|
||||
if (oftype < OdometryFramePyramidType::N_PYRAMIDS)
|
||||
pyramids[oftype].resize(_nLevels, TMat());
|
||||
else
|
||||
CV_Error(Error::StsBadArg, "Incorrect type.");
|
||||
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setPyramidLevels(size_t _nLevels)
|
||||
{
|
||||
for (auto& p : pyramids)
|
||||
{
|
||||
p.resize(_nLevels, TMat());
|
||||
}
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
size_t OdometryFrameImplTMat<TMat>::getPyramidLevels(OdometryFramePyramidType oftype) const
|
||||
{
|
||||
if (oftype < OdometryFramePyramidType::N_PYRAMIDS)
|
||||
return pyramids[oftype].size();
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::setPyramidAt(InputArray _img, OdometryFramePyramidType pyrType, size_t level)
|
||||
{
|
||||
CV_Assert(pyrType >= 0);
|
||||
CV_Assert(pyrType < pyramids.size());
|
||||
CV_Assert(level < pyramids[pyrType].size());
|
||||
TMat img = getTMat<TMat>(_img);
|
||||
pyramids[pyrType][level] = img;
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::getPyramidAt(OutputArray _img, OdometryFramePyramidType pyrType, size_t level) const
|
||||
{
|
||||
CV_Assert(pyrType < OdometryFramePyramidType::N_PYRAMIDS);
|
||||
CV_Assert(level < pyramids[pyrType].size());
|
||||
TMat img = pyramids[pyrType][level];
|
||||
_img.assign(img);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
void OdometryFrameImplTMat<TMat>::findMask(InputArray _depth)
|
||||
{
|
||||
Mat d = _depth.getMat();
|
||||
Mat m(d.size(), CV_8UC1, Scalar(255));
|
||||
for (int y = 0; y < d.rows; y++)
|
||||
for (int x = 0; x < d.cols; x++)
|
||||
{
|
||||
if (cvIsNaN(d.at<float>(y, x)) || d.at<float>(y, x) <= FLT_EPSILON)
|
||||
m.at<uchar>(y, x) = 0;
|
||||
}
|
||||
this->setMask(m);
|
||||
}
|
||||
|
||||
}

modules/3d/src/rgbd/odometry_functions.cpp  (new file, 1642 lines)
File diff suppressed because it is too large

modules/3d/src/rgbd/odometry_functions.hpp  (new file, 212 lines)
@ -0,0 +1,212 @
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#ifndef OPENCV_3D_ODOMETRY_FUNCTIONS_HPP
|
||||
#define OPENCV_3D_ODOMETRY_FUNCTIONS_HPP
|
||||
|
||||
#include "utils.hpp"
|
||||
#include <opencv2/imgproc.hpp>
|
||||
|
||||
namespace cv
|
||||
{
|
||||
enum class OdometryTransformType
|
||||
{
|
||||
ROTATION = 1, TRANSLATION = 2, RIGID_TRANSFORMATION = 4
|
||||
};
|
||||
|
||||
static inline
|
||||
void checkImage(InputArray image)
|
||||
{
|
||||
if (image.empty())
|
||||
CV_Error(Error::StsBadSize, "Image is empty.");
|
||||
if (image.type() != CV_8UC1)
|
||||
CV_Error(Error::StsBadSize, "Image type has to be CV_8UC1.");
|
||||
}
|
||||
static inline
|
||||
void checkDepth(InputArray depth, const Size& imageSize)
|
||||
{
|
||||
if (depth.empty())
|
||||
CV_Error(Error::StsBadSize, "Depth is empty.");
|
||||
if (depth.size() != imageSize)
|
||||
CV_Error(Error::StsBadSize, "Depth has to have the size equal to the image size.");
|
||||
if (depth.type() != CV_32FC1)
|
||||
CV_Error(Error::StsBadSize, "Depth type has to be CV_32FC1.");
|
||||
}
|
||||
|
||||
static inline
|
||||
void checkMask(InputArray mask, const Size& imageSize)
|
||||
{
|
||||
if (!mask.empty())
|
||||
{
|
||||
if (mask.size() != imageSize)
|
||||
CV_Error(Error::StsBadSize, "Mask has to have the size equal to the image size.");
|
||||
if (mask.type() != CV_8UC1)
|
||||
CV_Error(Error::StsBadSize, "Mask type has to be CV_8UC1.");
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
void checkNormals(InputArray normals, const Size& depthSize)
|
||||
{
|
||||
if (normals.size() != depthSize)
|
||||
CV_Error(Error::StsBadSize, "Normals has to have the size equal to the depth size.");
|
||||
if (normals.type() != CV_32FC3)
|
||||
CV_Error(Error::StsBadSize, "Normals type has to be CV_32FC3.");
|
||||
}

static inline
void calcRgbdEquationCoeffs(double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
{
double invz = 1. / p3d.z,
v0 = dIdx * fx * invz,
v1 = dIdy * fy * invz,
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;

C[0] = -p3d.z * v1 + p3d.y * v2;
C[1] = p3d.z * v0 - p3d.x * v2;
C[2] = -p3d.y * v0 + p3d.x * v1;
C[3] = v0;
C[4] = v1;
C[5] = v2;
}
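
For reference, the chain rule these coefficients implement (a restatement of the code above, not part of the patch): with I_u = dIdx, I_v = dIdy and the pinhole projection u = f_x x / z + c_x, v = f_y y / z + c_y,

\[
v_0 = I_u \frac{f_x}{z}, \qquad v_1 = I_v \frac{f_y}{z}, \qquad v_2 = -\frac{v_0 x + v_1 y}{z},
\]

i.e. (v_0, v_1, v_2) = \nabla I \, J_\pi(p), and the derivative of the warped intensity with respect to the twist (omega, t) is

\[
C = \begin{bmatrix} p \times (v_0, v_1, v_2)^{\top} \\ (v_0, v_1, v_2)^{\top} \end{bmatrix},
\]

so the Rotation and Translation variants below simply keep the first three or the last three entries.
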
|
||||
|
||||
static inline
|
||||
void calcRgbdEquationCoeffsRotation(double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
|
||||
{
|
||||
double invz = 1. / p3d.z,
|
||||
v0 = dIdx * fx * invz,
|
||||
v1 = dIdy * fy * invz,
|
||||
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
|
||||
C[0] = -p3d.z * v1 + p3d.y * v2;
|
||||
C[1] = p3d.z * v0 - p3d.x * v2;
|
||||
C[2] = -p3d.y * v0 + p3d.x * v1;
|
||||
}
|
||||
|
||||
static inline
|
||||
void calcRgbdEquationCoeffsTranslation(double* C, double dIdx, double dIdy, const Point3f& p3d, double fx, double fy)
|
||||
{
|
||||
double invz = 1. / p3d.z,
|
||||
v0 = dIdx * fx * invz,
|
||||
v1 = dIdy * fy * invz,
|
||||
v2 = -(v0 * p3d.x + v1 * p3d.y) * invz;
|
||||
C[0] = v0;
|
||||
C[1] = v1;
|
||||
C[2] = v2;
|
||||
}
|
||||
|
||||
typedef
|
||||
void (*CalcRgbdEquationCoeffsPtr)(double*, double, double, const Point3f&, double, double);
|
||||
|
||||
static inline
|
||||
void calcICPEquationCoeffs(double* C, const Point3f& p0, const Vec3f& n1)
|
||||
{
|
||||
C[0] = -p0.z * n1[1] + p0.y * n1[2];
|
||||
C[1] = p0.z * n1[0] - p0.x * n1[2];
|
||||
C[2] = -p0.y * n1[0] + p0.x * n1[1];
|
||||
C[3] = n1[0];
|
||||
C[4] = n1[1];
|
||||
C[5] = n1[2];
|
||||
}
|
||||
|
||||
static inline
|
||||
void calcICPEquationCoeffsRotation(double* C, const Point3f& p0, const Vec3f& n1)
|
||||
{
|
||||
C[0] = -p0.z * n1[1] + p0.y * n1[2];
|
||||
C[1] = p0.z * n1[0] - p0.x * n1[2];
|
||||
C[2] = -p0.y * n1[0] + p0.x * n1[1];
|
||||
}
|
||||
|
||||
static inline
|
||||
void calcICPEquationCoeffsTranslation(double* C, const Point3f& /*p0*/, const Vec3f& n1)
|
||||
{
|
||||
C[0] = n1[0];
|
||||
C[1] = n1[1];
|
||||
C[2] = n1[2];
|
||||
}
|
||||
|
||||
typedef
|
||||
void (*CalcICPEquationCoeffsPtr)(double*, const Point3f&, const Vec3f&);
|
||||
|
||||
void prepareRGBDFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, OdometryAlgoType algtype);
|
||||
void prepareRGBFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings);
|
||||
void prepareICPFrame(OdometryFrame& srcFrame, OdometryFrame& dstFrame, const OdometrySettings settings, OdometryAlgoType algtype);
|
||||
|
||||
void prepareRGBFrameBase(OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareRGBFrameSrc (OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareRGBFrameDst (OdometryFrame& frame, const OdometrySettings settings);
|
||||
|
||||
void prepareICPFrameBase(OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareICPFrameSrc (OdometryFrame& frame, const OdometrySettings settings);
|
||||
void prepareICPFrameDst (OdometryFrame& frame, const OdometrySettings settings);
|
||||
|
||||
|
||||
void setPyramids(OdometryFrame& odf, OdometryFramePyramidType oftype, InputArrayOfArrays pyramidImage);
|
||||
void getPyramids(OdometryFrame& odf, OdometryFramePyramidType oftype, OutputArrayOfArrays _pyramid);
|
||||
|
||||
void preparePyramidImage(InputArray image, InputOutputArrayOfArrays pyramidImage, size_t levelCount);
|
||||
|
||||
template<typename TMat>
|
||||
void preparePyramidMask(InputArray mask, InputArrayOfArrays pyramidDepth, float minDepth, float maxDepth, InputArrayOfArrays pyramidNormal, InputOutputArrayOfArrays pyramidMask);
|
||||
|
||||
template<typename TMat>
|
||||
void preparePyramidCloud(InputArrayOfArrays pyramidDepth, const Matx33f& cameraMatrix, InputOutputArrayOfArrays pyramidCloud);
|
||||
|
||||
void buildPyramidCameraMatrix(const Matx33f& cameraMatrix, int levels, std::vector<Matx33f>& pyramidCameraMatrix);
|
||||
|
||||
template<typename TMat>
|
||||
void preparePyramidSobel(InputArrayOfArrays pyramidImage, int dx, int dy, InputOutputArrayOfArrays pyramidSobel, int sobelSize);
|
||||
|
||||
void preparePyramidTexturedMask(InputArrayOfArrays pyramid_dI_dx, InputArrayOfArrays pyramid_dI_dy,
|
||||
InputArray minGradMagnitudes, InputArrayOfArrays pyramidMask, double maxPointsPart,
|
||||
InputOutputArrayOfArrays pyramidTexturedMask, double sobelScale);
|
||||
|
||||
void randomSubsetOfMask(InputOutputArray _mask, float part);
|
||||
|
||||
void preparePyramidNormals(InputArray normals, InputArrayOfArrays pyramidDepth, InputOutputArrayOfArrays pyramidNormals);
|
||||
|
||||
void preparePyramidNormalsMask(InputArray pyramidNormals, InputArray pyramidMask, double maxPointsPart,
|
||||
InputOutputArrayOfArrays /*std::vector<Mat>&*/ pyramidNormalsMask);
|
||||
|
||||
|
||||
bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
|
||||
const OdometryFrame srcFrame,
|
||||
const OdometryFrame dstFrame,
|
||||
const Matx33f& cameraMatrix,
|
||||
float maxDepthDiff, float angleThreshold, const std::vector<int>& iterCounts,
|
||||
double maxTranslation, double maxRotation, double sobelScale,
|
||||
OdometryType method, OdometryTransformType transfromType, OdometryAlgoType algtype);
|
||||
|
||||
void computeCorresps(const Matx33f& _K, const Matx33f& _K_inv, const Mat& Rt,
|
||||
const Mat& image0, const Mat& depth0, const Mat& validMask0,
|
||||
const Mat& image1, const Mat& depth1, const Mat& selectMask1, float maxDepthDiff,
|
||||
Mat& _corresps, Mat& _diffs, double& _sigma, OdometryType method);
|
||||
|
||||
void calcRgbdLsmMatrices(const Mat& cloud0, const Mat& Rt,
|
||||
const Mat& dI_dx1, const Mat& dI_dy1,
|
||||
const Mat& corresps, const Mat& diffs, const double sigma,
|
||||
double fx, double fy, double sobelScaleIn,
|
||||
Mat& AtA, Mat& AtB, CalcRgbdEquationCoeffsPtr func, int transformDim);
|
||||
|
||||
void calcICPLsmMatrices(const Mat& cloud0, const Mat& Rt,
|
||||
const Mat& cloud1, const Mat& normals1,
|
||||
const Mat& corresps,
|
||||
Mat& AtA, Mat& AtB, CalcICPEquationCoeffsPtr func, int transformDim);
|
||||
|
||||
void calcICPLsmMatricesFast(Matx33f cameraMatrix, const Mat& oldPts, const Mat& oldNrm, const Mat& newPts, const Mat& newNrm,
|
||||
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
bool ocl_calcICPLsmMatricesFast(Matx33f cameraMatrix, const UMat& oldPts, const UMat& oldNrm, const UMat& newPts, const UMat& newNrm,
|
||||
cv::Affine3f pose, int level, float maxDepthDiff, float angleThreshold, cv::Matx66f& A, cv::Vec6f& b);
|
||||
#endif
|
||||
|
||||
void computeProjectiveMatrix(const Mat& ksi, Mat& Rt);
|
||||
|
||||
bool solveSystem(const Mat& AtA, const Mat& AtB, double detThreshold, Mat& x);
|
||||
|
||||
bool testDeltaTransformation(const Mat& deltaRt, double maxTranslation, double maxRotation);
|
||||
|
||||
}
|
||||
#endif //OPENCV_3D_ODOMETRY_FUNCTIONS_HPP

modules/3d/src/rgbd/odometry_settings_impl.cpp  (new file, 354 lines)
@ -0,0 +1,354 @
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "utils.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class OdometrySettings::Impl
|
||||
{
|
||||
public:
|
||||
Impl() {};
|
||||
virtual ~Impl() {};
|
||||
virtual void setCameraMatrix(InputArray val) = 0;
|
||||
virtual void getCameraMatrix(OutputArray val) const = 0;
|
||||
virtual void setIterCounts(InputArray val) = 0;
|
||||
virtual void getIterCounts(OutputArray val) const = 0;
|
||||
|
||||
virtual void setMinDepth(float val) = 0;
|
||||
virtual float getMinDepth() const = 0;
|
||||
virtual void setMaxDepth(float val) = 0;
|
||||
virtual float getMaxDepth() const = 0;
|
||||
virtual void setMaxDepthDiff(float val) = 0;
|
||||
virtual float getMaxDepthDiff() const = 0;
|
||||
virtual void setMaxPointsPart(float val) = 0;
|
||||
virtual float getMaxPointsPart() const = 0;
|
||||
|
||||
virtual void setSobelSize(int val) = 0;
|
||||
virtual int getSobelSize() const = 0;
|
||||
virtual void setSobelScale(double val) = 0;
|
||||
virtual double getSobelScale() const = 0;
|
||||
virtual void setNormalWinSize(int val) = 0;
|
||||
virtual int getNormalWinSize() const = 0;
|
||||
|
||||
virtual void setAngleThreshold(float val) = 0;
|
||||
virtual float getAngleThreshold() const = 0;
|
||||
virtual void setMaxTranslation(float val) = 0;
|
||||
virtual float getMaxTranslation() const = 0;
|
||||
virtual void setMaxRotation(float val) = 0;
|
||||
virtual float getMaxRotation() const = 0;
|
||||
|
||||
virtual void setMinGradientMagnitude(float val) = 0;
|
||||
virtual float getMinGradientMagnitude() const = 0;
|
||||
virtual void setMinGradientMagnitudes(InputArray val) = 0;
|
||||
virtual void getMinGradientMagnitudes(OutputArray val) const = 0;
|
||||
};
|
||||
|
||||
class OdometrySettingsImplCommon : public OdometrySettings::Impl
|
||||
{
|
||||
public:
|
||||
OdometrySettingsImplCommon();
|
||||
~OdometrySettingsImplCommon() {};
|
||||
virtual void setCameraMatrix(InputArray val) override;
|
||||
virtual void getCameraMatrix(OutputArray val) const override;
|
||||
virtual void setIterCounts(InputArray val) override;
|
||||
virtual void getIterCounts(OutputArray val) const override;
|
||||
|
||||
virtual void setMinDepth(float val) override;
|
||||
virtual float getMinDepth() const override;
|
||||
virtual void setMaxDepth(float val) override;
|
||||
virtual float getMaxDepth() const override;
|
||||
virtual void setMaxDepthDiff(float val) override;
|
||||
virtual float getMaxDepthDiff() const override;
|
||||
virtual void setMaxPointsPart(float val) override;
|
||||
virtual float getMaxPointsPart() const override;
|
||||
|
||||
virtual void setSobelSize(int val) override;
|
||||
virtual int getSobelSize() const override;
|
||||
virtual void setSobelScale(double val) override;
|
||||
virtual double getSobelScale() const override;
|
||||
virtual void setNormalWinSize(int val) override;
|
||||
virtual int getNormalWinSize() const override;
|
||||
|
||||
virtual void setAngleThreshold(float val) override;
|
||||
virtual float getAngleThreshold() const override;
|
||||
virtual void setMaxTranslation(float val) override;
|
||||
virtual float getMaxTranslation() const override;
|
||||
virtual void setMaxRotation(float val) override;
|
||||
virtual float getMaxRotation() const override;
|
||||
|
||||
virtual void setMinGradientMagnitude(float val) override;
|
||||
virtual float getMinGradientMagnitude() const override;
|
||||
virtual void setMinGradientMagnitudes(InputArray val) override;
|
||||
virtual void getMinGradientMagnitudes(OutputArray val) const override;
|
||||
|
||||
private:
|
||||
Matx33f cameraMatrix;
|
||||
std::vector<int> iterCounts;
|
||||
|
||||
float minDepth;
|
||||
float maxDepth;
|
||||
float maxDepthDiff;
|
||||
float maxPointsPart;
|
||||
|
||||
int sobelSize;
|
||||
double sobelScale;
|
||||
int normalWinSize;
|
||||
|
||||
float angleThreshold;
|
||||
float maxTranslation;
|
||||
float maxRotation;
|
||||
|
||||
float minGradientMagnitude;
|
||||
std::vector<float> minGradientMagnitudes;
|
||||
|
||||
public:
|
||||
class DefaultSets {
|
||||
public:
|
||||
const cv::Matx33f defaultCameraMatrix =
|
||||
{ /* fx, 0, cx*/ 0, 0, 0,
|
||||
/* 0, fy, cy*/ 0, 0, 0,
|
||||
/* 0, 0, 0*/ 0, 0, 0 };
|
||||
const std::vector<int> defaultIterCounts = { 7, 7, 7, 10 };
|
||||
|
||||
static constexpr float defaultMinDepth = 0.f;
|
||||
static constexpr float defaultMaxDepth = 4.f;
|
||||
static constexpr float defaultMaxDepthDiff = 0.07f;
|
||||
static constexpr float defaultMaxPointsPart = 0.07f;
|
||||
|
||||
static const int defaultSobelSize = 3;
|
||||
static constexpr double defaultSobelScale = 1. / 8.;
|
||||
static const int defaultNormalWinSize = 5;
|
||||
|
||||
static constexpr float defaultAngleThreshold = (float)(30. * CV_PI / 180.);
|
||||
static constexpr float defaultMaxTranslation = 0.15f;
|
||||
static constexpr float defaultMaxRotation = 15.f;
|
||||
|
||||
static constexpr float defaultMinGradientMagnitude = 10.f;
|
||||
const std::vector<float> defaultMinGradientMagnitudes = std::vector<float>(defaultIterCounts.size(), 10.f /*defaultMinGradientMagnitude*/);
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
OdometrySettings::OdometrySettings()
|
||||
{
|
||||
this->impl = makePtr<OdometrySettingsImplCommon>();
|
||||
}
|
||||
|
||||
void OdometrySettings::setCameraMatrix(InputArray val) { this->impl->setCameraMatrix(val); }
|
||||
void OdometrySettings::getCameraMatrix(OutputArray val) const { this->impl->getCameraMatrix(val); }
|
||||
void OdometrySettings::setIterCounts(InputArray val) { this->impl->setIterCounts(val); }
|
||||
void OdometrySettings::getIterCounts(OutputArray val) const { this->impl->getIterCounts(val); }
|
||||
|
||||
void OdometrySettings::setMinDepth(float val) { this->impl->setMinDepth(val); }
|
||||
float OdometrySettings::getMinDepth() const { return this->impl->getMinDepth(); }
|
||||
void OdometrySettings::setMaxDepth(float val) { this->impl->setMaxDepth(val); }
|
||||
float OdometrySettings::getMaxDepth() const { return this->impl->getMaxDepth(); }
|
||||
void OdometrySettings::setMaxDepthDiff(float val) { this->impl->setMaxDepthDiff(val); }
|
||||
float OdometrySettings::getMaxDepthDiff() const { return this->impl->getMaxDepthDiff(); }
|
||||
void OdometrySettings::setMaxPointsPart(float val) { this->impl->setMaxPointsPart(val); }
|
||||
float OdometrySettings::getMaxPointsPart() const { return this->impl->getMaxPointsPart(); }
|
||||
|
||||
void OdometrySettings::setSobelSize(int val) { this->impl->setSobelSize(val); }
|
||||
int OdometrySettings::getSobelSize() const { return this->impl->getSobelSize(); }
|
||||
void OdometrySettings::setSobelScale(double val) { this->impl->setSobelScale(val); }
|
||||
double OdometrySettings::getSobelScale() const { return this->impl->getSobelScale(); }
|
||||
void OdometrySettings::setNormalWinSize(int val) { this->impl->setNormalWinSize(val); }
|
||||
int OdometrySettings::getNormalWinSize() const { return this->impl->getNormalWinSize(); }
|
||||
|
||||
void OdometrySettings::setAngleThreshold(float val) { this->impl->setAngleThreshold(val); }
|
||||
float OdometrySettings::getAngleThreshold() const { return this->impl->getAngleThreshold(); }
|
||||
void OdometrySettings::setMaxTranslation(float val) { this->impl->setMaxTranslation(val); }
|
||||
float OdometrySettings::getMaxTranslation() const { return this->impl->getMaxTranslation(); }
|
||||
void OdometrySettings::setMaxRotation(float val) { this->impl->setMaxRotation(val); }
|
||||
float OdometrySettings::getMaxRotation() const { return this->impl->getMaxRotation(); }
|
||||
|
||||
void OdometrySettings::setMinGradientMagnitude(float val) { this->impl->setMinGradientMagnitude(val); }
|
||||
float OdometrySettings::getMinGradientMagnitude() const { return this->impl->getMinGradientMagnitude(); }
|
||||
void OdometrySettings::setMinGradientMagnitudes(InputArray val) { this->impl->setMinGradientMagnitudes(val); }
|
||||
void OdometrySettings::getMinGradientMagnitudes(OutputArray val) const { this->impl->getMinGradientMagnitudes(val); }
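
A hedged end-to-end sketch (not part of the patch) of how a settings object feeds the new pipeline; the calls mirror the updated tests at the end of this diff, and the OdometryType / OdometryAlgoType enumerators are defined in headers outside this diff, so they are taken as parameters here rather than guessed:

cv::Mat computeSelfTransform(cv::OdometryType otype, cv::OdometryAlgoType algtype,
                             const cv::Matx33f& K, const cv::Mat& gray, const cv::Mat& depth)
{
    cv::OdometrySettings ods;                  // initialized to the DefaultSets values (see the constructor below)
    ods.setCameraMatrix(cv::Mat(K));           // must be a 3x3 CV_32F matrix, see setCameraMatrix()

    cv::Odometry odometry(otype, ods, algtype);
    cv::OdometryFrame odf = odometry.createOdometryFrame(cv::OdometryFrameStoreType::UMAT);
    odf.setImage(gray);
    odf.setDepth(depth);

    odometry.prepareFrame(odf);                // builds the per-level pyramids
    cv::Mat Rt;
    odometry.compute(odf, odf, Rt);            // same frame twice -> Rt should be close to identity
    return Rt;
}
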
|
||||
|
||||
|
||||
OdometrySettingsImplCommon::OdometrySettingsImplCommon()
|
||||
{
|
||||
DefaultSets ds;
|
||||
this->cameraMatrix = ds.defaultCameraMatrix;
|
||||
this->iterCounts = ds.defaultIterCounts;
|
||||
|
||||
this->minDepth = ds.defaultMinDepth;
|
||||
this->maxDepth = ds.defaultMaxDepth;
|
||||
this->maxDepthDiff = ds.defaultMaxDepthDiff;
|
||||
this->maxPointsPart = ds.defaultMaxPointsPart;
|
||||
|
||||
this->sobelSize = ds.defaultSobelSize;
|
||||
this->sobelScale = ds.defaultSobelScale;
|
||||
this->normalWinSize = ds.defaultNormalWinSize;
|
||||
|
||||
this->angleThreshold = ds.defaultAngleThreshold;
|
||||
this->maxTranslation = ds.defaultMaxTranslation;
|
||||
this->maxRotation = ds.defaultMaxRotation;
|
||||
|
||||
this->minGradientMagnitude = ds.defaultMinGradientMagnitude;
|
||||
this->minGradientMagnitudes = ds.defaultMinGradientMagnitudes;
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setCameraMatrix(InputArray val)
|
||||
{
|
||||
if (!val.empty())
|
||||
{
|
||||
CV_Assert(val.rows() == 3 && val.cols() == 3 && val.channels() == 1);
|
||||
CV_Assert(val.type() == CV_32F);
|
||||
val.copyTo(cameraMatrix);
|
||||
}
|
||||
else
|
||||
{
|
||||
DefaultSets ds;
|
||||
this->cameraMatrix = ds.defaultCameraMatrix;
|
||||
}
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::getCameraMatrix(OutputArray val) const
|
||||
{
|
||||
Mat(this->cameraMatrix).copyTo(val);
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setIterCounts(InputArray val)
|
||||
{
|
||||
if (!val.empty())
|
||||
{
|
||||
size_t nLevels = val.size(-1).width;
|
||||
std::vector<Mat> pyramids;
|
||||
val.getMatVector(pyramids);
|
||||
this->iterCounts.clear();
|
||||
for (size_t i = 0; i < nLevels; i++)
|
||||
this->iterCounts.push_back(pyramids[i].at<int>(0));
|
||||
}
|
||||
else
|
||||
{
|
||||
DefaultSets ds;
|
||||
this->iterCounts = ds.defaultIterCounts;
|
||||
}
|
||||
}
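
A note on driving setIterCounts() (an observation about the code above, not part of the patch): the implementation reads the level count from val.size(-1).width and then calls getMatVector() and at<int>(0) on each element, so a vector of single-element CV_32S matrices is one form the input can safely take:

std::vector<cv::Mat> iters = { cv::Mat(1, 1, CV_32S, cv::Scalar(7)),
                               cv::Mat(1, 1, CV_32S, cv::Scalar(7)),
                               cv::Mat(1, 1, CV_32S, cv::Scalar(10)) };
cv::OdometrySettings ods;
ods.setIterCounts(iters);      // coarse-to-fine iteration counts, one per pyramid level
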
|
||||
|
||||
void OdometrySettingsImplCommon::getIterCounts(OutputArray val) const
|
||||
{
|
||||
Mat(this->iterCounts).copyTo(val);
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setMinDepth(float val)
|
||||
{
|
||||
this->minDepth = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMinDepth() const
|
||||
{
|
||||
return this->minDepth;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setMaxDepth(float val)
|
||||
{
|
||||
this->maxDepth = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMaxDepth() const
|
||||
{
|
||||
return this->maxDepth;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setMaxDepthDiff(float val)
|
||||
{
|
||||
this->maxDepthDiff = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMaxDepthDiff() const
|
||||
{
|
||||
return this->maxDepthDiff;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setMaxPointsPart(float val)
|
||||
{
|
||||
this->maxPointsPart = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMaxPointsPart() const
|
||||
{
|
||||
return this->maxPointsPart;
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setSobelSize(int val)
|
||||
{
|
||||
this->sobelSize = val;
|
||||
}
|
||||
int OdometrySettingsImplCommon::getSobelSize() const
|
||||
{
|
||||
return this->sobelSize;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setSobelScale(double val)
|
||||
{
|
||||
this->sobelScale = val;
|
||||
}
|
||||
double OdometrySettingsImplCommon::getSobelScale() const
|
||||
{
|
||||
return this->sobelScale;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setNormalWinSize(int val)
|
||||
{
|
||||
this->normalWinSize = val;
|
||||
}
|
||||
int OdometrySettingsImplCommon::getNormalWinSize() const
|
||||
{
|
||||
return this->normalWinSize;
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setAngleThreshold(float val)
|
||||
{
|
||||
this->angleThreshold = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getAngleThreshold() const
|
||||
{
|
||||
return this->angleThreshold;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setMaxTranslation(float val)
|
||||
{
|
||||
this->maxTranslation = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMaxTranslation() const
|
||||
{
|
||||
return this->maxTranslation;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setMaxRotation(float val)
|
||||
{
|
||||
this->maxRotation = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMaxRotation() const
|
||||
{
|
||||
return this->maxRotation;
|
||||
}
|
||||
|
||||
void OdometrySettingsImplCommon::setMinGradientMagnitude(float val)
|
||||
{
|
||||
this->minGradientMagnitude = val;
|
||||
}
|
||||
float OdometrySettingsImplCommon::getMinGradientMagnitude() const
|
||||
{
|
||||
return this->minGradientMagnitude;
|
||||
}
|
||||
void OdometrySettingsImplCommon::setMinGradientMagnitudes(InputArray val)
|
||||
{
|
||||
if (!val.empty())
|
||||
{
|
||||
size_t nLevels = val.size(-1).width;
|
||||
std::vector<Mat> pyramids;
|
||||
val.getMatVector(pyramids);
|
||||
this->minGradientMagnitudes.clear();
|
||||
for (size_t i = 0; i < nLevels; i++)
|
||||
this->minGradientMagnitudes.push_back(pyramids[i].at<float>(0));
|
||||
}
|
||||
else
|
||||
{
|
||||
DefaultSets ds;
|
||||
this->minGradientMagnitudes = ds.defaultMinGradientMagnitudes;
|
||||
}
|
||||
}
|
||||
void OdometrySettingsImplCommon::getMinGradientMagnitudes(OutputArray val) const
|
||||
{
|
||||
Mat(this->minGradientMagnitudes).copyTo(val);
|
||||
}
|
||||
|
||||
}
|
@ -178,7 +178,7 @@ private:
|
||||
class PlaneGrid
|
||||
{
|
||||
public:
|
||||
PlaneGrid(const Mat_<Vec3f>& points3d, int block_size) :
|
||||
PlaneGrid(const Mat_<Vec4f>& points3d, int block_size) :
|
||||
block_size_(block_size)
|
||||
{
|
||||
// Figure out some dimensions
|
||||
@ -204,10 +204,10 @@ public:
|
||||
int K = 0;
|
||||
for (int j = y * block_size; j < std::min((y + 1) * block_size, points3d.rows); ++j)
|
||||
{
|
||||
const Vec3f* vec = points3d.ptr < Vec3f >(j, x * block_size), * vec_end;
|
||||
const Vec4f* vec = points3d.ptr < Vec4f >(j, x * block_size), * vec_end;
|
||||
float* pointpointt = reinterpret_cast<float*>(Q_.ptr < Vec<float, 9> >(j, x * block_size));
|
||||
if (x == mini_cols - 1)
|
||||
vec_end = points3d.ptr < Vec3f >(j, points3d.cols - 1) + 1;
|
||||
vec_end = points3d.ptr < Vec4f >(j, points3d.cols - 1) + 1;
|
||||
else
|
||||
vec_end = vec + block_size;
|
||||
for (; vec != vec_end; ++vec, pointpointt += 9)
|
||||
@ -226,7 +226,7 @@ public:
|
||||
*(pointpointt + 8) = vec->val[2] * vec->val[2];
|
||||
|
||||
Q += *reinterpret_cast<Matx33f*>(pointpointt);
|
||||
m += (*vec);
|
||||
m += Vec3f((*vec)[0], (*vec)[1], (*vec)[2]);
|
||||
++K;
|
||||
}
|
||||
}
|
||||
@ -327,7 +327,7 @@ private:
|
||||
class InlierFinder
|
||||
{
|
||||
public:
|
||||
InlierFinder(float err, const Mat_<Vec3f>& points3d, const Mat_<Vec3f>& normals,
|
||||
InlierFinder(float err, const Mat_<Vec4f>& points3d, const Mat_<Vec4f>& normals,
|
||||
unsigned char plane_index, int block_size) :
|
||||
err_(err),
|
||||
points3d_(points3d),
|
||||
@ -362,14 +362,14 @@ public:
|
||||
for (int yy = range_y.start; yy != range_y.end; ++yy)
|
||||
{
|
||||
uchar* data = overall_mask.ptr(yy, range_x.start), * data_end = data + range_x.size();
|
||||
const Vec3f* point = points3d_.ptr < Vec3f >(yy, range_x.start);
|
||||
const Vec4f* point = points3d_.ptr < Vec4f >(yy, range_x.start);
|
||||
const Matx33f* Q_local = reinterpret_cast<const Matx33f*>(plane_grid.Q_.ptr < Vec<float, 9>
|
||||
>(yy, range_x.start));
|
||||
|
||||
// Depending on whether you have a normal, check it
|
||||
if (!normals_.empty())
|
||||
{
|
||||
const Vec3f* normal = normals_.ptr < Vec3f >(yy, range_x.start);
|
||||
const Vec4f* normal = normals_.ptr < Vec4f >(yy, range_x.start);
|
||||
for (; data != data_end; ++data, ++point, ++normal, ++Q_local)
|
||||
{
|
||||
// Don't do anything if the point already belongs to another plane
|
||||
@ -377,13 +377,15 @@ public:
|
||||
continue;
|
||||
|
||||
// If the point is close enough to the plane
|
||||
if (plane->distance(*point) < err_)
|
||||
Vec3f _p = Vec3f((*point)[0], (*point)[1], (*point)[2]);
|
||||
if (plane->distance(_p) < err_)
|
||||
{
|
||||
// make sure the normals are similar to the plane
|
||||
if (std::abs(plane->n().dot(*normal)) > 0.3)
|
||||
Vec3f _n = Vec3f((*normal)[0], (*normal)[1], (*normal)[2]);
|
||||
if (std::abs(plane->n().dot(_n)) > 0.3)
|
||||
{
|
||||
// The point now belongs to the plane
|
||||
plane->UpdateStatistics(*point, *Q_local);
|
||||
plane->UpdateStatistics(_p, *Q_local);
|
||||
*data = plane_index_;
|
||||
++n_valid_points;
|
||||
}
|
||||
@ -399,10 +401,11 @@ public:
|
||||
continue;
|
||||
|
||||
// If the point is close enough to the plane
|
||||
if (plane->distance(*point) < err_)
|
||||
Vec3f _p = Vec3f((*point)[0], (*point)[1], (*point)[2]);
|
||||
if (plane->distance(_p) < err_)
|
||||
{
|
||||
// The point now belongs to the plane
|
||||
plane->UpdateStatistics(*point, *Q_local);
|
||||
plane->UpdateStatistics(_p, *Q_local);
|
||||
*data = plane_index_;
|
||||
++n_valid_points;
|
||||
}
|
||||
@ -461,8 +464,8 @@ public:
|
||||
|
||||
private:
|
||||
float err_;
|
||||
const Mat_<Vec3f>& points3d_;
|
||||
const Mat_<Vec3f>& normals_;
|
||||
const Mat_<Vec4f>& points3d_;
|
||||
const Mat_<Vec4f>& normals_;
|
||||
unsigned char plane_index_;
|
||||
/** The block size as defined in the main algorithm */
|
||||
int block_size_;
|
||||
@ -478,7 +481,7 @@ void findPlanes(InputArray points3d_in, InputArray normals_in, OutputArray mask_
|
||||
{
|
||||
CV_Assert(method == RGBD_PLANE_METHOD_DEFAULT);
|
||||
|
||||
Mat_<Vec3f> points3d, normals;
|
||||
Mat_<Vec4f> points3d, normals;
|
||||
if (points3d_in.depth() == CV_32F)
|
||||
points3d = points3d_in.getMat();
|
||||
else
|
||||
|
@ -8,6 +8,36 @@
|
||||
namespace cv
|
||||
{
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat getTMat(InputArray, int)
|
||||
{
|
||||
return TMat();
|
||||
}
|
||||
|
||||
template<>
|
||||
Mat getTMat<Mat>(InputArray a, int i)
|
||||
{
|
||||
return a.getMat(i);
|
||||
}
|
||||
|
||||
template<>
|
||||
UMat getTMat<UMat>(InputArray a, int i)
|
||||
{
|
||||
return a.getUMat(i);
|
||||
}
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat& getTMatRef(InputOutputArray, int)
|
||||
{
|
||||
return TMat();
|
||||
}
|
||||
|
||||
template<>
|
||||
Mat& getTMatRef<Mat>(InputOutputArray a, int i)
|
||||
{
|
||||
return a.getMatRef(i);
|
||||
}
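
A short sketch (not part of the patch) of why these getTMat / getTMatRef specializations exist: code templated on the container type can pull either a Mat or a UMat out of an InputArray, so a single template (as OdometryFrameImplTMat<TMat> does above) serves both the CPU and the OpenCL paths:

template<typename TMat>
static double meanDepthT(cv::InputArray depth)
{
    TMat d = cv::getTMat<TMat>(depth);   // Mat or UMat, chosen by the caller
    return cv::mean(d)[0];
}
// meanDepthT<cv::Mat>(depth) on the CPU path, meanDepthT<cv::UMat>(depth) when OpenCL is available
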
|
||||
|
||||
/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
|
||||
* by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
|
||||
* Otherwise, the image is simply converted to floats
|
||||
|
@ -105,6 +105,18 @@ static inline bool isNaN(const cv::v_float32x4& p)
|
||||
}
|
||||
#endif
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat getTMat(InputArray, int = -1);
|
||||
template<>
|
||||
Mat getTMat<Mat>(InputArray a, int i);
|
||||
template<>
|
||||
UMat getTMat<UMat>(InputArray a, int i);
|
||||
|
||||
template<typename TMat>
|
||||
inline TMat& getTMatRef(InputOutputArray, int = -1);
|
||||
template<>
|
||||
Mat& getTMatRef<Mat>(InputOutputArray a, int i);
|
||||
|
||||
inline size_t roundDownPow2(size_t x)
|
||||
{
|
||||
size_t shift = 0;
|
||||
@ -183,6 +195,18 @@ typedef cv::Mat_< ptype > Points;
|
||||
typedef Points Normals;
|
||||
typedef Points Colors;
|
||||
|
||||
typedef cv::Point3f _ptype;
|
||||
typedef cv::Mat_< _ptype > _Points;
|
||||
typedef _Points _Normals;
|
||||
typedef _Points _Colors;
|
||||
|
||||
enum
|
||||
{
|
||||
_DEPTH_TYPE = DataType<depthType>::type,
|
||||
_POINT_TYPE = DataType<_ptype >::type,
|
||||
_COLOR_TYPE = DataType<_ptype >::type
|
||||
};
|
||||
|
||||
typedef cv::Mat_< depthType > Depth;
|
||||
|
||||
void makeFrameFromDepth(InputArray depth, OutputArray pyrPoints, OutputArray pyrNormals,
|
||||
|
@ -17,12 +17,12 @@ rayPlaneIntersection(Point2f uv, const Mat& centroid, const Mat& normal, const M
|
||||
}
|
||||
#endif
|
||||
|
||||
Vec3f rayPlaneIntersection(const Vec3d& uv1, double centroid_dot_normal, const Vec3d& normal, const Matx33d& Kinv)
|
||||
Vec4f rayPlaneIntersection(const Vec3d& uv1, double centroid_dot_normal, const Vec4d& normal, const Matx33d& Kinv)
|
||||
{
|
||||
Matx31d L = Kinv * uv1; //a ray passing through camera optical center
|
||||
//and uv.
|
||||
L = L * (1.0 / cv::norm(L));
|
||||
double LdotNormal = L.dot(normal);
|
||||
double LdotNormal = L.dot(Vec3d(normal[0], normal[1], normal[2]));
|
||||
double d;
|
||||
if (std::fabs(LdotNormal) > 1e-9)
|
||||
{
|
||||
@ -34,7 +34,7 @@ Vec3f rayPlaneIntersection(const Vec3d& uv1, double centroid_dot_normal, const V
|
||||
std::cout << "warning, LdotNormal nearly 0! " << LdotNormal << std::endl;
|
||||
std::cout << "contents of L, Normal: " << Mat(L) << ", " << Mat(normal) << std::endl;
|
||||
}
|
||||
Vec3f xyz((float)(d * L(0)), (float)(d * L(1)), (float)(d * L(2)));
|
||||
Vec4f xyz((float)(d * L(0)), (float)(d * L(1)), (float)(d * L(2)), 0);
|
||||
return xyz;
|
||||
}
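
Restated from the helper above (not part of the patch): a ray through the optical center in direction L = K^{-1}[u, v, 1]^T / ||K^{-1}[u, v, 1]^T|| meets the plane { x : n . x = n . p_0 } at

\[
d = \frac{n \cdot p_0}{n \cdot L}, \qquad X = d L,
\]

which is what rayPlaneIntersection() returns, now with a fourth zero component appended to match the 4-channel point layout used throughout this patch.
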
|
||||
|
||||
@ -48,9 +48,9 @@ float cy = H / 2.f + 0.5f;
Mat K = (Mat_<double>(3, 3) << focal_length, 0, cx, 0, focal_length, cy, 0, 0, 1);
Mat Kinv = K.inv();

void points3dToDepth16U(const Mat_<Vec3f>& points3d, Mat& depthMap);
void points3dToDepth16U(const Mat_<Vec4f>& points3d, Mat& depthMap);

void points3dToDepth16U(const Mat_<Vec3f>& points3d, Mat& depthMap)
void points3dToDepth16U(const Mat_<Vec4f>& points3d, Mat& depthMap)
{
std::vector<Point3f> points3dvec;
for (int i = 0; i < H; i++)
@ -80,13 +80,14 @@ void points3dToDepth16U(const Mat_<Vec3f>& points3d, Mat& depthMap)
static RNG rng;
struct Plane
{
Vec3d n, p;
Vec4d n, p;
double p_dot_n;
Plane()
{
n[0] = rng.uniform(-0.5, 0.5);
n[1] = rng.uniform(-0.5, 0.5);
n[2] = -0.3; //rng.uniform(-1.f, 0.5f);
n[3] = 0.;
n = n / cv::norm(n);
set_d((float)rng.uniform(-2.0, 0.6));
}
@ -94,11 +95,11 @@ struct Plane
void
set_d(float d)
{
p = Vec3d(0, 0, d / n[2]);
p = Vec4d(0, 0, d / n[2], 0);
p_dot_n = p.dot(n);
}

Vec3f
Vec4f
intersection(float u, float v, const Matx33f& Kinv_in) const
{
return rayPlaneIntersection(Vec3d(u, v, 1), p_dot_n, n, Kinv_in);
@ -118,8 +119,8 @@ void gen_points_3d(std::vector<Plane>& planes_out, Mat_<unsigned char> &plane_ma
planes.push_back(px);
}
}
Mat_ < Vec3f > outp(H, W);
Mat_ < Vec3f > outn(H, W);
Mat_ < Vec4f > outp(H, W);
Mat_ < Vec4f > outn(H, W);
plane_mask.create(H, W);

// n ( r - r_0) = 0
@ -282,15 +283,15 @@ public:
normals_computer->apply(points3d, in_normals);
tm.stop();

Mat_<Vec3f> normals, ground_normals;
in_normals.convertTo(normals, CV_32FC3);
in_ground_normals.convertTo(ground_normals, CV_32FC3);
Mat_<Vec4f> normals, ground_normals;
in_normals.convertTo(normals, CV_32FC4);
in_ground_normals.convertTo(ground_normals, CV_32FC4);

float err = 0;
for (int y = 0; y < normals.rows; ++y)
for (int x = 0; x < normals.cols; ++x)
{
Vec3f vec1 = normals(y, x), vec2 = ground_normals(y, x);
Vec4f vec1 = normals(y, x), vec2 = ground_normals(y, x);
vec1 = vec1 / cv::norm(vec1);
vec2 = vec2 / cv::norm(vec2);

@ -381,7 +382,8 @@ public:
ASSERT_LE(float(n_max - n_gt) / n_gt, 0.001);
// Compare the normals
Vec3d normal(plane_coefficients[i_max][0], plane_coefficients[i_max][1], plane_coefficients[i_max][2]);
ASSERT_GE(std::abs(gt_planes[j].n.dot(normal)), 0.95);
Vec4d n = gt_planes[j].n;
ASSERT_GE(std::abs(Vec3d(n[0], n[1], n[2]).dot(normal)), 0.95);
}

CV_LOG_INFO(NULL, "Speed: ");

@ -6,8 +6,6 @@

namespace opencv_test { namespace {

#define SHOW_DEBUG_IMAGES 0

static
void warpFrame(const Mat& image, const Mat& depth, const Mat& rvec, const Mat& tvec, const Mat& K,
Mat& warpedImage, Mat& warpedDepth)
@ -33,6 +31,12 @@ void warpFrame(const Mat& image, const Mat& depth, const Mat& rvec, const Mat& t

Mat cloud;
depthTo3d(depth, K, cloud);

Mat cloud3, channels[4];
cv::split(cloud, channels);
std::vector<Mat> merged = { channels[0], channels[1], channels[2] };
cv::merge(merged, cloud3);

Mat Rt = Mat::eye(4, 4, CV_64FC1);
{
Mat R, dst;
@ -45,7 +49,7 @@ void warpFrame(const Mat& image, const Mat& depth, const Mat& rvec, const Mat& t
tvec.copyTo(dst);
}
Mat warpedCloud, warpedImagePoints;
perspectiveTransform(cloud, warpedCloud, Rt);
perspectiveTransform(cloud3, warpedCloud, Rt);
projectPoints(warpedCloud.reshape(3, 1), Mat(3,1,CV_32FC1, Scalar(0)), Mat(3,1,CV_32FC1, Scalar(0)), K, Mat(1,5,CV_32FC1, Scalar(0)), warpedImagePoints);
warpedImagePoints = warpedImagePoints.reshape(2, cloud.rows);
Rect r(0, 0, image.cols, image.rows);
@ -116,11 +120,13 @@ void dilateFrame(Mat& image, Mat& depth)
class OdometryTest
{
public:
OdometryTest(const Ptr<Odometry>& _odometry,
OdometryTest(OdometryType _otype,
OdometryAlgoType _algtype,
double _maxError1,
double _maxError5,
double _idError = DBL_EPSILON) :
odometry(_odometry),
otype(_otype),
algtype(_algtype),
maxError1(_maxError1),
maxError5(_maxError5),
idError(_idError)
@ -143,7 +149,8 @@ public:
void run();
void checkUMats();

Ptr<Odometry> odometry;
OdometryType otype;
OdometryAlgoType algtype;
double maxError1;
double maxError5;
double idError;
@ -201,24 +208,30 @@ void OdometryTest::checkUMats()
Mat image, depth;
readData(image, depth);

odometry->setCameraMatrix(K);
OdometrySettings ods;
ods.setCameraMatrix(K);
Odometry odometry = Odometry(otype, ods, algtype);
OdometryFrame odf = odometry.createOdometryFrame(OdometryFrameStoreType::UMAT);

Mat calcRt;

UMat uimage, udepth, umask;
UMat uimage, udepth;
image.copyTo(uimage);
depth.copyTo(udepth);
Mat(image.size(), CV_8UC1, Scalar(255)).copyTo(umask);
odf.setImage(uimage);
odf.setDepth(udepth);
uimage.release();
udepth.release();

bool isComputed = odometry->compute(uimage, udepth, umask,
uimage, udepth, umask,
calcRt);
odometry.prepareFrame(odf);
bool isComputed = odometry.compute(odf, odf, calcRt);
ASSERT_TRUE(isComputed);
double diff = cv::norm(calcRt, Mat::eye(4, 4, CV_64FC1));
if (diff > idError)
{
FAIL() << "Incorrect transformation between the same frame (not the identity matrix), diff = " << diff << std::endl;
}

}

void OdometryTest::run()
@ -227,22 +240,28 @@ void OdometryTest::run()

Mat image, depth;
readData(image, depth);

odometry->setCameraMatrix(K);

OdometrySettings ods;
ods.setCameraMatrix(K);
Odometry odometry = Odometry(otype, ods, algtype);
OdometryFrame odf = odometry.createOdometryFrame();
odf.setImage(image);
odf.setDepth(depth);
Mat calcRt;

// 1. Try to find Rt between the same frame (try masks also).
Mat mask(image.size(), CV_8UC1, Scalar(255));
bool isComputed = odometry->compute(image, depth, mask, image, depth, mask, calcRt);

odometry.prepareFrame(odf);
bool isComputed = odometry.compute(odf, odf, calcRt);

if(!isComputed)
{
FAIL() << "Can not find Rt between the same frame" << std::endl;
}
double diff = cv::norm(calcRt, Mat::eye(4,4,CV_64FC1));
if(diff > idError)
double ndiff = cv::norm(calcRt, Mat::eye(4,4,CV_64FC1));
if(ndiff > idError)
{
FAIL() << "Incorrect transformation between the same frame (not the identity matrix), diff = " << diff << std::endl;
FAIL() << "Incorrect transformation between the same frame (not the identity matrix), diff = " << ndiff << std::endl;
}

// 2. Generate random rigid body motion in some ranges several times (iterCount).
@ -254,45 +273,62 @@ void OdometryTest::run()
int iterCount = 100;
int better_1time_count = 0;
int better_5times_count = 0;
for(int iter = 0; iter < iterCount; iter++)
for (int iter = 0; iter < iterCount; iter++)
{
Mat rvec, tvec;
generateRandomTransformation(rvec, tvec);

Mat warpedImage, warpedDepth;
warpFrame(image, depth, rvec, tvec, K, warpedImage, warpedDepth);
dilateFrame(warpedImage, warpedDepth); // due to inaccuracy after warping

isComputed = odometry->compute(image, depth, mask, warpedImage, warpedDepth, mask, calcRt);
if(!isComputed)
continue;
OdometryFrame odfSrc = odometry.createOdometryFrame();
OdometryFrame odfDst = odometry.createOdometryFrame();
odfSrc.setImage(image);
odfSrc.setDepth(depth);
odfDst.setImage(warpedImage);
odfDst.setDepth(warpedDepth);

odometry.prepareFrames(odfSrc, odfDst);
isComputed = odometry.compute(odfSrc, odfDst, calcRt);

if (!isComputed)
continue;
Mat calcR = calcRt(Rect(0,0,3,3)), calcRvec;
cv::Rodrigues(calcR, calcRvec);
calcRvec = calcRvec.reshape(rvec.channels(), rvec.rows);
Mat calcTvec = calcRt(Rect(3,0,1,3));

#if SHOW_DEBUG_IMAGES
imshow("image", image);
imshow("warpedImage", warpedImage);
Mat resultImage, resultDepth;
warpFrame(image, depth, calcRvec, calcTvec, K, resultImage, resultDepth);
imshow("resultImage", resultImage);
waitKey();
#endif
if (cvtest::debugLevel >= 10)
{
imshow("image", image);
imshow("warpedImage", warpedImage);
Mat resultImage, resultDepth;
warpFrame(image, depth, calcRvec, calcTvec, K, resultImage, resultDepth);
imshow("resultImage", resultImage);
waitKey(100);
}

// compare rotation
double rdiffnorm = cv::norm(rvec - calcRvec),
rnorm = cv::norm(rvec);
double tdiffnorm = cv::norm(tvec - calcTvec),
tnorm = cv::norm(tvec);
if(rdiffnorm < rnorm && tdiffnorm < tnorm)
double possibleError = algtype == OdometryAlgoType::COMMON ? 0.09f : 0.015f;

Affine3f src = Affine3f(Vec3f(rvec), Vec3f(tvec));
Affine3f res = Affine3f(Vec3f(calcRvec), Vec3f(calcTvec));
Affine3f src_inv = src.inv();
Affine3f diff = res * src_inv;
double rdiffnorm = cv::norm(diff.rvec());
double tdiffnorm = cv::norm(diff.translation());

if (rdiffnorm < possibleError && tdiffnorm < possibleError)
{
better_1time_count++;
if(5. * rdiffnorm < rnorm && 5 * tdiffnorm < tnorm)
}
if (5. * rdiffnorm < possibleError && 5 * tdiffnorm < possibleError)
better_5times_count++;

CV_LOG_INFO(NULL, "Iter " << iter);
CV_LOG_INFO(NULL, "rdiffnorm " << rdiffnorm << "; rnorm " << rnorm);
CV_LOG_INFO(NULL, "tdiffnorm " << tdiffnorm << "; tnorm " << tnorm);
CV_LOG_INFO(NULL, "rdiff: " << Vec3f(diff.rvec()) << "; rdiffnorm: " << rdiffnorm);
CV_LOG_INFO(NULL, "tdiff: " << Vec3f(diff.translation()) << "; tdiffnorm: " << tdiffnorm);

CV_LOG_INFO(NULL, "better_1time_count " << better_1time_count << "; better_5time_count " << better_5times_count);
}
@ -318,89 +354,51 @@ void OdometryTest::run()

TEST(RGBD_Odometry_Rgbd, algorithmic)
{
OdometryTest test(cv::Odometry::createFromName("RgbdOdometry"), 0.99, 0.89);
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.89);
test.run();
}

TEST(RGBD_Odometry_ICP, algorithmic)
{
OdometryTest test(cv::Odometry::createFromName("ICPOdometry"), 0.99, 0.99);
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::COMMON, 0.99, 0.99);
test.run();
}

TEST(RGBD_Odometry_RgbdICP, algorithmic)
{
OdometryTest test(cv::Odometry::createFromName("RgbdICPOdometry"), 0.99, 0.99);
OdometryTest test(OdometryType::RGB_DEPTH, OdometryAlgoType::COMMON, 0.99, 0.99);
test.run();
}

TEST(RGBD_Odometry_FastICP, algorithmic)
{
OdometryTest test(cv::Odometry::createFromName("FastICPOdometry"), 0.99, 0.99, FLT_EPSILON);
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.89, FLT_EPSILON);
test.run();
}


TEST(RGBD_Odometry_Rgbd, UMats)
{
OdometryTest test(cv::Odometry::createFromName("RgbdOdometry"), 0.99, 0.89);
OdometryTest test(OdometryType::RGB, OdometryAlgoType::COMMON, 0.99, 0.89);
test.checkUMats();
}

TEST(RGBD_Odometry_ICP, UMats)
{
OdometryTest test(cv::Odometry::createFromName("ICPOdometry"), 0.99, 0.99);
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::COMMON, 0.99, 0.99);
test.checkUMats();
}

TEST(RGBD_Odometry_RgbdICP, UMats)
{
OdometryTest test(cv::Odometry::createFromName("RgbdICPOdometry"), 0.99, 0.99);
OdometryTest test(OdometryType::RGB_DEPTH, OdometryAlgoType::COMMON, 0.99, 0.99);
test.checkUMats();
}

TEST(RGBD_Odometry_FastICP, UMats)
{
OdometryTest test(cv::Odometry::createFromName("FastICPOdometry"), 0.99, 0.99, FLT_EPSILON);
OdometryTest test(OdometryType::DEPTH, OdometryAlgoType::FAST, 0.99, 0.89, FLT_EPSILON);
test.checkUMats();
}


/****************************************************************************************\
* Depth to 3d tests *
\****************************************************************************************/

TEST(RGBD_DepthTo3d, compute)
{
// K from a VGA Kinect
Mat K = OdometryTest::getCameraMatrix();

// Create a random depth image
RNG rng;
Mat_<float> depth(480, 640);
rng.fill(depth, RNG::UNIFORM, 0, 100);

// Create some 3d points on the plane
int rows = depth.rows, cols = depth.cols;
Mat_<Vec3f> points3d;
depthTo3d(depth, K, points3d);

// Make sure the points belong to the plane
Mat points = points3d.reshape(1, rows * cols);
Mat image_points;
Mat rvec;
cv::Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
Mat tvec = (Mat_<float>(1, 3) << 0, 0, 0);
projectPoints(points, rvec, tvec, K, Mat(), image_points);
image_points = image_points.reshape(2, rows);

float avg_diff = 0;
for (int y = 0; y < rows; ++y)
for (int x = 0; x < cols; ++x)
avg_diff += (float)cv::norm(image_points.at<Vec2f>(y, x) - Vec2f((float)x, (float)y));

// Verify the function works
ASSERT_LE(avg_diff / rows / cols, 1e-4) << "Average error for ground truth is: " << (avg_diff / rows / cols);
}

}} // namespace
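For readers skimming the diff, the updated tests exercise the new odometry pipeline through a fixed sequence of calls: build an OdometrySettings object, construct an Odometry from a type, settings and algorithm type, create and fill OdometryFrame objects, prepare them, then call compute. The sketch below is assembled only from the calls visible in this patch; the include path, the wrapper name estimateRt, and the choice of RGB_DEPTH/COMMON for this example are assumptions for illustration, not a verbatim excerpt of the final API.

// Minimal sketch of the usage pattern shown by the updated tests.
// Assumptions: header path and wrapper function name are illustrative only.
#include "opencv2/3d.hpp"
using namespace cv;

// Estimate the rigid transform between a source and a destination RGB-D frame.
bool estimateRt(const Mat& image, const Mat& depth,
                const Mat& dstImage, const Mat& dstDepth,
                const Mat& K, Mat& Rt)
{
    OdometrySettings ods;
    ods.setCameraMatrix(K);                 // calibration lives in the settings object
    Odometry odometry = Odometry(OdometryType::RGB_DEPTH, ods, OdometryAlgoType::COMMON);

    OdometryFrame src = odometry.createOdometryFrame();
    OdometryFrame dst = odometry.createOdometryFrame();
    src.setImage(image);
    src.setDepth(depth);
    dst.setImage(dstImage);
    dst.setDepth(dstDepth);

    odometry.prepareFrames(src, dst);       // precompute per-frame data used by compute()
    return odometry.compute(src, dst, Rt);  // Rt is a 4x4 CV_64FC1 transform on success
}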