Updated bundle adjustment in the stitching module: 1) it now minimizes reprojection error, 2) the error is minimized over the focal, aspect, p.p.x and p.p.y parameters. Refactored and updated the warpers.

This commit is contained in:
Alexey Spizhevoy 2011-09-16 12:25:23 +00:00
parent aebd7ebb75
commit 23636433d7
15 changed files with 377 additions and 371 deletions
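
In other words, each camera now carries four intrinsic parameters (focal length, aspect ratio, principal point x and y) plus a rotation, and the adjuster compares matched keypoints after mapping them through the homography those parameters induce. A minimal sketch of that error term, restating the CameraParams::K() and BundleAdjuster::calcError() changes further down in this diff (illustration only, not the exact implementation):

#include <opencv2/core/core.hpp>

// Intrinsic matrix assembled from the four adjusted parameters,
// matching the new CameraParams::K() below.
static cv::Mat intrinsics(double focal, double aspect, double ppx, double ppy)
{
    cv::Mat_<double> K = cv::Mat::eye(3, 3, CV_64F);
    K(0,0) = focal;          K(0,2) = ppx;
    K(1,1) = focal * aspect; K(1,2) = ppy;
    return K;
}

// Reprojection residual for one inlier match: map p1 from image i into image j
// through H = K2 * R2^-1 * R1 * K1^-1 and compare with the observed p2; these are
// the two components that the new calcError() writes into err.
static cv::Point2d reprojError(const cv::Mat &K1, const cv::Mat &R1,
                               const cv::Mat &K2, const cv::Mat &R2,
                               const cv::Point2d &p1, const cv::Point2d &p2)
{
    cv::Mat H = K2 * R2.inv() * R1 * K1.inv();
    double x = H.at<double>(0,0)*p1.x + H.at<double>(0,1)*p1.y + H.at<double>(0,2);
    double y = H.at<double>(1,0)*p1.x + H.at<double>(1,1)*p1.y + H.at<double>(1,2);
    double z = H.at<double>(2,0)*p1.x + H.at<double>(2,1)*p1.y + H.at<double>(2,2);
    return cv::Point2d(p2.x - x/z, p2.y - y/z);
}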

View File

@ -645,15 +645,15 @@ namespace cv
CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null());
//! builds plane warping maps
CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, double dist,
CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());
//! builds cylindrical warping maps
CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,
CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());
//! builds spherical warping maps
CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,
CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());
//! rotate 8bit single or four channel image
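
A hedged usage sketch of the revised map builders declared above; the intrinsics (700, 320, 240) and the 640x480 frame are made-up example values, and the stream argument keeps its default:

#include <opencv2/gpu/gpu.hpp>

void buildSphericalMapsExample()
{
    // Example 3x3 CV_32F intrinsics and rotation; K and R replace the old (f, s, dist) arguments.
    cv::Mat K = (cv::Mat_<float>(3, 3) << 700.f,   0.f, 320.f,
                                            0.f, 700.f, 240.f,
                                            0.f,   0.f,   1.f);
    cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
    cv::gpu::GpuMat map_x, map_y;
    cv::gpu::buildWarpSphericalMaps(cv::Size(640, 480), cv::Rect(0, 0, 640, 480),
                                    K, R, /*scale*/ 700.f, map_x, map_y);
}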

View File

@ -366,7 +366,8 @@ PERF_TEST_P(DevInfo_Size, buildWarpPlaneMaps, testing::Combine(testing::ValuesIn
SIMPLE_TEST_CYCLE()
{
buildWarpPlaneMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, 1.0, map_x, map_y);
buildWarpPlaneMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),
Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);
}
Mat map_x_host(map_x);
@ -391,7 +392,8 @@ PERF_TEST_P(DevInfo_Size, buildWarpCylindricalMaps, testing::Combine(testing::Va
SIMPLE_TEST_CYCLE()
{
buildWarpCylindricalMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, map_x, map_y);
buildWarpCylindricalMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),
Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);
}
Mat map_x_host(map_x);
@ -402,7 +404,7 @@ PERF_TEST_P(DevInfo_Size, buildWarpCylindricalMaps, testing::Combine(testing::Va
}
PERF_TEST_P(DevInfo_Size, buildWarpSphericalMaps, testing::Combine(testing::ValuesIn(devices()),
testing::Values(GPU_TYPICAL_MAT_SIZES)))
testing::Values(GPU_TYPICAL_MAT_SIZES)))
{
DeviceInfo devInfo = std::tr1::get<0>(GetParam());
Size size = std::tr1::get<1>(GetParam());
@ -416,7 +418,8 @@ PERF_TEST_P(DevInfo_Size, buildWarpSphericalMaps, testing::Combine(testing::Valu
SIMPLE_TEST_CYCLE()
{
buildWarpSphericalMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, map_x, map_y);
buildWarpSphericalMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),
Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);
}
Mat map_x_host(map_x);

View File

@ -787,14 +787,14 @@ namespace cv { namespace gpu { namespace imgproc
//////////////////////////////////////////////////////////////////////////
// buildWarpMaps
// TODO use intrinsics like __sinf and so on
namespace build_warp_maps
{
__constant__ float cr[9];
__constant__ float crinv[9];
__constant__ float cf, cs;
__constant__ float chalf_w, chalf_h;
__constant__ float cdist;
__constant__ float ck_rinv[9];
__constant__ float cr_kinv[9];
__constant__ float cscale;
}
@ -805,16 +805,16 @@ namespace cv { namespace gpu { namespace imgproc
{
using namespace build_warp_maps;
float x_ = u / cs;
float y_ = v / cs;
float x_ = u / cscale;
float y_ = v / cscale;
float z;
x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*cdist;
y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*cdist;
z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*cdist;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2];
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5];
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8];
x = cf*x/z + chalf_w;
y = cf*y/z + chalf_h;
x /= z;
y /= z;
}
};
@ -826,18 +826,18 @@ namespace cv { namespace gpu { namespace imgproc
{
using namespace build_warp_maps;
u /= cs;
u /= cscale;
float x_ = sinf(u);
float y_ = v / cs;
float y_ = v / cscale;
float z_ = cosf(u);
float z;
x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*z_;
y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*z_;
z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*z_;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
x = cf*x/z + chalf_w;
y = cf*y/z + chalf_h;
x /= z;
y /= z;
}
};
@ -849,8 +849,8 @@ namespace cv { namespace gpu { namespace imgproc
{
using namespace build_warp_maps;
v /= cs;
u /= cs;
v /= cscale;
u /= cscale;
float sinv = sinf(v);
float x_ = sinv * sinf(u);
@ -858,12 +858,12 @@ namespace cv { namespace gpu { namespace imgproc
float z_ = sinv * cosf(u);
float z;
x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*z_;
y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*z_;
z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*z_;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
x = cf*x/z + chalf_w;
y = cf*y/z + chalf_h;
x /= z;
y /= z;
}
};
@ -887,16 +887,12 @@ namespace cv { namespace gpu { namespace imgproc
void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
const float r[9], const float rinv[9], float f, float s, float dist,
float half_w, float half_h, cudaStream_t stream)
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cdist, &dist, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
@ -912,15 +908,12 @@ namespace cv { namespace gpu { namespace imgproc
void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
const float r[9], const float rinv[9], float f, float s,
float half_w, float half_h, cudaStream_t stream)
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
@ -936,15 +929,12 @@ namespace cv { namespace gpu { namespace imgproc
void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
const float r[9], const float rinv[9], float f, float s,
float half_w, float half_h, cudaStream_t stream)
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
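
Stated compactly, all three backward maps above now finish with the same projection through ck_rinv (with z_ equal to 1 in the plane case) instead of the old cf/chalf_w/chalf_h form. A host-side restatement of that shared step, nothing beyond what the device code already does:

// Final projection shared by the plane/cylindrical/spherical backward maps:
// X -> K * R^-1 * X, then divide by the third coordinate. k_rinv is the row-major
// 3x3 product uploaded to build_warp_maps::ck_rinv above.
inline void projectWithKRinv(const float k_rinv[9],
                             float X, float Y, float Z, float &x, float &y)
{
    float xs = k_rinv[0]*X + k_rinv[1]*Y + k_rinv[2]*Z;
    float ys = k_rinv[3]*X + k_rinv[4]*Y + k_rinv[5]*Z;
    float zs = k_rinv[6]*X + k_rinv[7]*Y + k_rinv[8]*Z;
    x = xs / zs;
    y = ys / zs;
}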

View File

@ -56,9 +56,9 @@ void cv::gpu::resize(const GpuMat&, GpuMat&, Size, double, double, int, Stream&)
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::warpAffine(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::warpPerspective(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, double, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::rotate(const GpuMat&, GpuMat&, Size, double, double, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::integral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::integralBuffered(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
@ -584,22 +584,25 @@ void cv::gpu::warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size
namespace cv { namespace gpu { namespace imgproc
{
void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
const float r[9], const float rinv[9], float f, float s, float dist,
float half_w, float half_h, cudaStream_t stream);
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream);
}}}
void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,
double dist, GpuMat& map_x, GpuMat& map_y, Stream& stream)
void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);
Mat Rinv = R.inv();
CV_Assert(Rinv.isContinuous());
CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);
Mat K_Rinv = K * R.t();
Mat R_Kinv = R * K.inv();
CV_Assert(K_Rinv.isContinuous());
CV_Assert(R_Kinv.isContinuous());
map_x.create(dst_roi.size(), CV_32F);
map_y.create(dst_roi.size(), CV_32F);
imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),
static_cast<float>(f), static_cast<float>(s), static_cast<float>(dist),
0.5f*src_size.width, 0.5f*src_size.height, StreamAccessor::getStream(stream));
imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
scale, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
@ -608,22 +611,25 @@ void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, doub
namespace cv { namespace gpu { namespace imgproc
{
void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
const float r[9], const float rinv[9], float f, float s,
float half_w, float half_h, cudaStream_t stream);
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream);
}}}
void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,
void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);
Mat Rinv = R.inv();
CV_Assert(Rinv.isContinuous());
CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);
Mat K_Rinv = K * R.t();
Mat R_Kinv = R * K.inv();
CV_Assert(K_Rinv.isContinuous());
CV_Assert(R_Kinv.isContinuous());
map_x.create(dst_roi.size(), CV_32F);
map_y.create(dst_roi.size(), CV_32F);
imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),
static_cast<float>(f), static_cast<float>(s), 0.5f*src_size.width, 0.5f*src_size.height,
StreamAccessor::getStream(stream));
imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
scale, StreamAccessor::getStream(stream));
}
@ -633,22 +639,25 @@ void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R
namespace cv { namespace gpu { namespace imgproc
{
void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
const float r[9], const float rinv[9], float f, float s,
float half_w, float half_h, cudaStream_t stream);
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream);
}}}
void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,
void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);
Mat Rinv = R.inv();
CV_Assert(Rinv.isContinuous());
CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);
Mat K_Rinv = K * R.t();
Mat R_Kinv = R * K.inv();
CV_Assert(K_Rinv.isContinuous());
CV_Assert(R_Kinv.isContinuous());
map_x.create(dst_roi.size(), CV_32F);
map_y.create(dst_roi.size(), CV_32F);
imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),
static_cast<float>(f), static_cast<float>(s), 0.5f*src_size.width, 0.5f*src_size.height,
StreamAccessor::getStream(stream));
imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
scale, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////

View File

@ -53,8 +53,12 @@ struct CV_EXPORTS CameraParams
CameraParams();
CameraParams(const CameraParams& other);
const CameraParams& operator =(const CameraParams& other);
Mat K() const;
double focal; // Focal length
double aspect; // Aspect ratio
double ppx; // Principal point X
double ppy; // Principal point Y
Mat R; // Rotation
Mat t; // Translation
};

View File

@ -58,9 +58,7 @@ public:
void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras)
{
estimate(features, pairwise_matches, cameras);
}
{ estimate(features, pairwise_matches, cameras); }
protected:
virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
@ -90,6 +88,8 @@ public:
BundleAdjuster(int cost_space = FOCAL_RAY_SPACE, float conf_thresh = 1.f)
: cost_space_(cost_space), conf_thresh_(conf_thresh) {}
Mat K;
private:
void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras);

View File

@ -55,27 +55,22 @@ namespace detail {
class CV_EXPORTS Warper
{
public:
enum { PLANE, CYLINDRICAL, SPHERICAL };
// TODO remove this method
static Ptr<Warper> createByCameraFocal(float focal, int type, bool try_gpu = false);
virtual ~Warper() {}
virtual Point warp(const Mat &src, float focal, const Mat& R, Mat &dst,
virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode = INTER_LINEAR, int border_mode = BORDER_REFLECT) = 0;
virtual Rect warpRoi(const Size &sz, float focal, const Mat &R) = 0;
virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R) = 0;
};
struct CV_EXPORTS ProjectorBase
{
void setTransformation(const Mat& R);
void setCameraParams(const Mat &K, const Mat &R);
Size size;
float focal;
float r[9];
float rinv[9];
float scale;
float k[9];
float rinv[9];
float r_kinv[9];
float k_rinv[9];
};
@ -83,10 +78,10 @@ template <class P>
class CV_EXPORTS WarperBase : public Warper
{
public:
virtual Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,
virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode);
virtual Rect warpRoi(const Size &sz, float focal, const Mat &R);
virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R);
protected:
// Detects ROI of the destination image. It's correct for any projection.
@ -105,7 +100,6 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
float plane_dist;
};
@ -113,11 +107,7 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
class CV_EXPORTS PlaneWarper : public WarperBase<PlaneProjector>
{
public:
PlaneWarper(float plane_dist = 1.f, float scale = 1.f)
{
projector_.plane_dist = plane_dist;
projector_.scale = scale;
}
PlaneWarper(float scale = 1.f) { projector_.scale = scale; }
protected:
void detectResultRoi(Point &dst_tl, Point &dst_br);
@ -127,8 +117,8 @@ protected:
class CV_EXPORTS PlaneWarperGpu : public PlaneWarper
{
public:
PlaneWarperGpu(float plane_dist = 1.f, float scale = 1.f) : PlaneWarper(plane_dist, scale) {}
Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,
PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}
Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode);
private:
@ -149,7 +139,7 @@ struct CV_EXPORTS SphericalProjector : ProjectorBase
class CV_EXPORTS SphericalWarper : public WarperBase<SphericalProjector>
{
public:
SphericalWarper(float scale = 300.f) { projector_.scale = scale; }
SphericalWarper(float scale) { projector_.scale = scale; }
protected:
void detectResultRoi(Point &dst_tl, Point &dst_br);
@ -160,9 +150,9 @@ protected:
class CV_EXPORTS SphericalWarperGpu : public SphericalWarper
{
public:
SphericalWarperGpu(float scale = 300.f) : SphericalWarper(scale) {}
Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,
int interp_mode, int border_mode);
SphericalWarperGpu(float scale) : SphericalWarper(scale) {}
Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode);
private:
gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;
@ -181,13 +171,11 @@ struct CV_EXPORTS CylindricalProjector : ProjectorBase
class CV_EXPORTS CylindricalWarper : public WarperBase<CylindricalProjector>
{
public:
CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }
CylindricalWarper(float scale) { projector_.scale = scale; }
protected:
void detectResultRoi(Point &dst_tl, Point &dst_br)
{
WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);
}
{ WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br); }
};
@ -195,9 +183,9 @@ protected:
class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper
{
public:
CylindricalWarperGpu(float scale = 300.f) : CylindricalWarper(scale) {}
Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,
int interp_mode, int border_mode);
CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}
Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode);
private:
gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;
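
A hedged usage sketch of the reworked interface (the wrapper function name is invented; K and R are per-camera 3x3 CV_32F matrices, for example CameraParams::K() converted to float):

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/stitching/detail/warpers.hpp>

cv::Point warpOneImage(const cv::Mat &src, const cv::Mat &K, const cv::Mat &R,
                       float scale, cv::Mat &dst)
{
    // scale replaces the old focal-length constructor argument.
    cv::detail::SphericalWarper warper(scale);
    return warper.warp(src, K, R, dst, cv::INTER_LINEAR, cv::BORDER_REFLECT);
}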

View File

@ -50,14 +50,11 @@ namespace cv {
namespace detail {
template <class P>
Point WarperBase<P>::warp(const Mat &src, float focal, const Mat &R, Mat &dst,
Point WarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode)
{
src_size_ = src.size();
projector_.size = src.size();
projector_.focal = focal;
projector_.setTransformation(R);
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(dst_tl, dst_br);
@ -84,13 +81,10 @@ Point WarperBase<P>::warp(const Mat &src, float focal, const Mat &R, Mat &dst,
template <class P>
Rect WarperBase<P>::warpRoi(const Size &sz, float focal, const Mat &R)
Rect WarperBase<P>::warpRoi(const Size &sz, const Mat &K, const Mat &R)
{
src_size_ = sz;
projector_.size = sz;
projector_.focal = focal;
projector_.setTransformation(R);
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(dst_tl, dst_br);
@ -165,43 +159,37 @@ void WarperBase<P>::detectResultRoiByBorder(Point &dst_tl, Point &dst_br)
inline
void PlaneProjector::mapForward(float x, float y, float &u, float &v)
{
x -= size.width * 0.5f;
y -= size.height * 0.5f;
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float x_ = r[0] * x + r[1] * y + r[2] * focal;
float y_ = r[3] * x + r[4] * y + r[5] * focal;
float z_ = r[6] * x + r[7] * y + r[8] * focal;
u = scale * x_ / z_ * plane_dist;
v = scale * y_ / z_ * plane_dist;
u = scale * x_ / z_;
v = scale * y_ / z_;
}
inline
void PlaneProjector::mapBackward(float u, float v, float &x, float &y)
{
float x_ = u / scale;
float y_ = v / scale;
u /= scale;
v /= scale;
float z;
x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * plane_dist;
y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * plane_dist;
z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * plane_dist;
x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2];
y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5];
z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8];
x = focal * x / z + size.width * 0.5f;
y = focal * y / z + size.height * 0.5f;
x /= z;
y /= z;
}
inline
void SphericalProjector::mapForward(float x, float y, float &u, float &v)
{
x -= size.width * 0.5f;
y -= size.height * 0.5f;
float x_ = r[0] * x + r[1] * y + r[2] * focal;
float y_ = r[3] * x + r[4] * y + r[5] * focal;
float z_ = r[6] * x + r[7] * y + r[8] * focal;
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
u = scale * atan2f(x_, z_);
v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));
@ -211,30 +199,30 @@ void SphericalProjector::mapForward(float x, float y, float &u, float &v)
inline
void SphericalProjector::mapBackward(float u, float v, float &x, float &y)
{
float sinv = sinf(static_cast<float>(CV_PI) - v / scale);
float x_ = sinv * sinf(u / scale);
float y_ = cosf(static_cast<float>(CV_PI) - v / scale);
float z_ = sinv * cosf(u / scale);
u /= scale;
v /= scale;
float sinv = sinf(static_cast<float>(CV_PI) - v);
float x_ = sinv * sinf(u);
float y_ = cosf(static_cast<float>(CV_PI) - v);
float z_ = sinv * cosf(u);
float z;
x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;
y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;
z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
x = focal * x / z + size.width * 0.5f;
y = focal * y / z + size.height * 0.5f;
x /= z;
y /= z;
}
inline
void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
{
x -= size.width * 0.5f;
y -= size.height * 0.5f;
float x_ = r[0] * x + r[1] * y + r[2] * focal;
float y_ = r[3] * x + r[4] * y + r[5] * focal;
float z_ = r[6] * x + r[7] * y + r[8] * focal;
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
u = scale * atan2f(x_, z_);
v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
@ -244,17 +232,20 @@ void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
inline
void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)
{
float x_ = sinf(u / scale);
float y_ = v / scale;
float z_ = cosf(u / scale);
u /= scale;
v /= scale;
float x_ = sinf(u);
float y_ = v;
float z_ = cosf(u);
float z;
x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;
y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;
z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
x = focal * x / z + size.width * 0.5f;
y = focal * y / z + size.height * 0.5f;
x /= z;
y /= z;
}
} // namespace detail
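
For symmetry with the backward maps, every projector's forward path now lifts the source pixel to a ray with R * K^-1 before the surface-specific mapping. A restatement of the spherical case above, with no new behaviour:

#include <cmath>
#include <opencv2/core/core.hpp>   // for CV_PI

inline void sphericalForwardSketch(const float r_kinv[9], float scale,
                                   float x, float y, float &u, float &v)
{
    // Pixel (x, y, 1) -> ray via R * K^-1, then spherical coordinates scaled by "scale".
    float x_ = r_kinv[0]*x + r_kinv[1]*y + r_kinv[2];
    float y_ = r_kinv[3]*x + r_kinv[4]*y + r_kinv[5];
    float z_ = r_kinv[6]*x + r_kinv[7]*y + r_kinv[8];
    u = scale * atan2f(x_, z_);
    v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_*x_ + y_*y_ + z_*z_)));
}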

View File

@ -51,28 +51,28 @@ class WarperCreator
{
public:
virtual ~WarperCreator() {}
virtual Ptr<detail::Warper> createByFocalLength(double f) const = 0;
virtual Ptr<detail::Warper> create(float scale) const = 0;
};
class PlaneWarper : public WarperCreator
{
public:
Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarper(f); }
Ptr<detail::Warper> create(float scale) const { return new detail::PlaneWarper(scale); }
};
class CylindricalWarper: public WarperCreator
{
public:
Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarper(f); }
Ptr<detail::Warper> create(float scale) const { return new detail::CylindricalWarper(scale); }
};
class SphericalWarper: public WarperCreator
{
public:
Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarper(f); }
Ptr<detail::Warper> create(float scale) const { return new detail::SphericalWarper(scale); }
};
@ -80,21 +80,21 @@ public:
class PlaneWarperGpu: public WarperCreator
{
public:
Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarperGpu(f); }
Ptr<detail::Warper> create(float scale) const { return new detail::PlaneWarperGpu(scale); }
};
class CylindricalWarperGpu: public WarperCreator
{
public:
Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarperGpu(f); }
Ptr<detail::Warper> create(float scale) const { return new detail::CylindricalWarperGpu(scale); }
};
class SphericalWarperGpu: public WarperCreator
{
public:
Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarperGpu(f); }
Ptr<detail::Warper> create(float scale) const { return new detail::SphericalWarperGpu(scale); }
};
#endif
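
A hedged sketch of selecting a warper through the new create(scale) factory (the helper name and string dispatch are illustrative, mirroring how stitching_detailed.cpp uses these creators later in this diff; the creators are assumed to live in namespace cv):

#include <string>
#include <opencv2/stitching/warpers.hpp>

cv::Ptr<cv::detail::Warper> makeWarper(const std::string &type, float warped_image_scale)
{
    cv::Ptr<cv::WarperCreator> creator;
    if (type == "plane")            creator = new cv::PlaneWarper();
    else if (type == "cylindrical") creator = new cv::CylindricalWarper();
    else                            creator = new cv::SphericalWarper();
    // warped_image_scale stands in for the median focal length used by the stitcher.
    return creator->create(warped_image_scale);
}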

View File

@ -47,17 +47,29 @@ using namespace std;
namespace cv {
namespace detail {
CameraParams::CameraParams() : focal(1), R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
CameraParams::CameraParams() : focal(1), aspect(1), ppx(0), ppy(0),
R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
CameraParams::CameraParams(const CameraParams &other) { *this = other; }
const CameraParams& CameraParams::operator =(const CameraParams &other)
{
focal = other.focal;
ppx = other.ppx;
ppy = other.ppy;
aspect = other.aspect;
R = other.R.clone();
t = other.t.clone();
return *this;
}
Mat CameraParams::K() const
{
Mat_<double> k = Mat::eye(3, 3, CV_64F);
k(0,0) = focal; k(0,2) = ppx;
k(1,1) = focal * aspect; k(1,2) = ppy;
return k;
}
} // namespace detail
} // namespace cv

View File

@ -142,6 +142,13 @@ void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features, c
findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);
span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));
// As calculations were performed under the assumption that the p.p. is in the image center
for (int i = 0; i < num_images; ++i)
{
cameras[i].ppx = 0.5 * features[i].img_size.width;
cameras[i].ppy = 0.5 * features[i].img_size.height;
}
LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
@ -162,11 +169,14 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
pairwise_matches_ = &pairwise_matches[0];
// Prepare focals and rotations
cameras_.create(num_images_ * 4, 1, CV_64F);
cameras_.create(num_images_ * 7, 1, CV_64F);
SVD svd;
for (int i = 0; i < num_images_; ++i)
{
cameras_.at<double>(i * 4, 0) = cameras[i].focal;
cameras_.at<double>(i * 7, 0) = cameras[i].focal;
cameras_.at<double>(i * 7 + 1, 0) = cameras[i].ppx;
cameras_.at<double>(i * 7 + 2, 0) = cameras[i].ppy;
cameras_.at<double>(i * 7 + 3, 0) = cameras[i].aspect;
svd(cameras[i].R, SVD::FULL_UV);
Mat R = svd.u * svd.vt;
@ -175,9 +185,9 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
Mat rvec;
Rodrigues(R, rvec); CV_Assert(rvec.type() == CV_32F);
cameras_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);
cameras_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);
cameras_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);
cameras_.at<double>(i * 7 + 4, 0) = rvec.at<float>(0, 0);
cameras_.at<double>(i * 7 + 5, 0) = rvec.at<float>(1, 0);
cameras_.at<double>(i * 7 + 6, 0) = rvec.at<float>(2, 0);
}
// Select only consistent image pairs for further adjustment
@ -197,7 +207,7 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
for (size_t i = 0; i < edges_.size(); ++i)
total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + edges_[i].second].num_inliers);
CvLevMarq solver(num_images_ * 4, total_num_matches_ * 3,
CvLevMarq solver(num_images_ * 7, total_num_matches_ * 2,
cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));
CvMat matParams = cameras_;
@ -234,17 +244,20 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
}
}
LOGLN("");
LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)));
LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)) / total_num_matches_);
LOGLN("Bundle adjustment, iterations done: " << count);
// Obtain global motion
for (int i = 0; i < num_images_; ++i)
{
cameras[i].focal = cameras_.at<double>(i * 4, 0);
cameras[i].focal = cameras_.at<double>(i * 7, 0);
cameras[i].ppx = cameras_.at<double>(i * 7 + 1, 0);
cameras[i].ppy = cameras_.at<double>(i * 7 + 2, 0);
cameras[i].aspect = cameras_.at<double>(i * 7 + 3, 0);
Mat rvec(3, 1, CV_64F);
rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);
rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);
rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);
rvec.at<double>(0, 0) = cameras_.at<double>(i * 7 + 4, 0);
rvec.at<double>(1, 0) = cameras_.at<double>(i * 7 + 5, 0);
rvec.at<double>(2, 0) = cameras_.at<double>(i * 7 + 6, 0);
Rodrigues(rvec, cameras[i].R);
Mat Mf;
cameras[i].R.convertTo(Mf, CV_32F);
@ -265,62 +278,65 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
void BundleAdjuster::calcError(Mat &err)
{
err.create(total_num_matches_ * 3, 1, CV_64F);
err.create(total_num_matches_ * 2, 1, CV_64F);
int match_idx = 0;
for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
{
int i = edges_[edge_idx].first;
int j = edges_[edge_idx].second;
double f1 = cameras_.at<double>(i * 4, 0);
double f2 = cameras_.at<double>(j * 4, 0);
double R1[9], R2[9];
Mat R1_(3, 3, CV_64F, R1), R2_(3, 3, CV_64F, R2);
double f1 = cameras_.at<double>(i * 7, 0);
double f2 = cameras_.at<double>(j * 7, 0);
double ppx1 = cameras_.at<double>(i * 7 + 1, 0);
double ppx2 = cameras_.at<double>(j * 7 + 1, 0);
double ppy1 = cameras_.at<double>(i * 7 + 2, 0);
double ppy2 = cameras_.at<double>(j * 7 + 2, 0);
double a1 = cameras_.at<double>(i * 7 + 3, 0);
double a2 = cameras_.at<double>(j * 7 + 3, 0);
double R1[9];
Mat R1_(3, 3, CV_64F, R1);
Mat rvec(3, 1, CV_64F);
rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);
rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);
rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);
Rodrigues(rvec, R1_); CV_Assert(R1_.type() == CV_64F);
rvec.at<double>(0, 0) = cameras_.at<double>(j * 4 + 1, 0);
rvec.at<double>(1, 0) = cameras_.at<double>(j * 4 + 2, 0);
rvec.at<double>(2, 0) = cameras_.at<double>(j * 4 + 3, 0);
Rodrigues(rvec, R2_); CV_Assert(R2_.type() == CV_64F);
rvec.at<double>(0, 0) = cameras_.at<double>(i * 7 + 4, 0);
rvec.at<double>(1, 0) = cameras_.at<double>(i * 7 + 5, 0);
rvec.at<double>(2, 0) = cameras_.at<double>(i * 7 + 6, 0);
Rodrigues(rvec, R1_);
double R2[9];
Mat R2_(3, 3, CV_64F, R2);
rvec.at<double>(0, 0) = cameras_.at<double>(j * 7 + 4, 0);
rvec.at<double>(1, 0) = cameras_.at<double>(j * 7 + 5, 0);
rvec.at<double>(2, 0) = cameras_.at<double>(j * 7 + 6, 0);
Rodrigues(rvec, R2_);
const ImageFeatures& features1 = features_[i];
const ImageFeatures& features2 = features_[j];
const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];
Mat_<double> K1 = Mat::eye(3, 3, CV_64F);
K1(0,0) = f1; K1(0,2) = ppx1;
K1(1,1) = f1*a1; K1(1,2) = ppy1;
Mat_<double> K2 = Mat::eye(3, 3, CV_64F);
K2(0,0) = f2; K2(0,2) = ppx2;
K2(1,1) = f2*a2; K2(1,2) = ppy2;
Mat_<double> H = K2 * R2_.inv() * R1_ * K1.inv();
for (size_t k = 0; k < matches_info.matches.size(); ++k)
{
if (!matches_info.inliers_mask[k])
continue;
const DMatch& m = matches_info.matches[k];
Point2d kp1 = features1.keypoints[m.queryIdx].pt;
kp1.x -= 0.5 * features1.img_size.width;
kp1.y -= 0.5 * features1.img_size.height;
Point2d kp2 = features2.keypoints[m.trainIdx].pt;
kp2.x -= 0.5 * features2.img_size.width;
kp2.y -= 0.5 * features2.img_size.height;
double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);
double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);
Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);
Point3d p2(kp2.x / len2, kp2.y / len2, f2 / len2);
Point2d p1 = features1.keypoints[m.queryIdx].pt;
Point2d p2 = features2.keypoints[m.trainIdx].pt;
double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);
double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);
double z = H(2,0)*p1.x + H(2,1)*p1.y + H(2,2);
Point3d d1(p1.x * R1[0] + p1.y * R1[1] + p1.z * R1[2],
p1.x * R1[3] + p1.y * R1[4] + p1.z * R1[5],
p1.x * R1[6] + p1.y * R1[7] + p1.z * R1[8]);
Point3d d2(p2.x * R2[0] + p2.y * R2[1] + p2.z * R2[2],
p2.x * R2[3] + p2.y * R2[4] + p2.z * R2[5],
p2.x * R2[6] + p2.y * R2[7] + p2.z * R2[8]);
double mult = 1; // For cost_space_ == RAY_SPACE
if (cost_space_ == FOCAL_RAY_SPACE)
mult = sqrt(f1 * f2);
err.at<double>(3 * match_idx, 0) = mult * (d1.x - d2.x);
err.at<double>(3 * match_idx + 1, 0) = mult * (d1.y - d2.y);
err.at<double>(3 * match_idx + 2, 0) = mult * (d1.z - d2.z);
err.at<double>(2 * match_idx, 0) = p2.x - x/z;
err.at<double>(2 * match_idx + 1, 0) = p2.y - y/z;
match_idx++;
}
}
@ -329,45 +345,23 @@ void BundleAdjuster::calcError(Mat &err)
void BundleAdjuster::calcJacobian()
{
J_.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);
J_.create(total_num_matches_ * 2, num_images_ * 7, CV_64F);
double f, r;
const double df = 0.001; // Focal length step
const double dr = 0.001; // Angle step
double val;
const double step = 1e-3;
for (int i = 0; i < num_images_; ++i)
{
f = cameras_.at<double>(i * 4, 0);
cameras_.at<double>(i * 4, 0) = f - df;
calcError(err1_);
cameras_.at<double>(i * 4, 0) = f + df;
calcError(err2_);
calcDeriv(err1_, err2_, 2 * df, J_.col(i * 4));
cameras_.at<double>(i * 4, 0) = f;
r = cameras_.at<double>(i * 4 + 1, 0);
cameras_.at<double>(i * 4 + 1, 0) = r - dr;
calcError(err1_);
cameras_.at<double>(i * 4 + 1, 0) = r + dr;
calcError(err2_);
calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 1));
cameras_.at<double>(i * 4 + 1, 0) = r;
r = cameras_.at<double>(i * 4 + 2, 0);
cameras_.at<double>(i * 4 + 2, 0) = r - dr;
calcError(err1_);
cameras_.at<double>(i * 4 + 2, 0) = r + dr;
calcError(err2_);
calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 2));
cameras_.at<double>(i * 4 + 2, 0) = r;
r = cameras_.at<double>(i * 4 + 3, 0);
cameras_.at<double>(i * 4 + 3, 0) = r - dr;
calcError(err1_);
cameras_.at<double>(i * 4 + 3, 0) = r + dr;
calcError(err2_);
calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 3));
cameras_.at<double>(i * 4 + 3, 0) = r;
for (int j = 0; j < 7; ++j)
{
val = cameras_.at<double>(i * 7 + j, 0);
cameras_.at<double>(i * 7 + j, 0) = val - step;
calcError(err1_);
cameras_.at<double>(i * 7 + j, 0) = val + step;
calcError(err2_);
calcDeriv(err1_, err2_, 2 * step, J_.col(i * 7 + j));
cameras_.at<double>(i * 7 + j, 0) = val;
}
}
}
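
calcDeriv() itself is not part of this diff; presumably it fills one Jacobian column with a central difference, roughly:

// Presumed helper (not shown in this diff): J.col(c) = (err2 - err1) / h,
// where h equals 2 * step at the call site above.
static void calcDerivSketch(const cv::Mat &err1, const cv::Mat &err2, double h, cv::Mat res)
{
    for (int r = 0; r < err1.rows; ++r)
        res.at<double>(r, 0) = (err2.at<double>(r, 0) - err1.at<double>(r, 0)) / h;
}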

View File

@ -186,7 +186,7 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
LOGLN("Initial focal length #" << indices[i]+1 << ": " << cameras[i].focal);
LOGLN("Initial intrinsic parameters #" << indices[i]+1 << ":\n " << cameras[i].K());
}
detail::BundleAdjuster adjuster(detail::BundleAdjuster::FOCAL_RAY_SPACE, conf_thresh_);
@ -196,7 +196,7 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
vector<double> focals;
for (size_t i = 0; i < cameras.size(); ++i)
{
LOGLN("Camera #" << indices[i]+1 << " focal length: " << cameras[i].focal);
LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
focals.push_back(cameras[i].focal);
}
nth_element(focals.begin(), focals.begin() + focals.size()/2, focals.end());
@ -229,14 +229,18 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
}
// Warp images and their masks
Ptr<detail::Warper> warper = warper_->createByFocalLength(warped_image_scale * seam_work_aspect);
Ptr<detail::Warper> warper = warper_->create(warped_image_scale * seam_work_aspect);
for (int i = 0; i < num_imgs; ++i)
{
corners[i] = warper->warp(seam_est_imgs[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
cameras[i].R, images_warped[i]);
Mat_<float> K;
cameras[i].K().convertTo(K, CV_32F);
K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
corners[i] = warper->warp(seam_est_imgs[i], K, cameras[i].R, images_warped[i]);
sizes[i] = images_warped[i].size();
warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
}
vector<Mat> images_warped_f(num_imgs);
@ -281,13 +285,15 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
warper = warper_->createByFocalLength(warped_image_scale);
warper = warper_->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_imgs; ++i)
{
// Update camera focal
// Update intrinsics
cameras[i].focal *= compose_work_aspect;
cameras[i].ppx *= compose_work_aspect;
cameras[i].ppy *= compose_work_aspect;
// Update corner and size
Size sz = full_img_sizes[i];
@ -297,7 +303,9 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
Mat K;
cameras[i].K().convertTo(K, CV_32F);
Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
@ -309,15 +317,16 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
full_img.release();
Size img_size = img.size();
Mat K;
cameras[img_idx].K().convertTo(K, CV_32F);
// Warp the current image
warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
img_warped);
warper->warp(img, K, cameras[img_idx].R, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
INTER_NEAREST, BORDER_CONSTANT);
warper->warp(mask, K, cameras[img_idx].R, mask_warped, INTER_NEAREST, BORDER_CONSTANT);
// Compensate exposure
exposure_comp_->apply(img_idx, corners[img_idx], img_warped, mask_warped);

View File

@ -47,46 +47,30 @@ using namespace std;
namespace cv {
namespace detail {
Ptr<Warper> Warper::createByCameraFocal(float focal, int type, bool try_gpu)
void ProjectorBase::setCameraParams(const Mat &K, const Mat &R)
{
#ifndef ANDROID
bool can_use_gpu = try_gpu && gpu::getCudaEnabledDeviceCount();
if (can_use_gpu)
{
if (type == PLANE)
return new PlaneWarperGpu(focal);
if (type == CYLINDRICAL)
return new CylindricalWarperGpu(focal);
if (type == SPHERICAL)
return new SphericalWarperGpu(focal);
}
else
#endif
{
if (type == PLANE)
return new PlaneWarper(focal);
if (type == CYLINDRICAL)
return new CylindricalWarper(focal);
if (type == SPHERICAL)
return new SphericalWarper(focal);
}
CV_Error(CV_StsBadArg, "unsupported warping type");
return NULL;
}
CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);
CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);
Mat_<float> K_(K);
k[0] = K_(0,0); k[1] = K_(0,1); k[2] = K_(0,2);
k[3] = K_(1,0); k[4] = K_(1,1); k[5] = K_(1,2);
k[6] = K_(2,0); k[7] = K_(2,1); k[8] = K_(2,2);
void ProjectorBase::setTransformation(const Mat &R)
{
CV_Assert(R.size() == Size(3, 3));
CV_Assert(R.type() == CV_32F);
r[0] = R.at<float>(0, 0); r[1] = R.at<float>(0, 1); r[2] = R.at<float>(0, 2);
r[3] = R.at<float>(1, 0); r[4] = R.at<float>(1, 1); r[5] = R.at<float>(1, 2);
r[6] = R.at<float>(2, 0); r[7] = R.at<float>(2, 1); r[8] = R.at<float>(2, 2);
Mat_<float> Rinv = R.t();
rinv[0] = Rinv(0,0); rinv[1] = Rinv(0,1); rinv[2] = Rinv(0,2);
rinv[3] = Rinv(1,0); rinv[4] = Rinv(1,1); rinv[5] = Rinv(1,2);
rinv[6] = Rinv(2,0); rinv[7] = Rinv(2,1); rinv[8] = Rinv(2,2);
Mat Rinv = R.inv();
rinv[0] = Rinv.at<float>(0, 0); rinv[1] = Rinv.at<float>(0, 1); rinv[2] = Rinv.at<float>(0, 2);
rinv[3] = Rinv.at<float>(1, 0); rinv[4] = Rinv.at<float>(1, 1); rinv[5] = Rinv.at<float>(1, 2);
rinv[6] = Rinv.at<float>(2, 0); rinv[7] = Rinv.at<float>(2, 1); rinv[8] = Rinv.at<float>(2, 2);
Mat_<float> R_Kinv = R * K.inv();
r_kinv[0] = R_Kinv(0,0); r_kinv[1] = R_Kinv(0,1); r_kinv[2] = R_Kinv(0,2);
r_kinv[3] = R_Kinv(1,0); r_kinv[4] = R_Kinv(1,1); r_kinv[5] = R_Kinv(1,2);
r_kinv[6] = R_Kinv(2,0); r_kinv[7] = R_Kinv(2,1); r_kinv[8] = R_Kinv(2,2);
Mat_<float> K_Rinv = K * Rinv;
k_rinv[0] = K_Rinv(0,0); k_rinv[1] = K_Rinv(0,1); k_rinv[2] = K_Rinv(0,2);
k_rinv[3] = K_Rinv(1,0); k_rinv[4] = K_Rinv(1,1); k_rinv[5] = K_Rinv(1,2);
k_rinv[6] = K_Rinv(2,0); k_rinv[7] = K_Rinv(2,1); k_rinv[8] = K_Rinv(2,2);
}
@ -122,18 +106,16 @@ void PlaneWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
}
#ifndef ANDROID
Point PlaneWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst, int interp_mode, int border_mode)
Point PlaneWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst, int interp_mode, int border_mode)
{
src_size_ = src.size();
projector_.size = src.size();
projector_.focal = focal;
projector_.setTransformation(R);
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(dst_tl, dst_br);
gpu::buildWarpPlaneMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),
R, focal, projector_.scale, projector_.plane_dist, d_xmap_, d_ymap_);
K, R, projector_.scale, d_xmap_, d_ymap_);
gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);
d_src_.upload(src);
@ -163,9 +145,11 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
float z = projector_.rinv[7];
if (y > 0.f)
{
x = projector_.focal * x / z + src_size_.width * 0.5f;
y = projector_.focal * y / z + src_size_.height * 0.5f;
if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)
//x = projector_.focal * x / z + src_size_.width * 0.5f;
//y = projector_.focal * y / z + src_size_.height * 0.5f;
float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
float y_ = projector_.k[4] * y / z + projector_.k[5];
if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)
{
tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(CV_PI * projector_.scale));
br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(CV_PI * projector_.scale));
@ -177,9 +161,11 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
z = projector_.rinv[7];
if (y > 0.f)
{
x = projector_.focal * x / z + src_size_.width * 0.5f;
y = projector_.focal * y / z + src_size_.height * 0.5f;
if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)
//x = projector_.focal * x / z + src_size_.width * 0.5f;
//y = projector_.focal * y / z + src_size_.height * 0.5f;
float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
float y_ = projector_.k[4] * y / z + projector_.k[5];
if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)
{
tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(0));
br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(0));
@ -193,19 +179,17 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
}
#ifndef ANDROID
Point SphericalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst,
int interp_mode, int border_mode)
Point SphericalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode)
{
src_size_ = src.size();
projector_.size = src.size();
projector_.focal = focal;
projector_.setTransformation(R);
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(dst_tl, dst_br);
gpu::buildWarpSphericalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),
R, focal, projector_.scale, d_xmap_, d_ymap_);
K, R, projector_.scale, d_xmap_, d_ymap_);
gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);
d_src_.upload(src);
@ -220,19 +204,17 @@ Point SphericalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &d
}
Point CylindricalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst,
int interp_mode, int border_mode)
Point CylindricalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,
int interp_mode, int border_mode)
{
src_size_ = src.size();
projector_.size = src.size();
projector_.focal = focal;
projector_.setTransformation(R);
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(dst_tl, dst_br);
gpu::buildWarpCylindricalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),
R, focal, projector_.scale, d_xmap_, d_ymap_);
K, R, projector_.scale, d_xmap_, d_ymap_);
gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);
d_src_.upload(src);

View File

@ -103,7 +103,7 @@ int parseCmdArgs(int argc, char** argv)
printUsage();
return -1;
}
else if (string(argv[i]) == "--try_gpu")
else if (string(argv[i]) == "--try_use_gpu")
{
if (string(argv[i + 1]) == "no")
try_use_gpu = false;

View File

@ -42,6 +42,7 @@
//M*/
#include <fstream>
#include <string>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
@ -52,6 +53,7 @@
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
using namespace std;
using namespace cv;
@ -118,7 +120,7 @@ float conf_thresh = 1.f;
bool wave_correct = true;
bool save_graph = false;
std::string save_graph_to;
int warp_type = Warper::SPHERICAL;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.65f;
int seam_find_type = SeamFinder::GC_COLOR;
@ -223,17 +225,7 @@ int parseCmdArgs(int argc, char** argv)
}
else if (string(argv[i]) == "--warp")
{
if (string(argv[i + 1]) == "plane")
warp_type = Warper::PLANE;
else if (string(argv[i + 1]) == "cylindrical")
warp_type = Warper::CYLINDRICAL;
else if (string(argv[i + 1]) == "spherical")
warp_type = Warper::SPHERICAL;
else
{
cout << "Bad warping method\n";
return -1;
}
warp_type = string(argv[i + 1]);
i++;
}
else if (string(argv[i]) == "--expos_comp")
@ -479,15 +471,42 @@ int main(int argc, char* argv[])
}
// Warp images and their masks
Ptr<Warper> warper = Warper::createByCameraFocal(static_cast<float>(warped_image_scale * seam_work_aspect),
warp_type, try_gpu);
Ptr<WarperCreator> warper_creator;
#ifndef ANDROID
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
{
if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
}
else
#endif
{
if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
}
if (warper_creator.empty())
{
cout << "Can't create the following warper '" << warp_type << "'\n";
return 1;
}
Ptr<Warper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
for (int i = 0; i < num_images; ++i)
{
corners[i] = warper->warp(images[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
cameras[i].R, images_warped[i]);
Mat_<float> K;
cameras[i].K().convertTo(K, CV_32F);
K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
corners[i] = warper->warp(images[i], K, cameras[i].R, images_warped[i]);
sizes[i] = images_warped[i].size();
warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
}
vector<Mat> images_warped_f(num_images);
@ -535,23 +554,27 @@ int main(int argc, char* argv[])
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
warper = Warper::createByCameraFocal(warped_image_scale, warp_type, try_gpu);
warper = warper_creator->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_images; ++i)
{
// Update camera focal
// Update intrinsics
cameras[i].focal *= compose_work_aspect;
cameras[i].ppx *= compose_work_aspect;
cameras[i].ppy *= compose_work_aspect;
// Update corner and size
Size sz = full_img_sizes[i];
if (abs(compose_scale - 1) > 1e-1)
if (std::abs(compose_scale - 1) > 1e-1)
{
sz.width = cvRound(full_img_sizes[i].width * compose_scale);
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
Mat K;
cameras[i].K().convertTo(K, CV_32F);
Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
@ -563,15 +586,16 @@ int main(int argc, char* argv[])
full_img.release();
Size img_size = img.size();
Mat K;
cameras[img_idx].K().convertTo(K, CV_32F);
// Warp the current image
warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
img_warped);
warper->warp(img, K, cameras[img_idx].R, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
INTER_NEAREST, BORDER_CONSTANT);
warper->warp(mask, K, cameras[img_idx].R, mask_warped, INTER_NEAREST, BORDER_CONSTANT);
// Compensate exposure
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);