added variational stereo correspondence (by Sergey Kosov) and polynomial fitting (by Onkar Raut)

This commit is contained in:
Vadim Pisarevsky 2011-06-15 12:10:33 +00:00
parent 0d09352fca
commit 0876f69dbf
4 changed files with 581 additions and 132 deletions


@@ -431,137 +431,179 @@ namespace cv
DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
};
typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
class LevMarqSparse {
public:
LevMarqSparse();
LevMarqSparse(int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (CV_CDECL * func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data, // user-specific data passed to the callbacks
BundleAdjustCallback cb, void* user_data
);
virtual ~LevMarqSparse();
virtual void run( int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (CV_CDECL * func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data // user-specific data passed to the callbacks
);
virtual void clear();
// useful function to do simple bundle adjustment tasks
static void bundleAdjust(vector<Point3d>& points, // positions of points in global coordinate system (input and output)
const vector<vector<Point2d> >& imagePoints, // projections of 3d points for every camera
const vector<vector<int> >& visibility, // visibility of 3d points for every camera
vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
vector<Mat>& R, // rotation matrices of all cameras (input and output)
vector<Mat>& T, // translation vectors of all cameras (input and output)
vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
const TermCriteria& criteria=
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
BundleAdjustCallback cb = 0, void* user_data = 0);
public:
virtual void optimize(CvMat &_vis); //main function that runs minimization
//iteratively asks for measurements for visible camera-point pairs
void ask_for_proj(CvMat &_vis,bool once=false);
//iteratively asks for Jacobians for every camera-point pair
void ask_for_projac(CvMat &_vis);
CvMat* err; //error X-hX
double prevErrNorm, errNorm;
double lambda;
CvTermCriteria criteria;
int iters;
CvMat** U; //size of array is equal to number of cameras
CvMat** V; //size of array is equal to number of points
CvMat** inv_V_star; //inverse of V*
CvMat** A;
CvMat** B;
CvMat** W;
CvMat* X; //measurement
CvMat* hX; //current measurement estimation given the new parameter vector
CvMat* prevP; //the most recently accepted parameter vector
CvMat* P; // parameters used to evaluate the function with new params;
// these parameters may be rejected
CvMat* deltaP; //computed increment of parameters (result of the normal system solution)
CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
// length of array is j = number of cameras
CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
// length of array is i = number of points
CvMat** Yj; //length of array is i = num_points
CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params
CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation
CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j
int num_cams;
int num_points;
int num_err_param;
int num_cam_param;
int num_point_param;
//target function and Jacobian pointers, which need to be initialized
void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
void* data;
BundleAdjustCallback cb;
void* user_data;
};
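The static bundleAdjust() declared above is the simple entry point to this machinery. A minimal sketch of how it might be driven is shown below; every variable name is illustrative, the containers have to be filled from a real reconstruction (feature tracks plus an initial calibration), and the usual OpenCV headers plus <cfloat> for DBL_EPSILON are assumed:

std::vector<cv::Point3d> points;                     // initial 3D point estimates (refined in place)
std::vector<std::vector<cv::Point2d> > imagePoints;  // imagePoints[cam][pt] = observed projection
std::vector<std::vector<int> > visibility;           // visibility[cam][pt] = 1 if point pt is seen by camera cam
std::vector<cv::Mat> cameraMatrix, R, T, distCoeffs; // per-camera intrinsics/extrinsics (refined in place)
// ... fill the containers from the reconstruction ...
cv::LevMarqSparse::bundleAdjust(points, imagePoints, visibility,
                                cameraMatrix, R, T, distCoeffs,
                                cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                                 30, DBL_EPSILON));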
CV_EXPORTS int chamerMatching( Mat& img, Mat& templ,
vector<vector<Point> >& results, vector<float>& cost,
double templScale=1, int maxMatches = 20,
double minMatchDistance = 1.0, int padX = 3,
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
double orientationWeight = 0.5, double truncate = 20);
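chamerMatching() above works on binary edge images; a hypothetical call could look like the following, where the file names are placeholders and Canny is just one common way to obtain the edge maps:

cv::Mat scene = cv::imread("scene.png", 0), tmpl = cv::imread("logo.png", 0);
cv::Mat sceneEdges, tmplEdges;
cv::Canny(scene, sceneEdges, 50, 150);        // edge map of the image to search in
cv::Canny(tmpl, tmplEdges, 50, 150);          // edge map of the template
std::vector<std::vector<cv::Point> > results; // matched template contours
std::vector<float> costs;                     // matching cost per result
int best = cv::chamerMatching(sceneEdges, tmplEdges, results, costs); // index of the best match (presumably -1 if none)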
class CV_EXPORTS StereoVar
{
public:
// Flags
enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_MEDIAN_FILTERING = 8};
enum {CYCLE_O, CYCLE_V};
enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
//! the default constructor
CV_WRAP StereoVar();
//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
//! the destructor
virtual ~StereoVar();
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, Mat& disp);
CV_PROP_RW int levels;
CV_PROP_RW double pyrScale;
CV_PROP_RW int nIt;
CV_PROP_RW int minDisp;
CV_PROP_RW int maxDisp;
CV_PROP_RW int poly_n;
CV_PROP_RW double poly_sigma;
CV_PROP_RW float fi;
CV_PROP_RW float lambda;
CV_PROP_RW int penalization;
CV_PROP_RW int cycle;
CV_PROP_RW int flags;
private:
void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
};
CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
}


@@ -0,0 +1,72 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// This original code was written by
// Onkar Raut
// Graduate Student,
// University of North Carolina at Charlotte
#include "precomp.hpp"
void cv::polyfit(const Mat& src_x, const Mat& src_y, Mat& dst, int order)
{
CV_Assert((src_x.rows>0)&&(src_y.rows>0)&&(src_x.cols==1)&&(src_y.cols==1)
&&(dst.cols==1)&&(dst.rows==(order+1))&&(order>=1));
Mat X;
X = Mat::zeros(src_x.rows, order+1,CV_32FC1);
Mat copy;
for(int i = 0; i <=order;i++)
{
copy = src_x.clone();
pow(copy,i,copy);
Mat M1 = X.col(i);
copy.col(0).copyTo(M1);
}
Mat X_t, X_inv;
transpose(X,X_t);
Mat temp = X_t*X;
Mat temp2;
invert (temp,temp2);
Mat temp3 = temp2*X_t;
Mat W = temp3*src_y;
W.copyTo(dst);
}
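In effect polyfit() builds a Vandermonde design matrix X (column i holds src_x raised to the power i) and solves the normal equations (X^T X) w = X^T y by explicit inversion. A small usage sketch with made-up data follows; note that the assertion above requires dst to be pre-allocated as an (order+1) x 1 column:

std::vector<float> xs, ys;
for (int i = 0; i < 20; i++)          // sample y = 2x^2 - 3x + 1
{
    xs.push_back((float)i);
    ys.push_back(2.0f*i*i - 3.0f*i + 1.0f);
}
cv::Mat src_x(xs), src_y(ys);         // 20x1 CV_32F columns
cv::Mat coeffs(3, 1, CV_32FC1);       // order = 2  ->  3 coefficients, lowest degree first
cv::polyfit(src_x, src_y, coeffs, 2); // coeffs should come out close to (1, -3, 2)^T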

modules/contrib/src/stereovar.cpp (new executable file, 314 lines)

@@ -0,0 +1,314 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
This is a modification of the variational stereo correspondence algorithm, described in:
S. Kosov, T. Thormaehlen, H.-P. Seidel "Accurate Real-Time Disparity Estimation with Variational Methods"
Proceedings of the 5th International Symposium on Visual Computing, Vegas, USA
This code is written by Sergey G. Kosov for "Visir PX" application as part of Project X (www.project-10.de)
*/
#include "precomp.hpp"
#include <limits.h>
namespace cv
{
StereoVar::StereoVar() : levels(3), pyrScale(0.5), nIt(3), minDisp(0), maxDisp(16), poly_n(5), poly_sigma(1.2), fi(1000.0f), lambda(0.0f), penalization(PENALIZATION_TICHONOV), cycle(CYCLE_V), flags(USE_SMART_ID)
{
}
StereoVar::StereoVar(int _levels, double _pyrScale, int _nIt, int _minDisp, int _maxDisp, int _poly_n, double _poly_sigma, float _fi, float _lambda, int _penalization, int _cycle, int _flags) : levels(_levels), pyrScale(_pyrScale), nIt(_nIt), minDisp(_minDisp), maxDisp(_maxDisp), poly_n(_poly_n), poly_sigma(_poly_sigma), fi(_fi), lambda(_lambda), penalization(_penalization), cycle(_cycle), flags(_flags)
{ // No Parameters check, since they are all public
}
StereoVar::~StereoVar()
{
}
static Mat diffX(Mat &img)
{
// TODO try pointers or assm
register int x, y;
Mat dst(img.size(), img.type());
dst.setTo(0);
for (x = 0; x < img.cols - 1; x++)
for (y = 0; y < img.rows; y++)
dst.at<float>(y, x) = img.at<float>(y, x + 1) - img.at<float>(y ,x);
return dst;
}
static Mat Gradient(Mat &img)
{
Mat sobel, sobelX, sobelY;
img.copyTo(sobelX);
img.copyTo(sobelY);
Sobel(img, sobelX, sobelX.type(), 1, 0, 1);
Sobel(img, sobelY, sobelY.type(), 0, 1, 1);
sobelX = abs(sobelX);
sobelY = abs(sobelY);
add(sobelX, sobelY, sobel);
sobelX.release();
sobelY.release();
return sobel;
}
static float g_c(Mat z, int x, int y, float l)
{
return 0.5f*l / sqrtf(l*l + z.at<float>(y,x)*z.at<float>(y,x));
}
static float g_p(Mat z, int x, int y, float l)
{
return 0.5f*l*l / (l*l + z.at<float>(y,x)*z.at<float>(y,x)) ;
}
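Reading z as the local gradient magnitude of the current disparity estimate (Gradient(U) in the solver below) and l as the contrast parameter lambda, the two helpers above are the Charbonnier and Perona-Malik diffusivities

g_C(z) = \frac{l}{2\sqrt{l^{2} + z^{2}}}, \qquad g_{PM}(z) = \frac{l^{2}}{2\,(l^{2} + z^{2})},

both equal to 1/2 at z = 0 and decaying towards 0 as |z| grows, so that smoothing is reduced across strong gradients of the disparity field.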
void StereoVar::VariationalSolver(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
{
register int n, x, y;
float gl = 1, gr = 1, gu = 1, gd = 1, gc = 4;
Mat U;
Mat Sobel;
u.copyTo(U);
int N = nIt;
float l = lambda;
float Fi = fi;
double scale = pow(pyrScale, (double) level);
if (flags & USE_SMART_ID) {
N = (int) (N / scale);
Fi /= (float) scale;
l *= (float) scale;
}
for (n = 0; n < N; n++) {
if (penalization != PENALIZATION_TICHONOV) {if(!Sobel.empty()) Sobel.release(); Sobel = Gradient(U);}
for (x = 1; x < u.cols - 1; x++) {
for (y = 1 ; y < u.rows - 1; y++) {
switch (penalization) {
case PENALIZATION_CHARBONNIER:
gc = g_c(Sobel, x, y, l);
gl = gc + g_c(Sobel, x - 1, y, l);
gr = gc + g_c(Sobel, x + 1, y, l);
gu = gc + g_c(Sobel, x, y + 1, l);
gd = gc + g_c(Sobel, x, y - 1, l);
gc = gl + gr + gu + gd;
break;
case PENALIZATION_PERONA_MALIK:
gc = g_p(Sobel, x, y, l);
gl = gc + g_p(Sobel, x - 1, y, l);
gr = gc + g_p(Sobel, x + 1, y, l);
gu = gc + g_p(Sobel, x, y + 1, l);
gd = gc + g_p(Sobel, x, y - 1, l);
gc = gl + gr + gu + gd;
break;
}
float fi = Fi;
if (maxDisp > minDisp) {
if (U.at<float>(y,x) > maxDisp * scale) {fi*=1000; U.at<float>(y,x) = static_cast<float>(maxDisp * scale);}
if (U.at<float>(y,x) < minDisp * scale) {fi*=1000; U.at<float>(y,x) = static_cast<float>(minDisp * scale);}
}
int A = (int) (U.at<float>(y,x));
int neg = 0; if (U.at<float>(y,x) <= 0) neg = -1;
if (x + A >= u.cols)
u.at<float>(y, x) = U.at<float>(y, u.cols - A - 1);
else if (x + A + neg < 0)
u.at<float>(y, x) = U.at<float>(y, - A + 2);
else {
u.at<float>(y, x) = A + (I2x.at<float>(y, x + A + neg) * (I1.at<float>(y, x) - I2.at<float>(y, x + A))
+ fi * (gr * U.at<float>(y, x + 1) + gl * U.at<float>(y, x - 1) + gu * U.at<float>(y + 1, x) + gd * U.at<float>(y - 1, x) - gc * A))
/ (I2x.at<float>(y, x + A + neg) * I2x.at<float>(y, x + A + neg) + gc * fi) ;
}
}//y
u.at<float>(0, x) = u.at<float>(1, x);
u.at<float>(u.rows - 1, x) = u.at<float>(u.rows - 2, x);
}//x
for (y = 0; y < u.rows; y++) {
u.at<float>(y, 0) = u.at<float>(y, 1);
u.at<float>(y, u.cols - 1) = u.at<float>(y, u.cols - 2);
}
u.copyTo(U);
}//n
}
void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level)
{
CvSize imgSize = _u.size();
CvSize frmSize = cvSize((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
Mat I1_h, I2_h, I2x_h, u_h, U, U_h;
//PRE relaxation
VariationalSolver(I1, I2, I2x, _u, level);
if (level >= levels - 1) return;
level ++;
//scaling DOWN
resize(I1, I1_h, frmSize, 0, 0, INTER_AREA);
resize(I2, I2_h, frmSize, 0, 0, INTER_AREA);
resize(_u, u_h, frmSize, 0, 0, INTER_AREA);
u_h.convertTo(u_h, u_h.type(), pyrScale);
I2x_h = diffX(I2_h);
//Next level
U_h = u_h.clone();
VCycle_MyFAS(I1_h, I2_h, I2x_h, U_h, level);
subtract(U_h, u_h, U_h);
U_h.convertTo(U_h, U_h.type(), 1.0 / pyrScale);
//scaling UP
resize(U_h, U, imgSize);
//correcting the solution
add(_u, U, _u);
//POST relaxation
VariationalSolver(I1, I2, I2x, _u, level - 1);
if (flags & USE_MEDIAN_FILTERING) medianBlur(_u, _u, 3);
I1_h.release();
I2_h.release();
I2x_h.release();
u_h.release();
U.release();
U_h.release();
}
void StereoVar::FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
{
double scale = pow(pyrScale, (double) level);
CvSize frmSize = cvSize((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
Mat I1_h, I2_h, I2x_h, u_h;
//scaling DOWN
resize(I1, I1_h, frmSize, 0, 0, INTER_AREA);
resize(I2, I2_h, frmSize, 0, 0, INTER_AREA);
resize(u, u_h, frmSize, 0, 0, INTER_AREA);
u_h.convertTo(u_h, u_h.type(), scale);
I2x_h = diffX(I2_h);
switch (cycle) {
case CYCLE_O:
VariationalSolver(I1_h, I2_h, I2x_h, u_h, level);
break;
case CYCLE_V:
VCycle_MyFAS(I1_h, I2_h, I2x_h, u_h, level);
break;
}
u_h.convertTo(u_h, u_h.type(), 1.0 / scale);
//scaling UP
resize(u_h, u, u.size(), 0, 0, INTER_CUBIC);
I1_h.release();
I2_h.release();
I2x_h.release();
u_h.release();
level--;
if (flags & USE_MEDIAN_FILTERING) medianBlur(u, u, 3);
if (level >= 0) FMG(I1, I2, I2x, u, level);
}
void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
CV_Assert(left.size() == right.size() && left.type() == right.type());
CvSize imgSize = left.size();
int MaxD = MAX(std::abs(minDisp), std::abs(maxDisp));
int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}
Mat u;
if ((flags & USE_INITIAL_DISPARITY) && (!disp.empty())) {
CV_Assert(disp.size() == left.size() && disp.type() == CV_8UC1);
disp.convertTo(u, CV_32FC1, static_cast<double>(SignD * MaxD) / 256);
} else {
u.create(imgSize, CV_32FC1);
u.setTo(0);
}
// Preprocessing
Mat leftgray, rightgray;
if (left.type() != CV_8UC1) {
cvtColor(left, leftgray, CV_BGR2GRAY);
cvtColor(right, rightgray, CV_BGR2GRAY);
} else {
left.copyTo(leftgray);
right.copyTo(rightgray);
}
if (flags & USE_EQUALIZE_HIST) {
equalizeHist(leftgray, leftgray);
equalizeHist(rightgray, rightgray);
}
if (poly_sigma > 0.0001) {
GaussianBlur(leftgray, leftgray, cvSize(poly_n, poly_n), poly_sigma);
GaussianBlur(rightgray, rightgray, cvSize(poly_n, poly_n), poly_sigma);
}
Mat I1, I2;
leftgray.convertTo(I1, CV_32FC1);
rightgray.convertTo(I2, CV_32FC1);
leftgray.release();
rightgray.release();
Mat I2x = diffX(I2);
FMG(I1, I2, I2x, u, levels - 1);
I1.release();
I2.release();
I2x.release();
disp.create( left.size(), CV_8UC1 );
u = abs(u);
u.convertTo(disp, disp.type(), 256 / MaxD, 0);
u.release();
}
} // namespace
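A minimal sketch of driving the new class end-to-end on an already rectified pair; the file names and the widened disparity range are placeholders, with the usual contrib/highgui headers assumed:

cv::Mat left  = cv::imread("left.png",  0);  // rectified left view, 8-bit grayscale
cv::Mat right = cv::imread("right.png", 0);  // rectified right view
cv::StereoVar var;                           // defaults: 3 levels, pyrScale 0.5, disparity range [0,16], Tichonov penalization
var.maxDisp = 64;                            // widen the search range (illustrative value)
var.flags  |= cv::StereoVar::USE_MEDIAN_FILTERING;
cv::Mat disp;                                // operator() allocates it as CV_8UC1, roughly rescaled to 0..255
var(left, right, disp);
cv::imwrite("disparity.png", disp);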


@@ -10,6 +10,7 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <stdio.h>
@@ -18,7 +19,7 @@ using namespace cv;
void print_help()
{
printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
printf("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh] [--blocksize=<block_size>]\n"
printf("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|var] [--blocksize=<block_size>]\n"
"[--max-disparity=<max_disparity>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
"[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n");
}
@@ -59,13 +60,14 @@ int main(int argc, char** argv)
const char* disparity_filename = 0;
const char* point_cloud_filename = 0;
enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2 };
enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };
int alg = STEREO_SGBM;
int SADWindowSize = 0, numberOfDisparities = 0;
bool no_display = false;
StereoBM bm;
StereoSGBM sgbm;
StereoVar var;
for( int i = 1; i < argc; i++ )
{
@@ -81,7 +83,8 @@ int main(int argc, char** argv)
char* _alg = argv[i] + strlen(algorithm_opt);
alg = strcmp(_alg, "bm") == 0 ? STEREO_BM :
strcmp(_alg, "sgbm") == 0 ? STEREO_SGBM :
strcmp(_alg, "hh") == 0 ? STEREO_HH : -1;
strcmp(_alg, "hh") == 0 ? STEREO_HH :
strcmp(_alg, "var") == 0 ? STEREO_VAR : -1;
if( alg < 0 )
{
printf("Command-line parameter error: Unknown stereo algorithm\n\n");
@@ -192,7 +195,7 @@ int main(int argc, char** argv)
img2 = img2r;
}
numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : img_size.width/8;
numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;
bm.state->roi1 = roi1;
bm.state->roi2 = roi2;
@@ -221,6 +224,19 @@ int main(int argc, char** argv)
sgbm.disp12MaxDiff = 1;
sgbm.fullDP = alg == STEREO_HH;
var.levels = 6;
var.pyrScale = 0.6;
var.nIt = 3;
var.minDisp = -numberOfDisparities;
var.maxDisp = 0;
var.poly_n = 3;
var.poly_sigma = 0.0;
var.fi = 5.0f;
var.lambda = 0.1;
var.penalization = var.PENALIZATION_TICHONOV;
var.cycle = var.CYCLE_V;
var.flags = var.USE_SMART_ID | var.USE_INITIAL_DISPARITY | 1 * var.USE_MEDIAN_FILTERING ;
Mat disp, disp8;
//Mat img1p, img2p, dispp;
//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
@@ -229,13 +245,18 @@
int64 t = getTickCount();
if( alg == STEREO_BM )
bm(img1, img2, disp);
else
else if( alg == STEREO_VAR )
var(img1, img2, disp);
else if( alg == STEREO_SGBM || alg == STEREO_HH )
sgbm(img1, img2, disp);
t = getTickCount() - t;
printf("Time elapsed: %fms\n", t*1000/getTickFrequency());
//disp = dispp.colRange(numberOfDisparities, img1p.cols);
disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
if( alg != STEREO_VAR )
disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
else
disp.convertTo(disp8, CV_8U);
if( !no_display )
{
namedWindow("left", 1);
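With these additions the sample accepts the new matcher through --algorithm=var alongside bm, sgbm and hh; an invocation along the lines of stereo_match left.png right.png --algorithm=var --max-disparity=64 -o disparity.png (file names are placeholders) should exercise the StereoVar configuration above, while the BM and SGBM code paths are unchanged.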