Move C API of opencv_calib3d to separate file

Andrey Kamaev 2013-04-11 19:27:54 +04:00
parent 199a35a105
commit e5a33723fc
70 changed files with 836 additions and 792 deletions

View File

@ -1,4 +1,4 @@
SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_photo opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy)
SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_photo opencv_ml opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy)
ocv_check_dependencies(${OPENCV_HAARTRAINING_DEPS})
if(NOT OCV_DEPENDENCIES_FOUND)

View File

@ -112,7 +112,9 @@ CV_INLINE float cvLogRatio( float val )
/* each trainData matrix row is a sample */
#define CV_ROW_SAMPLE 1
#define CV_IS_ROW_SAMPLE( flags ) ( ( flags ) & CV_ROW_SAMPLE )
#ifndef CV_IS_ROW_SAMPLE
# define CV_IS_ROW_SAMPLE( flags ) ( ( flags ) & CV_ROW_SAMPLE )
#endif
/* Classifier supports tune function */
#define CV_TUNABLE (1 << 1)

View File

@ -64,13 +64,11 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/features2d.hpp"
#include "opencv2/flann.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/objdetect.hpp"
#if !defined(CV_IMPL)
#define CV_IMPL extern "C"
#endif //CV_IMPL

View File

@ -53,5 +53,7 @@
#include "opencv2/photo.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#endif

View File

@ -50,13 +50,12 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/contrib.hpp"
#endif

View File

@ -115,7 +115,7 @@ calibrateCamera
---------------
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) )
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) )
.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
@ -454,7 +454,7 @@ findChessboardCorners
-------------------------
Finds the positions of internal corners of the chessboard.
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE )
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners
@ -515,7 +515,7 @@ Finds centers in the grid of circles.
.. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector() )
.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> retval, centers
.. ocv:pyfunction:: cv2.findCirclesGrid(image, patternSize[, centers[, flags[, blobDetector]]]) -> retval, centers
:param image: grid view of input circles; it must be an 8-bit grayscale or color image.
@ -694,7 +694,7 @@ findEssentialMat
------------------
Calculates an essential matrix from the corresponding points in two images.
.. ocv:function:: Mat findEssentialMat( InputArray points1, InputArray points2, double focal=1.0, Point2d pp=Point2d(0, 0), int method=CV_RANSAC, double prob=0.999, double threshold=1.0, OutputArray mask=noArray() )
.. ocv:function:: Mat findEssentialMat( InputArray points1, InputArray points2, double focal=1.0, Point2d pp=Point2d(0, 0), int method=RANSAC, double prob=0.999, double threshold=1.0, OutputArray mask=noArray() )
:param points1: Array of ``N`` ``(N >= 5)`` 2D points from the first image. The point coordinates should be floating-point (single or double precision).
@ -975,7 +975,7 @@ initCameraMatrix2D
----------------------
Finds an initial camera matrix from 3D-2D point correspondences.
.. ocv:function:: Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio=1.)
.. ocv:function:: Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio=1.0 )
.. ocv:pyfunction:: cv2.initCameraMatrix2D(objectPoints, imagePoints, imageSize[, aspectRatio]) -> retval
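As a quick orientation only (not part of this commit), a hedged C++ sketch of how the documented calls fit together; the 9x6 board size, the ten view file names and the RMS threshold are illustrative assumptions:

#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include <vector>

int main()
{
    // Assumptions for illustration: a 9x6 inner-corner chessboard photographed in view0.png..view9.png
    cv::Size boardSize(9, 6);
    std::vector<cv::Point3f> boardModel;
    for (int i = 0; i < boardSize.height; i++)
        for (int j = 0; j < boardSize.width; j++)
            boardModel.push_back(cv::Point3f((float)j, (float)i, 0.f));

    std::vector<std::vector<cv::Point3f> > objectPoints;
    std::vector<std::vector<cv::Point2f> > imagePoints;
    cv::Size imageSize;

    for (int k = 0; k < 10; k++)
    {
        cv::Mat view = cv::imread(cv::format("view%d.png", k), cv::IMREAD_GRAYSCALE);
        if (view.empty())
            continue;
        imageSize = view.size();

        std::vector<cv::Point2f> corners;
        if (cv::findChessboardCorners(view, boardSize, corners,
                cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE))
        {
            imagePoints.push_back(corners);
            objectPoints.push_back(boardModel);
        }
    }

    if (imagePoints.empty())
        return 1;

    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     cameraMatrix, distCoeffs, rvecs, tvecs);
    return rms < 1.0 ? 0 : 1;   // crude check on the returned RMS reprojection error
}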

View File

@ -7,7 +7,7 @@
// copy or use the software.
//
//
// License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
@ -44,562 +44,184 @@
#ifndef __OPENCV_CALIB3D_HPP__
#define __OPENCV_CALIB3D_HPP__
#ifdef __cplusplus
# include "opencv2/core.hpp"
#endif
#include "opencv2/core/core_c.h"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
typedef struct CvPOSITObject CvPOSITObject;
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);
/* Releases CvPOSITObject structure */
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );
CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );
/* For each input point on one of images
computes parameters of the corresponding
epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
int which_image,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );
/* Triangulation functions */
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
CvSize image_size, double alpha,
CvMat* new_camera_matrix,
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pixel_ROI CV_DEFAULT(0),
int center_principal_point CV_DEFAULT(0));
/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
CvMat* jacobian CV_DEFAULT(0) );
/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
CvMat* homography,
int method CV_DEFAULT(0),
double ransacReprojThreshold CV_DEFAULT(3),
CvMat* mask CV_DEFAULT(0));
/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx CV_DEFAULT(NULL),
CvMat *matrixQy CV_DEFAULT(NULL),
CvMat *matrixQz CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
CvMat *rotMatr, CvMat *posVect,
CvMat *rotMatrX CV_DEFAULT(NULL),
CvMat *rotMatrY CV_DEFAULT(NULL),
CvMat *rotMatrZ CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
const CvMat* _rvec2, const CvMat* _tvec2,
CvMat* _rvec3, CvMat* _tvec3,
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
/* Projects object points to the view plane using
the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
const CvMat* translation_vector, const CvMat* camera_matrix,
const CvMat* distortion_coeffs, CvMat* image_points,
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
CvMat* dpddist CV_DEFAULT(NULL),
double aspect_ratio CV_DEFAULT(0));
/* Finds extrinsic camera parameters from
a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvMat* rotation_vector,
CvMat* translation_vector,
int use_extrinsic_guess CV_DEFAULT(0) );
/* Computes initial estimate of the intrinsic camera parameters
in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
const CvMat* image_points,
const CvMat* npoints, CvSize image_size,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
/* Computes the transformation from one camera coordinate system to another one
from a few correspondent views of the same calibration target. Optionally, calibrates
both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
const CvMat* image_points2, const CvMat* npoints,
CvMat* camera_matrix1, CvMat* dist_coeffs1,
CvMat* camera_matrix2, CvMat* dist_coeffs2,
CvSize image_size, CvMat* R, CvMat* T,
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
#define CV_CALIB_ZERO_DISPARITY 1024
/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
CvSize image_size, const CvMat* R, const CvMat* T,
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
CvMat* Q CV_DEFAULT(0),
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
double alpha CV_DEFAULT(-1),
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
CvRect* valid_pix_ROI2 CV_DEFAULT(0));
/* Computes rectification transformations for uncalibrated pair of images using a set
of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
const CvMat* F, CvSize img_size,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));
CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
#ifdef __cplusplus
}
//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
};
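For readers of the legacy solver kept above, a hedged sketch of the usual driver loop; nparams, nerrs and the residual/Jacobian evaluation are placeholders supplied by the caller, not anything defined in this header:

// Hedged usage sketch for the legacy solver kept above; nparams, nerrs and the
// residual/Jacobian evaluation are placeholders supplied by the caller.
CvLevMarq solver(nparams, nerrs);
// copy the initial parameter guess into solver.param before iterating
for(;;)
{
    const CvMat* param = 0;
    CvMat* J = 0;
    CvMat* err = 0;
    if (!solver.update(param, J, err) || !err)
        break;                        // solver reached the DONE state
    // fill *err with the residuals at 'param' and, when J != 0, fill *J with their Jacobian
}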
namespace cv
{
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());
//! type of the robust estimation algorithm
enum
{
LMEDS=CV_LMEDS, //!< least-median algorithm
RANSAC=CV_RANSAC //!< RANSAC algorithm
};
enum { LMEDS = 4, //!< least-median algorithm
RANSAC = 8 //!< RANSAC algorithm
};
enum { ITERATIVE = 0,
EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
enum { CALIB_CB_ADAPTIVE_THRESH = 1,
CALIB_CB_NORMALIZE_IMAGE = 2,
CALIB_CB_FILTER_QUADS = 4,
CALIB_CB_FAST_CHECK = 8
};
enum { CALIB_CB_SYMMETRIC_GRID = 1,
CALIB_CB_ASYMMETRIC_GRID = 2,
CALIB_CB_CLUSTERING = 4
};
enum { CALIB_USE_INTRINSIC_GUESS = 0x00001,
CALIB_FIX_ASPECT_RATIO = 0x00002,
CALIB_FIX_PRINCIPAL_POINT = 0x00004,
CALIB_ZERO_TANGENT_DIST = 0x00008,
CALIB_FIX_FOCAL_LENGTH = 0x00010,
CALIB_FIX_K1 = 0x00020,
CALIB_FIX_K2 = 0x00040,
CALIB_FIX_K3 = 0x00080,
CALIB_FIX_K4 = 0x00800,
CALIB_FIX_K5 = 0x01000,
CALIB_FIX_K6 = 0x02000,
CALIB_RATIONAL_MODEL = 0x04000,
CALIB_THIN_PRISM_MODEL = 0x08000,
CALIB_FIX_S1_S2_S3_S4 = 0x10000,
// only for stereo
CALIB_FIX_INTRINSIC = 0x00100,
CALIB_SAME_FOCAL_LENGTH = 0x00200,
// for stereo rectification
CALIB_ZERO_DISPARITY = 0x00400
};
//! the algorithm for finding fundamental matrix
enum { FM_7POINT = 1, //!< 7-point algorithm
FM_8POINT = 2, //!< 8-point algorithm
FM_LMEDS = 4, //!< least-median algorithm
FM_RANSAC = 8 //!< RANSAC algorithm
};
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method=0, double ransacReprojThreshold=3,
int method = 0, double ransacReprojThreshold = 3,
OutputArray mask=noArray());
//! variant of findHomography for backward compatibility
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method=0, double ransacReprojThreshold=3);
OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );
//! Computes RQ decomposition of 3x3 matrix
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx=noArray(),
OutputArray Qy=noArray(),
OutputArray Qz=noArray());
OutputArray Qx = noArray(),
OutputArray Qy = noArray(),
OutputArray Qz = noArray());
//! Decomposes the projection matrix into camera matrix and the rotation matrix and the translation vector
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
OutputArray rotMatrixX=noArray(),
OutputArray rotMatrixY=noArray(),
OutputArray rotMatrixZ=noArray(),
OutputArray eulerAngles=noArray() );
OutputArray rotMatrixX = noArray(),
OutputArray rotMatrixY = noArray(),
OutputArray rotMatrixZ = noArray(),
OutputArray eulerAngles = noArray() );
//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
OutputArray dABdA,
OutputArray dABdB );
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );
//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
InputArray rvec2, InputArray tvec2,
OutputArray rvec3, OutputArray tvec3,
OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );
OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),
OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),
OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );
//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
InputArray rvec, InputArray tvec,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray imagePoints,
OutputArray jacobian=noArray(),
double aspectRatio=0 );
OutputArray jacobian = noArray(),
double aspectRatio = 0 );
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
enum
{
ITERATIVE=CV_ITERATIVE,
EPNP=CV_EPNP,
P3P=CV_P3P
};
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess=false, int flags=ITERATIVE);
bool useExtrinsicGuess = false, int flags = ITERATIVE );
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
InputArray imagePoints,
InputArray cameraMatrix,
InputArray distCoeffs,
OutputArray rvec,
OutputArray tvec,
bool useExtrinsicGuess = false,
int iterationsCount = 100,
float reprojectionError = 8.0,
int minInliersCount = 100,
OutputArray inliers = noArray(),
int flags = ITERATIVE);
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, int minInliersCount = 100,
OutputArray inliers = noArray(), int flags = ITERATIVE );
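A short, hedged pose-estimation sketch against the declarations above; objPts, imgPts, cameraMatrix and distCoeffs are assumed to be provided by the caller (for example from a detected chessboard):

// Hedged sketch: objPts (vector<Point3f>), imgPts (vector<Point2f>), cameraMatrix and
// distCoeffs are assumed to come from the caller.
cv::Mat rvec, tvec;
bool ok = cv::solvePnP(objPts, imgPts, cameraMatrix, distCoeffs,
                       rvec, tvec, false, cv::ITERATIVE);
if (ok)
{
    cv::Mat R;
    cv::Rodrigues(rvec, R);   // 3x1 rotation vector -> 3x3 rotation matrix
}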
//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize, double aspectRatio=1. );
enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };
Size imageSize, double aspectRatio = 1.0 );
//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
CV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );
//! draws the checkerboard pattern (found or partly found) in the image
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
CALIB_CB_CLUSTERING = 4 };
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
//! the deprecated function. Use findCirclesGrid() instead of it.
CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
enum
{
CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
CALIB_FIX_K1 = CV_CALIB_FIX_K1,
CALIB_FIX_K2 = CV_CALIB_FIX_K2,
CALIB_FIX_K3 = CV_CALIB_FIX_K3,
CALIB_FIX_K4 = CV_CALIB_FIX_K4,
CALIB_FIX_K5 = CV_CALIB_FIX_K5,
CALIB_FIX_K6 = CV_CALIB_FIX_K6,
CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
CALIB_THIN_PRISM_MODEL = CV_CALIB_THIN_PRISM_MODEL,
CALIB_FIX_S1_S2_S3_S4=CV_CALIB_FIX_S1_S2_S3_S4,
// only for stereo
CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
// for stereo rectification
CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
};
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
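For the circle-grid path, a hedged one-call sketch analogous to findChessboardCorners; the input image and the 4x11 asymmetric pattern size are assumptions:

// Hedged sketch: 'img' is assumed to be a grayscale view of a 4x11 asymmetric circle grid.
std::vector<cv::Point2f> centers;
bool found = cv::findCirclesGrid(img, cv::Size(4, 11), centers,
                                 cv::CALIB_CB_ASYMMETRIC_GRID);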
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize,
InputOutputArray cameraMatrix,
InputOutputArray distCoeffs,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags=0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );
int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
Size imageSize,
double apertureWidth,
double apertureHeight,
CV_OUT double& fovx,
CV_OUT double& fovy,
CV_OUT double& focalLength,
CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
double apertureWidth, double apertureHeight,
CV_OUT double& fovx, CV_OUT double& fovy,
CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );
//! finds intrinsic and extrinsic parameters of a stereo camera
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1,
InputArrayOfArrays imagePoints2,
InputOutputArray cameraMatrix1,
InputOutputArray distCoeffs1,
InputOutputArray cameraMatrix2,
InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,
OutputArray T, OutputArray E, OutputArray F,
InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags=CALIB_FIX_INTRINSIC );
int flags = CALIB_FIX_INTRINSIC );
//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
OutputArray R1, OutputArray R2,
OutputArray P1, OutputArray P2,
OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
double alpha=-1, Size newImageSize=Size(),
CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
OutputArray R1, OutputArray R2,
OutputArray P1, OutputArray P2,
OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
double alpha = -1, Size newImageSize = Size(),
CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
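Taken together, a typical stereo setup chains these calls; this hedged sketch assumes cameraMatrix1/2, distCoeffs1/2, R, T and imageSize were produced by a prior stereoCalibrate run:

// Hedged sketch: intrinsics, R, T and imageSize are assumed to come from stereoCalibrate.
cv::Mat R1, R2, P1, P2, Q;
cv::stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                  imageSize, R, T, R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY);
// Q can later be handed to reprojectImageTo3D() together with a computed disparity map.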
//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
InputArray F, Size imgSize,
OutputArray H1, OutputArray H2,
double threshold=5 );
double threshold = 5 );
//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
@ -615,8 +237,9 @@ CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distC
//! returns the optimal new camera matrix
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize=Size(),
CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);
Size imageSize, double alpha, Size newImgSize = Size(),
CV_OUT Rect* validPixROI = 0,
bool centerPrincipalPoint = false);
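A brief, hedged undistortion sketch around the declaration above; alpha trades cropping (0) against keeping all source pixels (1), cv::undistort comes from imgproc, and cameraMatrix, distCoeffs and src are assumed inputs:

// Hedged sketch: cameraMatrix/distCoeffs come from calibrateCamera, 'src' is an input frame.
cv::Mat newK = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoeffs,
                                             src.size(), /*alpha=*/0.0);
cv::Mat undistorted;
cv::undistort(src, undistorted, cameraMatrix, distCoeffs, newK);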
//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
@ -627,44 +250,36 @@ CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst
//! for backward compatibility
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
//! the algorithm for finding fundamental matrix
enum
{
FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm
};
//! finds fundamental matrix from a set of corresponding 2D points
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
int method=FM_RANSAC,
double param1=3., double param2=0.99,
OutputArray mask=noArray());
int method = FM_RANSAC,
double param1 = 3., double param2 = 0.99,
OutputArray mask = noArray() );
//! variant of findFundamentalMat for backward compatibility
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
OutputArray mask, int method=FM_RANSAC,
double param1=3., double param2=0.99);
OutputArray mask, int method = FM_RANSAC,
double param1 = 3., double param2 = 0.99 );
//! finds essential matrix from a set of corresponding 2D points using five-point algorithm
CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2, double focal = 1.0, Point2d pp = Point2d(0, 0),
int method = CV_RANSAC,
double prob = 0.999, double threshold = 1.0, OutputArray mask = noArray() );
CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2,
double focal = 1.0, Point2d pp = Point2d(0, 0),
int method = RANSAC, double prob = 0.999,
double threshold = 1.0, OutputArray mask = noArray() );
//! decompose essential matrix to possible rotation matrix and one translation vector
CV_EXPORTS void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
//! recover relative camera pose from a set of corresponding 2D points
CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2, OutputArray R, OutputArray t,
CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2,
OutputArray R, OutputArray t,
double focal = 1.0, Point2d pp = Point2d(0, 0),
InputOutputArray mask = noArray());
InputOutputArray mask = noArray() );
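A hedged two-view sketch combining the functions above; the matched point arrays pts1/pts2 and the focal/pp values are assumed to be known:

// Hedged sketch: pts1/pts2 are matched 2D points, focal/pp describe the camera.
cv::Mat mask;
cv::Mat E = cv::findEssentialMat(pts1, pts2, focal, pp, cv::RANSAC, 0.999, 1.0, mask);
cv::Mat R, t;
int inliers = cv::recoverPose(E, pts1, pts2, R, t, focal, pp, mask);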
//! finds coordinates of epipolar lines corresponding the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );
CV_EXPORTS void computeCorrespondEpilines( InputArray points, int whichImage,
InputArray F, OutputArray lines );
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
InputArray projPoints1, InputArray projPoints2,
@ -673,13 +288,39 @@ CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
OutputArray newPoints1, OutputArray newPoints2 );
//! filters off speckles (small regions of incorrectly computed disparity)
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
int maxSpeckleSize, double maxDiff,
InputOutputArray buf = noArray() );
//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity, int numberOfDisparities,
int SADWindowSize );
//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDisp = 1 );
//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues = false,
int ddepth = -1 );
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold = 3, double confidence = 0.99);
template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();
class CV_EXPORTS_W StereoMatcher : public Algorithm
{
public:
enum { DISP_SHIFT=4, DISP_SCALE=(1 << DISP_SHIFT) };
enum { DISP_SHIFT = 4,
DISP_SCALE = (1 << DISP_SHIFT)
};
CV_WRAP virtual void compute( InputArray left, InputArray right,
OutputArray disparity ) = 0;
@ -704,10 +345,13 @@ public:
};
class CV_EXPORTS_W StereoBM : public StereoMatcher
{
public:
enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1 };
enum { PREFILTER_NORMALIZED_RESPONSE = 0,
PREFILTER_XSOBEL = 1
};
CV_WRAP virtual int getPreFilterType() const = 0;
CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;
@ -734,13 +378,15 @@ public:
CV_WRAP virtual void setROI2(Rect roi2) = 0;
};
CV_EXPORTS_W Ptr<StereoBM> createStereoBM(int numDisparities=0, int blockSize=21);
CV_EXPORTS_W Ptr<StereoBM> createStereoBM(int numDisparities = 0, int blockSize = 21);
class CV_EXPORTS_W StereoSGBM : public StereoMatcher
{
public:
enum { MODE_SGBM=0, MODE_HH=1 };
enum { MODE_SGBM = 0,
MODE_HH = 1
};
CV_WRAP virtual int getPreFilterCap() const = 0;
CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
@ -760,38 +406,11 @@ public:
CV_EXPORTS_W Ptr<StereoSGBM> createStereoSGBM(int minDisparity, int numDisparities, int blockSize,
int P1=0, int P2=0, int disp12MaxDiff=0,
int preFilterCap=0, int uniquenessRatio=0,
int speckleWindowSize=0, int speckleRange=0,
int mode=StereoSGBM::MODE_SGBM);
int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
int preFilterCap = 0, int uniquenessRatio = 0,
int speckleWindowSize = 0, int speckleRange = 0,
int mode = StereoSGBM::MODE_SGBM);
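A small, hedged disparity sketch around the factory above; the rectified images left/right are assumed and the parameter values are illustrative only:

// Hedged sketch: 'left' and 'right' are assumed to be rectified 8-bit grayscale images.
cv::Ptr<cv::StereoSGBM> sgbm = cv::createStereoSGBM(/*minDisparity=*/0,
                                                    /*numDisparities=*/64,
                                                    /*blockSize=*/5);
cv::Mat disparity;
sgbm->compute(left, right, disparity);   // fixed-point values, scaled by StereoMatcher::DISP_SCALE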
//! filters off speckles (small regions of incorrectly computed disparity)
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
int maxSpeckleSize, double maxDiff,
InputOutputArray buf=noArray() );
//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity, int numberOfDisparities,
int SADWindowSize );
//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDisp=1 );
//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues=false,
int ddepth=-1 );
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold=3, double confidence=0.99);
}
#endif
} // cv
#endif

View File

@ -0,0 +1,413 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CALIB3D_C_H__
#define __OPENCV_CALIB3D_C_H__
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
typedef struct CvPOSITObject CvPOSITObject;
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);
/* Releases CvPOSITObject structure */
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );
CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );
/* For each input point on one of images
computes parameters of the corresponding
epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
int which_image,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );
/* Triangulation functions */
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
CvSize image_size, double alpha,
CvMat* new_camera_matrix,
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pixel_ROI CV_DEFAULT(0),
int center_principal_point CV_DEFAULT(0));
/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
CvMat* jacobian CV_DEFAULT(0) );
/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
CvMat* homography,
int method CV_DEFAULT(0),
double ransacReprojThreshold CV_DEFAULT(3),
CvMat* mask CV_DEFAULT(0));
/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx CV_DEFAULT(NULL),
CvMat *matrixQy CV_DEFAULT(NULL),
CvMat *matrixQz CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
CvMat *rotMatr, CvMat *posVect,
CvMat *rotMatrX CV_DEFAULT(NULL),
CvMat *rotMatrY CV_DEFAULT(NULL),
CvMat *rotMatrZ CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
const CvMat* _rvec2, const CvMat* _tvec2,
CvMat* _rvec3, CvMat* _tvec3,
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
/* Projects object points to the view plane using
the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
const CvMat* translation_vector, const CvMat* camera_matrix,
const CvMat* distortion_coeffs, CvMat* image_points,
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
CvMat* dpddist CV_DEFAULT(NULL),
double aspect_ratio CV_DEFAULT(0));
/* Finds extrinsic camera parameters from
a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvMat* rotation_vector,
CvMat* translation_vector,
int use_extrinsic_guess CV_DEFAULT(0) );
/* Computes initial estimate of the intrinsic camera parameters
in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
const CvMat* image_points,
const CvMat* npoints, CvSize image_size,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
// Performs a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
/* Computes the transformation from one camera coordinate system to another one
from a few correspondent views of the same calibration target. Optionally, calibrates
both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
const CvMat* image_points2, const CvMat* npoints,
CvMat* camera_matrix1, CvMat* dist_coeffs1,
CvMat* camera_matrix2, CvMat* dist_coeffs2,
CvSize image_size, CvMat* R, CvMat* T,
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
#define CV_CALIB_ZERO_DISPARITY 1024
/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
CvSize image_size, const CvMat* R, const CvMat* T,
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
CvMat* Q CV_DEFAULT(0),
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
double alpha CV_DEFAULT(-1),
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
CvRect* valid_pix_ROI2 CV_DEFAULT(0));
/* Computes rectification transformations for uncalibrated pair of images using a set
of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
const CvMat* F, CvSize img_size,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));
CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
#ifdef __cplusplus
} // extern "C"
//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
};
#endif
#endif /* __OPENCV_CALIB3D_C_H__ */
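Since the C declarations now live in their own header, code that sticks to the old API is expected to include it explicitly; a minimal, hypothetical consumer might look like this (the rvec/R matrices are placeholders):

// Hypothetical consumer of the relocated C API; the rvec/R matrices are placeholders.
#include "opencv2/calib3d/calib3d_c.h"

void rotationVectorToMatrix(const CvMat* rvec, CvMat* R)
{
    cvRodrigues2(rvec, R, 0);   // same call as before, only the header location changed
}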

View File

@ -10,7 +10,7 @@ using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
CV_ENUM(pnpAlgo, CV_ITERATIVE, CV_EPNP /*, CV_P3P*/)
CV_ENUM(pnpAlgo, ITERATIVE, EPNP /*, P3P*/)
typedef std::tr1::tuple<int, pnpAlgo> PointsNum_Algo_t;
typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo;
@ -20,7 +20,7 @@ typedef perf::TestBaseWithParam<int> PointsNum;
PERF_TEST_P(PointsNum_Algo, solvePnP,
testing::Combine(
testing::Values(/*4,*/ 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Values((int)CV_ITERATIVE, (int)CV_EPNP)
testing::Values((int)ITERATIVE, (int)EPNP)
)
)
{
@ -93,7 +93,7 @@ PERF_TEST(PointsNum_Algo, solveP3P)
TEST_CYCLE_N(1000)
{
solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, CV_P3P);
solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, P3P);
}
SANITY_CHECK(rvec, 1e-6);

View File

@ -61,6 +61,7 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
#include "circlesgrid.hpp"
#include <stdarg.h>

View File

@ -42,6 +42,7 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
#include <stdio.h>
#include <iterator>
@ -825,7 +826,7 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
dpdk_p[dpdk_step+7] = fy*y*cdist*(-icdist2)*icdist2*r6;
if( _dpdk->cols > 8 )
{
dpdk_p[8] = fx*r2; //s1
dpdk_p[8] = fx*r2; //s1
dpdk_p[9] = fx*r4; //s2
dpdk_p[10] = 0;//s3
dpdk_p[11] = 0;//s4
@ -1255,7 +1256,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
//when the thin prism model is used the distortion coefficients matrix must have 12 parameters
if((flags & CV_CALIB_THIN_PRISM_MODEL) && (distCoeffs->cols*distCoeffs->rows != 12))
CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );
nimages = npoints->rows*npoints->cols;
npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);

View File

@ -41,6 +41,7 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
#include <vector>
#include <algorithm>

View File

@ -202,12 +202,12 @@ void CirclesGridClusterFinder::findCorners(const std::vector<cv::Point2f> &hull2
//corners are the most sharp angles (6)
Mat anglesMat = Mat(angles);
Mat sortedIndices;
sortIdx(anglesMat, sortedIndices, CV_SORT_EVERY_COLUMN + CV_SORT_DESCENDING);
sortIdx(anglesMat, sortedIndices, SORT_EVERY_COLUMN + SORT_DESCENDING);
CV_Assert(sortedIndices.type() == CV_32SC1);
CV_Assert(sortedIndices.cols == 1);
const int cornersCount = isAsymmetricGrid ? 6 : 4;
Mat cornersIndices;
cv::sort(sortedIndices.rowRange(0, cornersCount), cornersIndices, CV_SORT_EVERY_COLUMN + CV_SORT_ASCENDING);
cv::sort(sortedIndices.rowRange(0, cornersCount), cornersIndices, SORT_EVERY_COLUMN + SORT_ASCENDING);
corners.clear();
for(int i=0; i<cornersCount; i++)
{
@ -438,15 +438,15 @@ bool Graph::doesVertexExist(size_t id) const
void Graph::addVertex(size_t id)
{
assert( !doesVertexExist( id ) );
CV_Assert( !doesVertexExist( id ) );
vertices.insert(std::pair<size_t, Vertex> (id, Vertex()));
}
void Graph::addEdge(size_t id1, size_t id2)
{
assert( doesVertexExist( id1 ) );
assert( doesVertexExist( id2 ) );
CV_Assert( doesVertexExist( id1 ) );
CV_Assert( doesVertexExist( id2 ) );
vertices[id1].neighbors.insert(id2);
vertices[id2].neighbors.insert(id1);
@ -454,8 +454,8 @@ void Graph::addEdge(size_t id1, size_t id2)
void Graph::removeEdge(size_t id1, size_t id2)
{
assert( doesVertexExist( id1 ) );
assert( doesVertexExist( id2 ) );
CV_Assert( doesVertexExist( id1 ) );
CV_Assert( doesVertexExist( id2 ) );
vertices[id1].neighbors.erase(id2);
vertices[id2].neighbors.erase(id1);
@ -463,8 +463,8 @@ void Graph::removeEdge(size_t id1, size_t id2)
bool Graph::areVerticesAdjacent(size_t id1, size_t id2) const
{
assert( doesVertexExist( id1 ) );
assert( doesVertexExist( id2 ) );
CV_Assert( doesVertexExist( id1 ) );
CV_Assert( doesVertexExist( id2 ) );
Vertices::const_iterator it = vertices.find(id1);
return it->second.neighbors.find(id2) != it->second.neighbors.end();
@ -477,7 +477,7 @@ size_t Graph::getVerticesCount() const
size_t Graph::getDegree(size_t id) const
{
assert( doesVertexExist(id) );
CV_Assert( doesVertexExist(id) );
Vertices::const_iterator it = vertices.find(id);
return it->second.neighbors.size();
@ -495,7 +495,7 @@ void Graph::floydWarshall(cv::Mat &distanceMatrix, int infinity) const
distanceMatrix.at<int> ((int)it1->first, (int)it1->first) = 0;
for (Neighbors::const_iterator it2 = it1->second.neighbors.begin(); it2 != it1->second.neighbors.end(); it2++)
{
assert( it1->first != *it2 );
CV_Assert( it1->first != *it2 );
distanceMatrix.at<int> ((int)it1->first, (int)*it2) = edgeWeight;
}
}
@ -524,7 +524,7 @@ void Graph::floydWarshall(cv::Mat &distanceMatrix, int infinity) const
const Graph::Neighbors& Graph::getNeighbors(size_t id) const
{
assert( doesVertexExist(id) );
CV_Assert( doesVertexExist(id) );
Vertices::const_iterator it = vertices.find(id);
return it->second.neighbors;
@ -604,7 +604,7 @@ bool CirclesGridFinder::findHoles()
}
default:
CV_Error(CV_StsBadArg, "Unkown pattern type");
CV_Error(Error::StsBadArg, "Unkown pattern type");
}
return (isDetectionCorrect());
//CV_Error( 0, "Detection is not correct" );
@ -813,7 +813,7 @@ void CirclesGridFinder::findMCS(const std::vector<Point2f> &basis, std::vector<G
Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const std::vector<Point2f>& centers,
const std::vector<Point2f> &keypoints, std::vector<Point2f> &warpedKeypoints)
{
assert( !centers.empty() );
CV_Assert( !centers.empty() );
const float edgeLength = 30;
const Point2f offset(150, 150);
@ -832,7 +832,7 @@ Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const std::vector<Poin
}
}
Mat H = findHomography(Mat(centers), Mat(dstPoints), CV_RANSAC);
Mat H = findHomography(Mat(centers), Mat(dstPoints), RANSAC);
//Mat H = findHomography( Mat( corners ), Mat( dstPoints ) );
std::vector<Point2f> srcKeypoints;
@ -912,7 +912,7 @@ void CirclesGridFinder::findCandidateLine(std::vector<size_t> &line, size_t seed
}
}
assert( line.size() == seeds.size() );
CV_Assert( line.size() == seeds.size() );
}
void CirclesGridFinder::findCandidateHoles(std::vector<size_t> &above, std::vector<size_t> &below, bool addRow, Point2f basisVec,
@ -927,9 +927,9 @@ void CirclesGridFinder::findCandidateHoles(std::vector<size_t> &above, std::vect
size_t lastIdx = addRow ? holes.size() - 1 : holes[0].size() - 1;
findCandidateLine(below, lastIdx, addRow, basisVec, belowSeeds);
assert( below.size() == above.size() );
assert( belowSeeds.size() == aboveSeeds.size() );
assert( below.size() == belowSeeds.size() );
CV_Assert( below.size() == above.size() );
CV_Assert( belowSeeds.size() == aboveSeeds.size() );
CV_Assert( below.size() == belowSeeds.size() );
}
bool CirclesGridFinder::areCentersNew(const std::vector<size_t> &newCenters, const std::vector<std::vector<size_t> > &holes)
@ -1000,10 +1000,10 @@ void CirclesGridFinder::insertWinner(float aboveConfidence, float belowConfidenc
float CirclesGridFinder::computeGraphConfidence(const std::vector<Graph> &basisGraphs, bool addRow,
const std::vector<size_t> &points, const std::vector<size_t> &seeds)
{
assert( points.size() == seeds.size() );
CV_Assert( points.size() == seeds.size() );
float confidence = 0;
const size_t vCount = basisGraphs[0].getVerticesCount();
assert( basisGraphs[0].getVerticesCount() == basisGraphs[1].getVerticesCount() );
CV_Assert( basisGraphs[0].getVerticesCount() == basisGraphs[1].getVerticesCount() );
for (size_t i = 0; i < seeds.size(); i++)
{
@ -1087,7 +1087,7 @@ void CirclesGridFinder::findBasis(const std::vector<Point2f> &samples, std::vect
const int clustersCount = 4;
kmeans(Mat(samples).reshape(1, 0), clustersCount, bestLabels, termCriteria, parameters.kmeansAttempts,
KMEANS_RANDOM_CENTERS, centers);
assert( centers.type() == CV_32FC1 );
CV_Assert( centers.type() == CV_32FC1 );
std::vector<int> basisIndices;
//TODO: only remove duplicate
@ -1204,7 +1204,7 @@ void CirclesGridFinder::computeRNG(Graph &rng, std::vector<cv::Point2f> &vectors
void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessorMatrix)
{
assert( dm.type() == CV_32SC1 );
CV_Assert( dm.type() == CV_32SC1 );
predecessorMatrix.create(verticesCount, verticesCount, CV_32SC1);
predecessorMatrix = -1;
for (int i = 0; i < predecessorMatrix.rows; i++)
@ -1253,7 +1253,6 @@ size_t CirclesGridFinder::findLongestPath(std::vector<Graph> &basisGraphs, Path
double maxVal;
Point maxLoc;
assert (infinity < 0);
minMaxLoc(distanceMatrix, 0, &maxVal, 0, &maxLoc);
if (maxVal > longestPaths[0].length)
@ -1594,9 +1593,3 @@ size_t CirclesGridFinder::getFirstCorner(std::vector<Point> &largeCornerIndices,
return cornerIdx;
}
bool cv::findCirclesGridDefault( InputArray image, Size patternSize,
OutputArray centers, int flags )
{
return findCirclesGrid(image, patternSize, centers, flags);
}
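Note: findCirclesGridDefault was a plain forwarder, so removing it only requires callers to use findCirclesGrid directly. A minimal sketch (grid size and flag are placeholders):

    #include <vector>
    #include "opencv2/calib3d.hpp"

    bool detectGridSketch(const cv::Mat& gray, std::vector<cv::Point2f>& centers)
    {
        // before: findCirclesGridDefault(gray, Size(4, 11), centers, flags)
        return cv::findCirclesGrid(gray, cv::Size(4, 11), centers,
                                   cv::CALIB_CB_ASYMMETRIC_GRID);
    }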

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
/************************************************************************************\
Some backward compatibility stuff, to be moved to legacy or compat module

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
CvStereoBMState* cvCreateStereoBMState( int /*preset*/, int numberOfDisparities )
{
@ -83,10 +84,6 @@ void cvReleaseStereoBMState( CvStereoBMState** state )
cvFree( state );
}
template<> void cv::Ptr<CvStereoBMState>::delete_obj()
{ cvReleaseStereoBMState(&obj); }
void cvFindStereoCorrespondenceBM( const CvArr* leftarr, const CvArr* rightarr,
CvArr* disparr, CvStereoBMState* state )
{

View File

@ -2,6 +2,7 @@
#define epnp_h
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
class epnp {
public:

View File

@ -435,7 +435,7 @@ cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, double f
threshold /= focal;
Mat E;
if( method == CV_RANSAC )
if( method == RANSAC )
createRANSACPointSetRegistrator(new EMEstimatorCallback, 5, threshold, prob)->run(points1, points2, E, _mask);
else
createLMeDSPointSetRegistrator(new EMEstimatorCallback, 5, prob)->run(points1, points2, E, _mask);

View File

@ -181,12 +181,12 @@ public:
LtL[j][k] += Lx[j]*Lx[k] + Ly[j]*Ly[k];
}
completeSymm( _LtL );
eigen( _LtL, matW, matV );
_Htemp = _invHnorm*_H0;
_H0 = _Htemp*_Hnorm2;
_H0.convertTo(_model, _H0.type(), 1./_H0.at<double>(2,2) );
return 1;
}
@ -292,7 +292,7 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
{
npoints = p.checkVector(3, -1, false);
if( npoints < 0 )
CV_Error(CV_StsBadArg, "The input arrays should be 2D or 3D point sets");
CV_Error(Error::StsBadArg, "The input arrays should be 2D or 3D point sets");
if( npoints == 0 )
return Mat();
convertPointsFromHomogeneous(p, p);
@ -317,7 +317,7 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
else if( method == LMEDS )
result = createLMeDSPointSetRegistrator(cb, 4, confidence, maxIters)->run(src, dst, H, tempMask);
else
CV_Error(CV_StsBadArg, "Unknown estimation method");
CV_Error(Error::StsBadArg, "Unknown estimation method");
if( result && npoints > 4 )
{
@ -475,7 +475,7 @@ static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
return n;
}
static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
{
double a[9*9], w[9], v[9*9];
@ -585,11 +585,11 @@ static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
gemm( T2, F0, 1., 0, 0., TF, GEMM_1_T );
F0 = Mat(3, 3, CV_64F, fmatrix);
gemm( TF, T1, 1., 0, 0., F0, 0 );
// make F(3,3) = 1
if( fabs(F0.at<double>(2,2)) > FLT_EPSILON )
F0 *= 1./F0.at<double>(2,2);
return 1;
}
@ -671,7 +671,7 @@ cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
{
npoints = p.checkVector(3, -1, false);
if( npoints < 0 )
CV_Error(CV_StsBadArg, "The input arrays should be 2D or 3D point sets");
CV_Error(Error::StsBadArg, "The input arrays should be 2D or 3D point sets");
if( npoints == 0 )
return Mat();
convertPointsFromHomogeneous(p, p);
@ -739,7 +739,7 @@ void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
{
npoints = points.checkVector(3);
if( npoints < 0 )
CV_Error( CV_StsBadArg, "The input should be a 2D or 3D point set");
CV_Error( Error::StsBadArg, "The input should be a 2D or 3D point set");
Mat temp;
convertPointsFromHomogeneous(points, temp);
points = temp;
@ -893,7 +893,7 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
}
}
else
CV_Error(CV_StsUnsupportedFormat, "");
CV_Error(Error::StsUnsupportedFormat, "");
}
@ -974,7 +974,7 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
}
}
else
CV_Error(CV_StsUnsupportedFormat, "");
CV_Error(Error::StsUnsupportedFormat, "");
}
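Note: fundam.cpp consistently trades CV_RANSAC/CV_LMEDS and the CV_Sts* codes for cv::RANSAC/cv::LMEDS and cv::Error::Sts*. A call-site sketch of the migrated spelling (the reprojection threshold is arbitrary):

    #include <vector>
    #include "opencv2/calib3d.hpp"

    cv::Mat estimateHomographySketch(const std::vector<cv::Point2f>& pts1,
                                     const std::vector<cv::Point2f>& pts2)
    {
        if( pts1.size() < 4 || pts1.size() != pts2.size() )
            CV_Error(cv::Error::StsBadArg, "need at least 4 point pairs");  // was CV_StsBadArg
        // was: findHomography(pts1, pts2, CV_RANSAC, 3.)
        return cv::findHomography(pts1, pts2, cv::RANSAC, 3.);
    }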

View File

@ -39,6 +39,7 @@
//
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
/* POSIT structure */
struct CvPOSITObject

View File

@ -53,7 +53,7 @@ namespace cv
int RANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
if( modelPoints <= 0 )
CV_Error( CV_StsOutOfRange, "the number of model points should be positive" );
CV_Error( Error::StsOutOfRange, "the number of model points should be positive" );
p = MAX(p, 0.);
p = MIN(p, 1.);

View File

@ -108,7 +108,7 @@ static void findCorner(const std::vector<Point2f>& contour, Point2f point, Point
min_idx = (int)i;
}
}
assert(min_idx >= 0);
CV_Assert(min_idx >= 0);
// temporary solution, have to make something more precise
corner = contour[min_idx];

View File

@ -43,6 +43,8 @@
#include "precomp.hpp"
#include "epnp.h"
#include "p3p.h"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>
using namespace cv;
@ -57,7 +59,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
_tvec.create(3, 1, CV_64F);
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
if (flags == CV_EPNP)
if (flags == EPNP)
{
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
@ -68,7 +70,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
cv::Rodrigues(R, rvec);
return true;
}
else if (flags == CV_P3P)
else if (flags == P3P)
{
CV_Assert( npoints == 4);
cv::Mat undistortedPoints;
@ -81,7 +83,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
cv::Rodrigues(R, rvec);
return result;
}
else if (flags == CV_ITERATIVE)
else if (flags == ITERATIVE)
{
CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
@ -342,7 +344,7 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
{
if (flags != CV_P3P)
if (flags != P3P)
{
int i, pointsCount = (int)localInliers.size();
Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);

View File

@ -84,7 +84,7 @@ struct StereoBMParams
int disp12MaxDiff;
int dispType;
};
static void prefilterNorm( const Mat& src, Mat& dst, int winsize, int ftzero, uchar* buf )
{
@ -783,46 +783,46 @@ public:
{
params = StereoBMParams(_numDisparities, _SADWindowSize);
}
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr )
{
Mat left0 = leftarr.getMat(), right0 = rightarr.getMat();
int dtype = disparr.fixedType() ? disparr.type() : params.dispType;
if (left0.size() != right0.size())
CV_Error( CV_StsUnmatchedSizes, "All the images must have the same size" );
CV_Error( Error::StsUnmatchedSizes, "All the images must have the same size" );
if (left0.type() != CV_8UC1 || right0.type() != CV_8UC1)
CV_Error( CV_StsUnsupportedFormat, "Both input images must have CV_8UC1" );
CV_Error( Error::StsUnsupportedFormat, "Both input images must have CV_8UC1" );
if (dtype != CV_16SC1 && dtype != CV_32FC1)
CV_Error( CV_StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
CV_Error( Error::StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
disparr.create(left0.size(), dtype);
Mat disp0 = disparr.getMat();
if( params.preFilterType != PREFILTER_NORMALIZED_RESPONSE &&
params.preFilterType != PREFILTER_XSOBEL )
CV_Error( CV_StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE" );
CV_Error( Error::StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE" );
if( params.preFilterSize < 5 || params.preFilterSize > 255 || params.preFilterSize % 2 == 0 )
CV_Error( CV_StsOutOfRange, "preFilterSize must be odd and be within 5..255" );
CV_Error( Error::StsOutOfRange, "preFilterSize must be odd and be within 5..255" );
if( params.preFilterCap < 1 || params.preFilterCap > 63 )
CV_Error( CV_StsOutOfRange, "preFilterCap must be within 1..63" );
CV_Error( Error::StsOutOfRange, "preFilterCap must be within 1..63" );
if( params.SADWindowSize < 5 || params.SADWindowSize > 255 || params.SADWindowSize % 2 == 0 ||
params.SADWindowSize >= std::min(left0.cols, left0.rows) )
CV_Error( CV_StsOutOfRange, "SADWindowSize must be odd, be within 5..255 and be not larger than image width or height" );
CV_Error( Error::StsOutOfRange, "SADWindowSize must be odd, be within 5..255 and be not larger than image width or height" );
if( params.numDisparities <= 0 || params.numDisparities % 16 != 0 )
CV_Error( CV_StsOutOfRange, "numDisparities must be positive and divisble by 16" );
CV_Error( Error::StsOutOfRange, "numDisparities must be positive and divisble by 16" );
if( params.textureThreshold < 0 )
CV_Error( CV_StsOutOfRange, "texture threshold must be non-negative" );
CV_Error( Error::StsOutOfRange, "texture threshold must be non-negative" );
if( params.uniquenessRatio < 0 )
CV_Error( CV_StsOutOfRange, "uniqueness ratio must be non-negative" );
CV_Error( Error::StsOutOfRange, "uniqueness ratio must be non-negative" );
preFilteredImg0.create( left0.size(), CV_8U );
preFilteredImg1.create( left0.size(), CV_8U );
@ -887,15 +887,15 @@ public:
R2.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
params.minDisparity, params.numDisparities,
params.SADWindowSize);
parallel_for_(Range(0, nstripes),
FindStereoCorrespInvoker(left, right, disp, &params, nstripes,
bufSize0, useShorts, validDisparityRect,
slidingSumBuf, cost));
if( params.speckleRange >= 0 && params.speckleWindowSize > 0 )
filterSpeckles(disp, FILTERED, params.speckleWindowSize, params.speckleRange, slidingSumBuf);
if (disp0.data != disp.data)
disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
}
@ -963,7 +963,7 @@ public:
void read(const FileNode& fn)
{
FileNode n = fn["name"];
CV_Assert( n.isString() && strcmp(n.node->data.str.ptr, name_) == 0 );
CV_Assert( n.isString() && String(n) == name_ );
params.minDisparity = (int)fn["minDisparity"];
params.numDisparities = (int)fn["numDisparities"];
params.SADWindowSize = (int)fn["blockSize"];

View File

@ -919,7 +919,7 @@ public:
void read(const FileNode& fn)
{
FileNode n = fn["name"];
CV_Assert( n.isString() && strcmp(n.node->data.str.ptr, name_) == 0 );
CV_Assert( n.isString() && String(n) == name_ );
params.minDisparity = (int)fn["minDisparity"];
params.numDisparities = (int)fn["numDisparities"];
params.SADWindowSize = (int)fn["blockSize"];
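Note: both block-matching classes stop reaching into the CvFileNode internals (n.node->data.str.ptr) and compare through cv::String instead, keeping read() on the C++ side of FileStorage. A small sketch of the same pattern outside StereoBM (struct and field names are illustrative):

    #include "opencv2/core.hpp"

    struct BlockMatcherParamsSketch
    {
        int blockSize;
        void read(const cv::FileNode& fn)
        {
            cv::FileNode n = fn["name"];
            CV_Assert( n.isString() && cv::String(n) == "my_block_matcher" );  // was strcmp(...)
            blockSize = (int)fn["blockSize"];
        }
    };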

View File

@ -40,6 +40,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
// cvCorrectMatches function is Copyright (C) 2009, Jostein Austvik Jacobsen.
// cvTriangulatePoints function is derived from icvReconstructPointsFor3View, originally by Valery Mosyagin.

View File

@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <limits>

View File

@ -327,7 +327,7 @@ protected:
Mat camMat_est = Mat::eye(3, 3, CV_64F), distCoeffs_est = Mat::zeros(1, 5, CV_64F);
vector<Mat> rvecs_est, tvecs_est;
int flags = /*CV_CALIB_FIX_K3|*/CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6; //CALIB_FIX_K3; //CALIB_FIX_ASPECT_RATIO | | CALIB_ZERO_TANGENT_DIST;
int flags = /*CALIB_FIX_K3|*/CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6; //CALIB_FIX_K3; //CALIB_FIX_ASPECT_RATIO | | CALIB_ZERO_TANGENT_DIST;
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, DBL_EPSILON);
double rep_error = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs_est, tvecs_est, flags, criteria);
rep_error /= brdsNum * cornersSize.area();

View File

@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "test_chessboardgenerator.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>

View File

@ -161,15 +161,15 @@ Mat cv::ChessBoardGenerator::generateChessBoard(const Mat& bg, const Mat& camMat
if (rendererResolutionMultiplier == 1)
{
result = bg.clone();
drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
drawContours(result, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
drawContours(result, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
}
else
{
Mat tmp;
resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
drawContours(tmp, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
}
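Note: the drawing constants lose their C prefixes here (CV_FILLED becomes FILLED, CV_AA becomes LINE_AA). A one-call sketch of the migrated form:

    #include <vector>
    #include "opencv2/imgproc.hpp"

    void fillContoursSketch(cv::Mat& canvas,
                            const std::vector<std::vector<cv::Point> >& contours)
    {
        // was: drawContours(canvas, contours, -1, Scalar::all(255), CV_FILLED, CV_AA)
        cv::drawContours(canvas, contours, -1, cv::Scalar::all(255),
                         cv::FILLED, cv::LINE_AA);
    }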

View File

@ -57,14 +57,14 @@ void show_points( const Mat& gray, const Mat& u, const vector<Point2f>& v, Size
merge(vector<Mat>(3, gray), rgb);
for(size_t i = 0; i < v.size(); i++ )
circle( rgb, v[i], 3, CV_RGB(255, 0, 0), CV_FILLED);
circle( rgb, v[i], 3, Scalar(255, 0, 0), FILLED);
if( !u.empty() )
{
const Point2f* u_data = u.ptr<Point2f>();
size_t count = u.cols * u.rows;
for(size_t i = 0; i < count; i++ )
circle( rgb, u_data[i], 3, CV_RGB(0, 255, 0), CV_FILLED);
circle( rgb, u_data[i], 3, Scalar(0, 255, 0), FILLED);
}
if (!v.empty())
{
@ -208,7 +208,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
}
int progress = 0;
int max_idx = board_list.node->data.seq->total/2;
int max_idx = board_list.size()/2;
double sum_error = 0.0;
int count = 0;
@ -244,7 +244,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
switch( pattern )
{
case CHESSBOARD:
result = findChessboardCorners(gray, pattern_size, v, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
result = findChessboardCorners(gray, pattern_size, v, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
break;
case CIRCLES_GRID:
result = findCirclesGrid(gray, pattern_size, v);
@ -459,7 +459,7 @@ bool CV_ChessboardDetectorTest::checkByGenerator()
vector<Point>& cnt = cnts[0];
cnt.push_back(cg[ 0]); cnt.push_back(cg[0+2]);
cnt.push_back(cg[7+0]); cnt.push_back(cg[7+2]);
cv::drawContours(cb, cnts, -1, Scalar::all(128), CV_FILLED);
cv::drawContours(cb, cnts, -1, Scalar::all(128), FILLED);
found = findChessboardCorners(cb, cbg.cornersSize(), corners_found);
if (found)

View File

@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "test_chessboardgenerator.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <limits>

View File

@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
class CV_ChessboardDetectorTimingTest : public cvtest::BaseTest
{

View File

@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
using namespace cv;
using namespace std;

View File

@ -65,7 +65,7 @@
#define METHODS_COUNT 3
int NORM_TYPE[COUNT_NORM_TYPES] = {cv::NORM_L1, cv::NORM_L2, cv::NORM_INF};
int METHOD[METHODS_COUNT] = {0, CV_RANSAC, CV_LMEDS};
int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS};
using namespace cv;
using namespace std;
@ -309,7 +309,7 @@ void CV_HomographyTest::run(int)
switch (method)
{
case 0:
case CV_LMEDS:
case LMEDS:
{
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, method),
cv::findHomography(src_mat_2f, dst_vec, method),
@ -339,14 +339,14 @@ void CV_HomographyTest::run(int)
continue;
}
case CV_RANSAC:
case RANSAC:
{
cv::Mat mask [4]; double diff;
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, CV_RANSAC, reproj_threshold, mask[0]),
cv::findHomography(src_mat_2f, dst_vec, CV_RANSAC, reproj_threshold, mask[1]),
cv::findHomography(src_vec, dst_mat_2f, CV_RANSAC, reproj_threshold, mask[2]),
cv::findHomography(src_vec, dst_vec, CV_RANSAC, reproj_threshold, mask[3]) };
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask[0]),
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask[1]),
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask[2]),
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask[3]) };
for (int j = 0; j < 4; ++j)
{
@ -411,7 +411,7 @@ void CV_HomographyTest::run(int)
switch (method)
{
case 0:
case CV_LMEDS:
case LMEDS:
{
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f),
cv::findHomography(src_mat_2f, dst_vec),
@ -466,14 +466,14 @@ void CV_HomographyTest::run(int)
continue;
}
case CV_RANSAC:
case RANSAC:
{
cv::Mat mask_res [4];
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, CV_RANSAC, reproj_threshold, mask_res[0]),
cv::findHomography(src_mat_2f, dst_vec, CV_RANSAC, reproj_threshold, mask_res[1]),
cv::findHomography(src_vec, dst_mat_2f, CV_RANSAC, reproj_threshold, mask_res[2]),
cv::findHomography(src_vec, dst_vec, CV_RANSAC, reproj_threshold, mask_res[3]) };
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask_res[0]),
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask_res[1]),
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask_res[2]),
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask_res[3]) };
for (int j = 0; j < 4; ++j)
{

View File

@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
using namespace cv;
using namespace std;

View File

@ -41,6 +41,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <string>
#include <limits>

View File

@ -54,9 +54,9 @@ class CV_solvePnPRansac_Test : public cvtest::BaseTest
public:
CV_solvePnPRansac_Test()
{
eps[CV_ITERATIVE] = 1.0e-2;
eps[CV_EPNP] = 1.0e-2;
eps[CV_P3P] = 1.0e-2;
eps[ITERATIVE] = 1.0e-2;
eps[EPNP] = 1.0e-2;
eps[P3P] = 1.0e-2;
totalTestsCount = 10;
}
~CV_solvePnPRansac_Test() {}
@ -193,9 +193,9 @@ class CV_solvePnP_Test : public CV_solvePnPRansac_Test
public:
CV_solvePnP_Test()
{
eps[CV_ITERATIVE] = 1.0e-6;
eps[CV_EPNP] = 1.0e-6;
eps[CV_P3P] = 1.0e-4;
eps[ITERATIVE] = 1.0e-6;
eps[EPNP] = 1.0e-6;
eps[P3P] = 1.0e-4;
totalTestsCount = 1000;
}
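Note: the accuracy thresholds are now indexed by the unprefixed flags. For reference, a hedged sketch of the RANSAC variant with every tuning parameter left at its default; the point containers are assumed to be filled elsewhere:

    #include <vector>
    #include "opencv2/calib3d.hpp"

    void ransacPnPSketch(const std::vector<cv::Point3f>& objectPoints,
                         const std::vector<cv::Point2f>& imagePoints,
                         const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        cv::Mat rvec, tvec;
        // extra tuning parameters (iteration count, reprojection error, flag) left at defaults
        cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                           rvec, tvec);
    }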

View File

@ -75,7 +75,7 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
if( !texturelessMask && !texturedMask )
return;
if( _img.empty() )
CV_Error( CV_StsBadArg, "img is empty" );
CV_Error( Error::StsBadArg, "img is empty" );
Mat img = _img;
if( _img.channels() > 1)
@ -95,21 +95,21 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
void checkTypeAndSizeOfDisp( const Mat& dispMap, const Size* sz )
{
if( dispMap.empty() )
CV_Error( CV_StsBadArg, "dispMap is empty" );
CV_Error( Error::StsBadArg, "dispMap is empty" );
if( dispMap.type() != CV_32FC1 )
CV_Error( CV_StsBadArg, "dispMap must have CV_32FC1 type" );
CV_Error( Error::StsBadArg, "dispMap must have CV_32FC1 type" );
if( sz && (dispMap.rows != sz->height || dispMap.cols != sz->width) )
CV_Error( CV_StsBadArg, "dispMap has incorrect size" );
CV_Error( Error::StsBadArg, "dispMap has incorrect size" );
}
void checkTypeAndSizeOfMask( const Mat& mask, Size sz )
{
if( mask.empty() )
CV_Error( CV_StsBadArg, "mask is empty" );
CV_Error( Error::StsBadArg, "mask is empty" );
if( mask.type() != CV_8UC1 )
CV_Error( CV_StsBadArg, "mask must have CV_8UC1 type" );
CV_Error( Error::StsBadArg, "mask must have CV_8UC1 type" );
if( mask.rows != sz.height || mask.cols != sz.width )
CV_Error( CV_StsBadArg, "mask has incorrect size" );
CV_Error( Error::StsBadArg, "mask has incorrect size" );
}
void checkDispMapsAndUnknDispMasks( const Mat& leftDispMap, const Mat& rightDispMap,
@ -143,7 +143,7 @@ void checkDispMapsAndUnknDispMasks( const Mat& leftDispMap, const Mat& rightDisp
minMaxLoc( rightDispMap, &rightMinVal, 0, 0, 0, ~rightUnknDispMask );
}
if( leftMinVal < 0 || rightMinVal < 0)
CV_Error( CV_StsBadArg, "known disparity values must be positive" );
CV_Error( Error::StsBadArg, "known disparity values must be positive" );
}
/*
@ -163,7 +163,7 @@ void computeOcclusionBasedMasks( const Mat& leftDisp, const Mat& _rightDisp,
if( _rightDisp.empty() )
{
if( !rightUnknDispMask.empty() )
CV_Error( CV_StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
CV_Error( Error::StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
rightDisp.create(leftDisp.size(), CV_32FC1);
rightDisp.setTo(Scalar::all(0) );
for( int leftY = 0; leftY < leftDisp.rows; leftY++ )
@ -230,9 +230,9 @@ void computeDepthDiscontMask( const Mat& disp, Mat& depthDiscontMask, const Mat&
float dispGap = EVAL_DISP_GAP, int discontWidth = EVAL_DISCONT_WIDTH )
{
if( disp.empty() )
CV_Error( CV_StsBadArg, "disp is empty" );
CV_Error( Error::StsBadArg, "disp is empty" );
if( disp.type() != CV_32FC1 )
CV_Error( CV_StsBadArg, "disp must have CV_32FC1 type" );
CV_Error( Error::StsBadArg, "disp must have CV_32FC1 type" );
if( !unknDispMask.empty() )
checkTypeAndSizeOfMask( unknDispMask, disp.size() );
@ -571,9 +571,9 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
if( isWrite )
{
fs << caseNames[caseIdx] << "{";
cvWriteComment( fs.fs, RMS_STR.c_str(), 0 );
//cvWriteComment( fs.fs, RMS_STR.c_str(), 0 );
writeErrors( RMS_STR, rmss, &fs );
cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
//cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
writeErrors( BAD_PXLS_FRACTION_STR, badPxlsFractions, &fs );
fs << "}"; // datasetName
}

View File

@ -41,6 +41,7 @@
#include "precomp.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>
using namespace cv;

View File

@ -131,7 +131,7 @@ Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
curr_keys.push_back(curr_keypoints[matches[i].trainIdx].pt);
}
Mat T = findHomography(prev_keys, curr_keys, CV_LMEDS);
Mat T = findHomography(prev_keys, curr_keys, LMEDS);
prev_trackwindow.x += cvRound(T.at<double> (0, 2));
prev_trackwindow.y += cvRound(T.at<double> (1, 2));

View File

@ -43,10 +43,8 @@
#ifndef __OPENCV_FEATURES_2D_HPP__
#define __OPENCV_FEATURES_2D_HPP__
#ifdef __cplusplus
#include "opencv2/core.hpp"
#include "opencv2/flann/miniflann.hpp"
#include <limits>
namespace cv
{
@ -1521,8 +1519,4 @@ protected:
} /* namespace cv */
#endif /* __cplusplus */
#endif
/* End of file. */

View File

@ -241,7 +241,7 @@ TWeight GCGraph<TWeight>::maxFlow()
// find the minimum edge weight along the path
minWeight = edgePtr[e0].weight;
assert( minWeight > 0 );
CV_Assert( minWeight > 0 );
// k = 1: source tree, k = 0: destination tree
for( int k = 1; k >= 0; k-- )
{
@ -251,11 +251,11 @@ TWeight GCGraph<TWeight>::maxFlow()
break;
weight = edgePtr[ei^k].weight;
minWeight = MIN(minWeight, weight);
assert( minWeight > 0 );
CV_Assert( minWeight > 0 );
}
weight = fabs(v->weight);
minWeight = MIN(minWeight, weight);
assert( minWeight > 0 );
CV_Assert( minWeight > 0 );
}
// modify weights of the edges along the path and collect orphans
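Note: replacing assert with CV_Assert means the graph invariants are still checked in release builds and surface as a catchable cv::Exception instead of aborting the process. A tiny sketch of that failure mode:

    #include <iostream>
    #include "opencv2/core.hpp"

    void checkWeightSketch(double minWeight)
    {
        try
        {
            CV_Assert( minWeight > 0 );   // throws cv::Exception even with NDEBUG set
        }
        catch( const cv::Exception& e )
        {
            std::cerr << "graph invariant violated: " << e.what() << std::endl;
        }
    }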

View File

@ -188,13 +188,13 @@ public class Calib3dTest extends OpenCVTestCase {
assertTrue(!corners.empty());
}
public void testFindCirclesGridDefaultMatSizeMat() {
public void testFindCirclesGridMatSizeMat() {
int size = 300;
Mat img = new Mat(size, size, CvType.CV_8U);
img.setTo(new Scalar(255));
Mat centers = new Mat();
assertFalse(Calib3d.findCirclesGridDefault(img, new Size(5, 5), centers));
assertFalse(Calib3d.findCirclesGrid(img, new Size(5, 5), centers));
for (int i = 0; i < 5; i++)
for (int j = 0; j < 5; j++) {
@ -202,20 +202,20 @@ public class Calib3dTest extends OpenCVTestCase {
Core.circle(img, pt, 10, new Scalar(0), -1);
}
assertTrue(Calib3d.findCirclesGridDefault(img, new Size(5, 5), centers));
assertTrue(Calib3d.findCirclesGrid(img, new Size(5, 5), centers));
assertEquals(25, centers.rows());
assertEquals(1, centers.cols());
assertEquals(CvType.CV_32FC2, centers.type());
}
public void testFindCirclesGridDefaultMatSizeMatInt() {
public void testFindCirclesGridMatSizeMatInt() {
int size = 300;
Mat img = new Mat(size, size, CvType.CV_8U);
img.setTo(new Scalar(255));
Mat centers = new Mat();
assertFalse(Calib3d.findCirclesGridDefault(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
assertFalse(Calib3d.findCirclesGrid(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
| Calib3d.CALIB_CB_ASYMMETRIC_GRID));
int step = size * 2 / 15;
@ -227,7 +227,7 @@ public class Calib3dTest extends OpenCVTestCase {
Core.circle(img, pt, 10, new Scalar(0), -1);
}
assertTrue(Calib3d.findCirclesGridDefault(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
assertTrue(Calib3d.findCirclesGrid(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
| Calib3d.CALIB_CB_ASYMMETRIC_GRID));
assertEquals(15, centers.rows());

View File

@ -43,11 +43,11 @@
#define __OPENCV_LEGACY_HPP__
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include "opencv2/ml.hpp"
#ifdef __cplusplus
#include "opencv2/features2d.hpp"
extern "C" {
#endif

View File

@ -40,6 +40,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <stdio.h>
#include <map>

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
CvMat cvMatArray( int rows, int cols, int type,
int count, void* data)

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d.hpp"
#include <stdio.h>
namespace cv

View File

@ -40,6 +40,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
//#include "cvtypes.h"
#include <float.h>

View File

@ -275,12 +275,16 @@ static Mat readMatFromBin( const string& filename )
size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);
uchar* data = (uchar*)cvAlloc(dataSize);
size_t elements_read = fread( (void*)data, 1, dataSize, f );
size_t step = dataSize / rows / CV_ELEM_SIZE(type);
CV_Assert(step >= (size_t)cols);
Mat m = Mat( rows, step, type).colRange(0, cols);
size_t elements_read = fread( m.ptr(), 1, dataSize, f );
CV_Assert(elements_read == (size_t)(dataSize));
fclose(f);
return Mat( rows, cols, type, data );
return m;
}
return Mat();
}
@ -402,7 +406,7 @@ protected:
double t = (double)getTickCount();
dextractor->compute( img, keypoints, calcDescriptors );
t = getTickCount() - t;
ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)cvGetTickFrequency()*1000.)/calcDescriptors.rows );
ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)getTickFrequency()*1000.)/calcDescriptors.rows );
if( calcDescriptors.rows != (int)keypoints.size() )
{

View File

@ -80,7 +80,7 @@ TEST_P(HoughCircles, Accuracy)
cv::ocl::oclMat d_src(src);
cv::ocl::oclMat d_circles;
cv::ocl::HoughCircles(d_src, d_circles, CV_HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
cv::ocl::HoughCircles(d_src, d_circles, cv::HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
ASSERT_TRUE(d_circles.rows > 0);
cv::Mat circles;

View File

@ -45,6 +45,7 @@
#include "opencv2/core.hpp"
#include "warpers.hpp" // Make your IDE see declarations
#include <limits>
namespace cv {
namespace detail {

View File

@ -55,7 +55,7 @@ Ptr<Blender> Blender::createDefault(int type, bool try_gpu)
return new FeatherBlender();
if (type == MULTI_BAND)
return new MultiBandBlender(try_gpu);
CV_Error(CV_StsBadArg, "unsupported blending method");
CV_Error(Error::StsBadArg, "unsupported blending method");
return NULL;
}

View File

@ -53,7 +53,7 @@ Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
return new GainCompensator();
if (type == GAIN_BLOCKS)
return new BlocksGainCompensator();
CV_Error(CV_StsBadArg, "unsupported exposure compensation method");
CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
return NULL;
}

View File

@ -322,7 +322,7 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int
{
surf = Algorithm::create<Feature2D>("Feature2D.SURF");
if( surf.empty() )
CV_Error( CV_StsNotImplemented, "OpenCV was built without SURF support" );
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
surf->set("hessianThreshold", hess_thresh);
surf->set("nOctaves", num_octaves);
surf->set("nOctaveLayers", num_layers);
@ -333,7 +333,7 @@ SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int
extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");
if( detector_.empty() || extractor_.empty() )
CV_Error( CV_StsNotImplemented, "OpenCV was built without SURF support" );
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
detector_->set("hessianThreshold", hess_thresh);
detector_->set("nOctaves", num_octaves);
@ -388,7 +388,7 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
} else if (image.type() == CV_8UC1) {
gray_image=image;
} else {
CV_Error(CV_StsUnsupportedFormat, "");
CV_Error(Error::StsUnsupportedFormat, "");
}
if (grid_size.area() == 1)
@ -579,7 +579,7 @@ void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFea
}
// Find pair-wise motion
matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, CV_RANSAC);
matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, RANSAC);
if (matches_info.H.empty() || std::abs(determinant(matches_info.H)) < std::numeric_limits<double>::epsilon())
return;
@ -626,7 +626,7 @@ void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFea
}
// Rerun motion estimation on inliers only
matches_info.H = findHomography(src_points, dst_points, CV_RANSAC);
matches_info.H = findHomography(src_points, dst_points, RANSAC);
}
void BestOf2NearestMatcher::collectGarbage()

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
using namespace cv;
using namespace cv::detail;

View File

@ -716,7 +716,7 @@ void DpSeamFinder::computeCosts(
else if (image1.type() == CV_8UC4 && image2.type() == CV_8UC4)
diff = diffL2Square4<uchar>;
else
CV_Error(CV_StsBadArg, "both images must have CV_32FC3(4) or CV_8UC3(4) type");
CV_Error(Error::StsBadArg, "both images must have CV_32FC3(4) or CV_8UC3(4) type");
int l = comp+1;
Rect roi(tls_[comp], brs_[comp]);
@ -1279,7 +1279,7 @@ void GraphCutSeamFinder::Impl::findInPair(size_t first, size_t second, Rect roi)
submask1, submask2, graph);
break;
default:
CV_Error(CV_StsBadArg, "unsupported pixel similarity measure");
CV_Error(Error::StsBadArg, "unsupported pixel similarity measure");
}
graph.maxFlow();
@ -1420,7 +1420,7 @@ void GraphCutSeamFinderGpu::findInPair(size_t first, size_t second, Rect roi)
submask1, submask2, terminals, leftT, rightT, top, bottom);
break;
default:
CV_Error(CV_StsBadArg, "unsupported pixel similarity measure");
CV_Error(Error::StsBadArg, "unsupported pixel similarity measure");
}
gpu::GpuMat terminals_d(terminals);

View File

@ -471,7 +471,7 @@ Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bo
else
{
std::vector<uchar> mask;
M = findHomography(points0, points1, mask, CV_LMEDS);
M = findHomography(points0, points1, mask, LMEDS);
for (int i = 0; i < npoints; ++i)
if (mask[i]) ninliers++;
}
@ -504,7 +504,7 @@ Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok
#ifndef HAVE_CLP
CV_Error(CV_StsError, "The library is built without Clp support");
CV_Error(Error::StsError, "The library is built without Clp support");
if (ok) *ok = false;
return Mat::eye(3, 3, CV_32F);

View File

@ -326,7 +326,7 @@ MotionInpainter::MotionInpainter()
#ifdef HAVE_OPENCV_GPU
setOptFlowEstimator(new DensePyrLkOptFlowEstimatorGpu());
#else
CV_Error(CV_StsNotImplemented, "Current implementation of MotionInpainter requires GPU");
CV_Error(Error::StsNotImplemented, "Current implementation of MotionInpainter requires GPU");
#endif
setFlowErrorThreshold(1e-4f);
setDistThreshold(5.f);

View File

@ -134,7 +134,7 @@ LpMotionStabilizer::LpMotionStabilizer(MotionModel model)
void LpMotionStabilizer::stabilize(int, const std::vector<Mat>&, std::pair<int,int>, Mat*)
{
CV_Error(CV_StsError, "The library is built without Clp support");
CV_Error(Error::StsError, "The library is built without Clp support");
}
#else

View File

@ -81,14 +81,14 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
if( flags & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs,
flags|CV_CALIB_FIX_K3/*|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6*/);
flags|CALIB_FIX_K3/*|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6*/);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
if(!ok)
{
@ -138,7 +138,7 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
cameraMatrix, distCoeffs,
imageSize, R, T, E, F,
TermCriteria(TermCriteria::COUNT, 30, 0),
CV_CALIB_FIX_INTRINSIC);
CALIB_FIX_INTRINSIC);
printf("Pair (1,%d) calibration reprojection error = %g\n", c, sqrt(err/(N*2)));
if( c == 2 )
{
@ -212,15 +212,15 @@ int main( int argc, char** argv )
{
if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
return printf("Invalid aspect ratio\n" ), -1;
flags |= CV_CALIB_FIX_ASPECT_RATIO;
flags |= CALIB_FIX_ASPECT_RATIO;
}
else if( strcmp( s, "-zt" ) == 0 )
{
flags |= CV_CALIB_ZERO_TANGENT_DIST;
flags |= CALIB_ZERO_TANGENT_DIST;
}
else if( strcmp( s, "-p" ) == 0 )
{
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
flags |= CALIB_FIX_PRINCIPAL_POINT;
}
else if( strcmp( s, "-o" ) == 0 )
{
@ -272,7 +272,7 @@ int main( int argc, char** argv )
vector<Point2f> ptvec;
imageSize = view.size();
cvtColor(view, viewGray, COLOR_BGR2GRAY);
bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
bool found = findChessboardCorners( view, boardSize, ptvec, CALIB_CB_ADAPTIVE_THRESH );
drawChessboardCorners( view, boardSize, Mat(ptvec), found );
if( found )
@ -291,13 +291,13 @@ int main( int argc, char** argv )
printf("Running calibration ...\n");
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5,
boardSize, squareSize, aspectRatio, flags|CALIB_FIX_K4|CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
fs.open(outputFilename, CV_STORAGE_WRITE);
fs.open(outputFilename, FileStorage::WRITE);
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
@ -323,7 +323,7 @@ int main( int argc, char** argv )
imgpt[0], imgpt[2],
imageSize, R12, T12, R13, T13,
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CV_CALIB_ZERO_DISPARITY);
imageSize, 0, 0, CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
fs << "R1" << R[0];
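Note: the sample keeps its pipeline and only respells the flags and the FileStorage open mode. A condensed sketch of the calibrate-and-save step with the new names (file name and flag set are placeholders):

    #include <vector>
    #include "opencv2/calib3d.hpp"

    double calibrateAndSaveSketch(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                                  const std::vector<std::vector<cv::Point2f> >& imagePoints,
                                  cv::Size imageSize)
    {
        cv::Mat cameraMatrix = cv::Mat::eye(3, 3, CV_64F);
        cv::Mat distCoeffs = cv::Mat::zeros(5, 1, CV_64F);
        std::vector<cv::Mat> rvecs, tvecs;
        double err = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                         cameraMatrix, distCoeffs, rvecs, tvecs,
                                         cv::CALIB_FIX_K3 | cv::CALIB_ZERO_TANGENT_DIST);

        cv::FileStorage fs("camera.yml", cv::FileStorage::WRITE);   // was CV_STORAGE_WRITE
        fs << "cameraMatrix" << cameraMatrix << "distCoeffs" << distCoeffs;
        return err;
    }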

View File

@ -360,7 +360,7 @@ static void triangulatePoint_test(void)
pts[0] = imgpt1[i]; pts[1] = imgpt2[i];
objptt[i] = triangulatePoint(pts, Rv, tv, cameraMatrix);
}
double err = norm(Mat(objpt), Mat(objptt), CV_C);
double err = norm(Mat(objpt), Mat(objptt), NORM_INF);
CV_Assert(err < 1e-1);
}
@ -694,7 +694,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
projectPoints(Mat(model.points), Rs[i], ts[i], cameraMatrix, Mat(), imagePoints);
for( int k = 0; k < (int)imagePoints.size(); k++ )
circle(img, imagePoints[k], 2, Scalar(0,255,0), -1, CV_AA, 0);
circle(img, imagePoints[k], 2, Scalar(0,255,0), -1, LINE_AA, 0);
imshow("Test", img);
int c = waitKey();

View File

@ -96,7 +96,7 @@ static double computeReprojectionErrors(
{
projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i],
cameraMatrix, distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2);
int n = (int)objectPoints[i].size();
perViewErrors[i] = (float)std::sqrt(err*err/n);
totalErr += err*err;
@ -128,7 +128,7 @@ static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point
break;
default:
CV_Error(CV_StsBadArg, "Unknown pattern type\n");
CV_Error(Error::StsBadArg, "Unknown pattern type\n");
}
}
@ -141,7 +141,7 @@ static bool runCalibration( vector<vector<Point2f> > imagePoints,
double& totalAvgErr)
{
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
if( flags & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
distCoeffs = Mat::zeros(8, 1, CV_64F);
@ -152,8 +152,8 @@ static bool runCalibration( vector<vector<Point2f> > imagePoints,
objectPoints.resize(imagePoints.size(),objectPoints[0]);
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
///*|CV_CALIB_FIX_K3*/|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
distCoeffs, rvecs, tvecs, flags|CALIB_FIX_K4|CALIB_FIX_K5);
///*|CALIB_FIX_K3*/|CALIB_FIX_K4|CALIB_FIX_K5);
printf("RMS error reported by calibrateCamera: %g\n", rms);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
@ -192,17 +192,17 @@ static void saveCameraParams( const string& filename,
fs << "board_height" << boardSize.height;
fs << "square_size" << squareSize;
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
if( flags & CALIB_FIX_ASPECT_RATIO )
fs << "aspectRatio" << aspectRatio;
if( flags != 0 )
{
sprintf( buf, "flags: %s%s%s%s",
flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
flags & CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
flags & CV_CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
cvWriteComment( *fs, buf, 0 );
flags & CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
flags & CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
flags & CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
flags & CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
//cvWriteComment( *fs, buf, 0 );
}
fs << "flags" << flags;
@ -229,7 +229,7 @@ static void saveCameraParams( const string& filename,
r = rvecs[i].t();
t = tvecs[i].t();
}
cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
//cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "extrinsic_parameters" << bigmat;
}
@ -361,7 +361,7 @@ int main( int argc, char** argv )
{
if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
return printf("Invalid aspect ratio\n" ), -1;
flags |= CV_CALIB_FIX_ASPECT_RATIO;
flags |= CALIB_FIX_ASPECT_RATIO;
}
else if( strcmp( s, "-d" ) == 0 )
{
@ -378,11 +378,11 @@ int main( int argc, char** argv )
}
else if( strcmp( s, "-zt" ) == 0 )
{
flags |= CV_CALIB_ZERO_TANGENT_DIST;
flags |= CALIB_ZERO_TANGENT_DIST;
}
else if( strcmp( s, "-p" ) == 0 )
{
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
flags |= CALIB_FIX_PRINCIPAL_POINT;
}
else if( strcmp( s, "-v" ) == 0 )
{
@ -469,7 +469,7 @@ int main( int argc, char** argv )
{
case CHESSBOARD:
found = findChessboardCorners( view, boardSize, pointbuf,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
break;
case CIRCLES_GRID:
found = findCirclesGrid( view, boardSize, pointbuf );
@ -483,7 +483,7 @@ int main( int argc, char** argv )
// improve the found corners' coordinate accuracy
if( pattern == CHESSBOARD && found) cornerSubPix( viewGray, pointbuf, Size(11,11),
Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
Size(-1,-1), TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 30, 0.1 ));
if( mode == CAPTURING && found &&
(!capture.isOpened() || clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) )
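Note: corner refinement now spells the stopping rule with the scoped TermCriteria members rather than CV_TERMCRIT_*. A sketch of the detect-and-refine step as the sample now writes it:

    #include <vector>
    #include "opencv2/imgproc.hpp"
    #include "opencv2/calib3d.hpp"

    bool detectBoardSketch(const cv::Mat& view, cv::Size boardSize,
                           std::vector<cv::Point2f>& pointbuf)
    {
        cv::Mat gray;
        cv::cvtColor(view, gray, cv::COLOR_BGR2GRAY);
        bool found = cv::findChessboardCorners(view, boardSize, pointbuf,
            cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
        if( found )
            cv::cornerSubPix(gray, pointbuf, cv::Size(11, 11), cv::Size(-1, -1),
                cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
        return found;
    }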

View File

@ -259,15 +259,15 @@ Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat
if (rendererResolutionMultiplier == 1)
{
result = bg.clone();
drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
drawContours(result, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
drawContours(result, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
}
else
{
Mat tmp;
resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
drawContours(tmp, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
}
return result;

View File

@ -45,7 +45,7 @@ static int getMatcherFilterType( const string& str )
return NONE_FILTER;
if( str == "CrossCheckFilter" )
return CROSS_CHECK_FILTER;
CV_Error(CV_StsBadArg, "Invalid filter name");
CV_Error(Error::StsBadArg, "Invalid filter name");
return -1;
}
@ -109,12 +109,12 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
double ransacReprojThreshold, RNG& rng )
{
assert( !img1.empty() );
CV_Assert( !img1.empty() );
Mat H12;
if( isWarpPerspective )
warpPerspectiveRand(img1, img2, H12, rng );
else
assert( !img2.empty()/* && img2.cols==img1.cols && img2.rows==img1.rows*/ );
CV_Assert( !img2.empty()/* && img2.cols==img1.cols && img2.rows==img1.rows*/ );
cout << endl << "< Extracting keypoints from second image..." << endl;
vector<KeyPoint> keypoints2;
@ -189,7 +189,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
cout << "< Computing homography (RANSAC)..." << endl;
vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold );
H12 = findHomography( Mat(points1), Mat(points2), RANSAC, ransacReprojThreshold );
cout << ">" << endl;
}
@ -208,7 +208,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
matchesMask[i1] = 1;
}
// draw inliers
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(0, 0, 255), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
@ -218,7 +218,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
// draw outliers
for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
matchesMask[i1] = !matchesMask[i1];
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 0, 255), Scalar(255, 0, 0), matchesMask,
DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif

View File

@ -80,7 +80,7 @@ Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, cons
for (size_t i = 0; i < features1.size(); i++)
{
circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
circle(img_corr, features1[i].pt, 3, Scalar(255, 0, 0));
}
for (size_t i = 0; i < features2.size(); i++)

View File

@ -148,25 +148,25 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
if( shownFrame.data )
{
if( nobjpt == 1 )
circle(shownFrame, imgpt[0], 3, Scalar(0,255,0), -1, CV_AA);
circle(shownFrame, imgpt[0], 3, Scalar(0,255,0), -1, LINE_AA);
else if( nobjpt == 2 )
{
circle(shownFrame, imgpt[0], 3, Scalar(0,255,0), -1, CV_AA);
circle(shownFrame, imgpt[1], 3, Scalar(0,255,0), -1, CV_AA);
line(shownFrame, imgpt[0], imgpt[1], Scalar(0,255,0), 3, CV_AA);
circle(shownFrame, imgpt[0], 3, Scalar(0,255,0), -1, LINE_AA);
circle(shownFrame, imgpt[1], 3, Scalar(0,255,0), -1, LINE_AA);
line(shownFrame, imgpt[0], imgpt[1], Scalar(0,255,0), 3, LINE_AA);
}
else if( nobjpt == 3 )
for( int i = 0; i < 4; i++ )
{
circle(shownFrame, imgpt[i], 3, Scalar(0,255,0), -1, CV_AA);
line(shownFrame, imgpt[i], imgpt[(i+1)%4], Scalar(0,255,0), 3, CV_AA);
circle(shownFrame, imgpt[i], 3, Scalar(0,255,0), -1, LINE_AA);
line(shownFrame, imgpt[i], imgpt[(i+1)%4], Scalar(0,255,0), 3, LINE_AA);
}
else
for( int i = 0; i < 8; i++ )
{
circle(shownFrame, imgpt[i], 3, Scalar(0,255,0), -1, CV_AA);
line(shownFrame, imgpt[i], imgpt[(i+1)%4 + (i/4)*4], Scalar(0,255,0), 3, CV_AA);
line(shownFrame, imgpt[i], imgpt[i%4], Scalar(0,255,0), 3, CV_AA);
circle(shownFrame, imgpt[i], 3, Scalar(0,255,0), -1, LINE_AA);
line(shownFrame, imgpt[i], imgpt[(i+1)%4 + (i/4)*4], Scalar(0,255,0), 3, LINE_AA);
line(shownFrame, imgpt[i], imgpt[i%4], Scalar(0,255,0), 3, LINE_AA);
}
}

View File

@ -106,7 +106,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
else
resize(img, timg, Size(), scale, scale);
found = findChessboardCorners(timg, boardSize, corners,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if( found )
{
if( scale > 1 )
@ -135,7 +135,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
if( !found )
break;
cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
30, 0.01));
}
if( k == 2 )
@ -175,12 +175,12 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_RATIONAL_MODEL +
CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5),
CALIB_FIX_ASPECT_RATIO +
CALIB_ZERO_TANGENT_DIST +
CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL +
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5);
cout << "done with RMS error=" << rms << endl;
// CALIBRATION QUALITY CHECK
@ -214,7 +214,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
cout << "average reprojection err = " << err/npoints << endl;
// save intrinsic parameters
FileStorage fs("intrinsics.yml", CV_STORAGE_WRITE);
FileStorage fs("intrinsics.yml", FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
@ -232,7 +232,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
fs.open("extrinsics.yml", CV_STORAGE_WRITE);
fs.open("extrinsics.yml", FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
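Note: the stereo sample changes names only; the argument order of stereoCalibrate/stereoRectify is untouched. A trimmed sketch of the calibrate-then-rectify step with the migrated constants, assuming the per-view point lists and initial camera matrices come from the detection loop above:

    #include <vector>
    #include "opencv2/calib3d.hpp"

    void stereoSketch(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                      const std::vector<std::vector<cv::Point2f> >& imagePoints1,
                      const std::vector<std::vector<cv::Point2f> >& imagePoints2,
                      cv::Mat cameraMatrix[2], cv::Mat distCoeffs[2], cv::Size imageSize)
    {
        cv::Mat R, T, E, F;
        cv::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                            cameraMatrix[0], distCoeffs[0],
                            cameraMatrix[1], distCoeffs[1],
                            imageSize, R, T, E, F,
                            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, 1e-5),
                            cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_ZERO_TANGENT_DIST);

        cv::Mat R1, R2, P1, P2, Q;
        cv::stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1], distCoeffs[1],
                          imageSize, R, T, R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY);
    }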

View File

@ -24,7 +24,7 @@ class Settings
public:
Settings() : goodInput(false) {}
enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
enum InputType {INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST};
enum InputType { INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST };
void write(FileStorage& fs) const //Write serialization for this class
{
@ -120,9 +120,9 @@ public:
}
flag = 0;
if(calibFixPrincipalPoint) flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
if(calibZeroTangentDist) flag |= CV_CALIB_ZERO_TANGENT_DIST;
if(aspectRatio) flag |= CV_CALIB_FIX_ASPECT_RATIO;
if(calibFixPrincipalPoint) flag |= CALIB_FIX_PRINCIPAL_POINT;
if(calibZeroTangentDist) flag |= CALIB_ZERO_TANGENT_DIST;
if(aspectRatio) flag |= CALIB_FIX_ASPECT_RATIO;
calibrationPattern = NOT_EXISTING;
@ -272,7 +272,7 @@ int main(int argc, char* argv[])
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
@ -293,7 +293,7 @@ int main(int argc, char* argv[])
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
Size(-1,-1), TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 30, 0.1 ));
}
if( mode == CAPTURING && // For camera only take new samples after delay time
@ -392,7 +392,7 @@ static double computeReprojectionErrors( const vector<vector<Point3f> >& objectP
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2);
int n = (int)objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
@ -433,7 +433,7 @@ static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat
{
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
if( s.flag & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;
distCoeffs = Mat::zeros(8, 1, CV_64F);
@ -445,7 +445,7 @@ static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat
//Find intrinsic and extrinsic camera parameters
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
distCoeffs, rvecs, tvecs, s.flag|CALIB_FIX_K4|CALIB_FIX_K5);
cout << "Re-projection error reported by calibrateCamera: "<< rms << endl;
@ -481,17 +481,17 @@ static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, M
fs << "board_Height" << s.boardSize.height;
fs << "square_Size" << s.squareSize;
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
if( s.flag & CALIB_FIX_ASPECT_RATIO )
fs << "FixAspectRatio" << s.aspectRatio;
if( s.flag )
{
sprintf( buf, "flags: %s%s%s%s",
s.flag & CV_CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CV_CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
s.flag & CV_CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CV_CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
cvWriteComment( *fs, buf, 0 );
s.flag & CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
s.flag & CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
//cvWriteComment( *fs, buf, 0 );
}
@ -519,7 +519,7 @@ static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, M
r = rvecs[i].t();
t = tvecs[i].t();
}
cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
//cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "Extrinsic_Parameters" << bigmat;
}
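Note: the norm constants follow the same rename (CV_L2 becomes NORM_L2, CV_C becomes NORM_INF). A compact sketch of the per-view reprojection error these samples compute:

    #include <cmath>
    #include <vector>
    #include "opencv2/calib3d.hpp"

    double reprojectionErrorSketch(const std::vector<cv::Point3f>& objectPoints,
                                   const std::vector<cv::Point2f>& imagePoints,
                                   const cv::Mat& rvec, const cv::Mat& tvec,
                                   const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        std::vector<cv::Point2f> projected;
        cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projected);
        double err = cv::norm(imagePoints, projected, cv::NORM_L2);   // was CV_L2
        return std::sqrt(err * err / objectPoints.size());
    }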

View File

@ -92,12 +92,12 @@ int main( int argc, char** argv )
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
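Note: the tutorial drops both CV_RANSAC and the cvPoint() helpers; plain cv::Point values convert to Point2f and feed perspectiveTransform once H is estimated. A condensed sketch of that localization step:

    #include <vector>
    #include "opencv2/calib3d.hpp"

    void localizeObjectSketch(const std::vector<cv::Point2f>& obj,
                              const std::vector<cv::Point2f>& scene,
                              cv::Size objectImageSize)
    {
        cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC);   // was CV_RANSAC

        std::vector<cv::Point2f> objCorners(4), sceneCorners(4);
        objCorners[0] = cv::Point(0, 0);
        objCorners[1] = cv::Point(objectImageSize.width, 0);
        objCorners[2] = cv::Point(objectImageSize.width, objectImageSize.height);
        objCorners[3] = cv::Point(0, objectImageSize.height);
        cv::perspectiveTransform(objCorners, sceneCorners, H);
    }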

View File

@ -207,7 +207,7 @@ int main(int argc, char* argv[])
obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);