Merge pull request #3874 from paroj:calib_sample
This commit is contained in:
commit 5ab26e3202
@ -30,7 +30,7 @@ y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy]\f]
So we have five distortion parameters which in OpenCV are presented as one row matrix with 5
columns:

\f[Distortion_{coefficients}=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3)\f]
\f[distortion\_coefficients=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3)\f]
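As a quick illustration (not part of the sample; the numeric values are simply the example results shown in the Results section below), the five coefficients are packed in exactly this order into a 1x5 matrix and handed to @ref cv::undistort together with the camera matrix:
@code{.cpp}
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Camera matrix and distortion coefficients taken from the example output below.
    Mat cameraMatrix = (Mat_<double>(3, 3) << 657.47, 0, 319.5,
                                              0, 657.47, 239.5,
                                              0,      0,     1);
    Mat distCoeffs   = (Mat_<double>(1, 5) << -0.418, 0.507, 0, 0, -0.578);

    Mat view = imread("distorted.jpg"), undistorted;   // hypothetical input image
    if (view.empty())
        return -1;
    undistort(view, undistorted, cameraMatrix, distCoeffs);
    imwrite("undistorted.jpg", undistorted);
    return 0;
}
@endcode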

Now for the unit conversion we use the following formula:
@ -96,83 +96,30 @@ on how to do this you can find in the @ref tutorial_file_input_output_with_xml_y
Explanation
-----------

-# **Read the settings.**
@code{.cpp}
Settings s;
const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml";
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
if (!fs.isOpened())
{
cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
return -1;
}
fs["Settings"] >> s;
fs.release(); // close Settings file
-# **Read the settings**
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp file_read

if (!s.goodInput)
{
cout << "Invalid input detected. Application stopping. " << endl;
return -1;
}
@endcode
For this I've used the simple OpenCV class input operation. After reading the file I have an
additional post-processing function that checks the validity of the input. Only if all inputs are
good will the *goodInput* variable be true.
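(As a side note: the `>>` operator works on the user-defined `Settings` type because OpenCV's FileStorage mechanism looks for a matching free `read()` function. A minimal sketch of that mechanism, using a hypothetical `MySettings` struct rather than the sample's class, could look like this.)
@code{.cpp}
#include <opencv2/core.hpp>
using namespace cv;

struct MySettings
{
    int boardWidth = 0;
    void read(const FileNode& node)                 // read only the fields we care about
    {
        node["BoardSize_Width"] >> boardWidth;
    }
};

// Free function that FileStorage picks up when you write: fs["Settings"] >> s;
static void read(const FileNode& node, MySettings& x, const MySettings& default_value = MySettings())
{
    if (node.empty())
        x = default_value;
    else
        x.read(node);
}
@endcode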

-# **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big
-# **Get next input, if it fails or we have enough of them - calibrate**

After this we have a big
loop where we do the following operations: get the next image from the image list, camera or
video file. If this fails or we have enough images then we run the calibration process. In the case
of an image input we step out of the loop, and otherwise the remaining frames will be undistorted (if the
option is set) by changing from *DETECTION* mode to the *CALIBRATED* one.
@code{.cpp}
for(int i = 0;;++i)
{
Mat view;
bool blinkOutput = false;

view = s.nextImage();

//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
{
if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If no more images then run calibration, save and stop loop.
{
if( imagePoints.size() > 0 )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
break;
imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp get_input
For some cameras we may need to flip the input image. Here we do this too.

-# **Find the pattern in the current input**. The formation of the equations I mentioned above aims
-# **Find the pattern in the current input**

The formation of the equations I mentioned above aims
at finding the major patterns in the input: in the case of the chessboard these are the corners of the
squares and for the circles, well, the circles themselves. The positions of these will form the
result, which will be written into the *pointBuf* vector.
@code{.cpp}
vector<Point2f> pointBuf;

bool found;
switch( s.calibrationPattern ) // Find feature points on the input format
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
break;
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp find_pattern
Depending on the type of the input pattern you use either the @ref cv::findChessboardCorners or
the @ref cv::findCirclesGrid function. For both of them you pass the current image and the size
of the board and you'll get the positions of the patterns. Furthermore, they return a boolean
@ -188,109 +135,27 @@ Explanation
*imagePoints* vector to collect all of the equations into a single container. Finally, for
visualization feedback purposes we will draw the found points on the input image using the @ref
cv::drawChessboardCorners function.
@code{.cpp}
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( TermCriteria::EPS+TermCriteria::MAX_ITER, 30, 0.1 ));
}
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp pattern_found
-# **Show state and result to the user, plus command line control of the application**

if( mode == CAPTURING && // For camera only take new samples after delay time
(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
{
imagePoints.push_back(pointBuf);
prevTimestamp = clock();
blinkOutput = s.inputCapture.isOpened();
}

// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
@endcode
-# **Show state and result to the user, plus command line control of the application**. This part
shows text output on the image.
@code{.cpp}
//----------------------------- Output Text ------------------------------------------------
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);

if( mode == CAPTURING )
{
if(s.showUndistorsed)
msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
else
msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
}

putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);

if( blinkOutput )
bitwise_not(view, view);
@endcode
This part shows text output on the image.
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp output_text
If we ran the calibration and got the camera matrix with the distortion coefficients we may want to
correct the image using the @ref cv::undistort function:
@code{.cpp}
//------------------------- Video capture output undistorted ------------------------------
if( mode == CALIBRATED && s.showUndistorsed )
{
Mat temp = view.clone();
undistort(temp, view, cameraMatrix, distCoeffs);
}
//------------------------------ Show image and check for input commands -------------------
imshow("Image View", view);
@endcode
Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g*
we start again the detection process, and finally for the *ESC* key we quit the application:
@code{.cpp}
char key = waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
if( key == ESC_KEY )
break;
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp output_undistorted
Then we show the image and wait for an input key and if this is *u* we toggle the distortion removal,
if it is *g* we start again the detection process, and finally for the *ESC* key we quit the application:
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp await_input
-# **Show the distortion removal for the images too**

if( key == 'u' && mode == CALIBRATED )
s.showUndistorsed = !s.showUndistorsed;

if( s.inputCapture.isOpened() && key == 'g' )
{
mode = CAPTURING;
imagePoints.clear();
}
@endcode
-# **Show the distortion removal for the images too**. When you work with an image list it is not
When you work with an image list it is not
possible to remove the distortion inside the loop. Therefore, you must do this after the loop.
Taking advantage of this now I'll expand the @ref cv::undistort function, which in fact first
calls @ref cv::initUndistortRectifyMap to find the transformation matrices and then performs the
transformation using the @ref cv::remap function. Because, after a successful calibration, the map
calculation needs to be done only once, by using this expanded form you may speed up your
application:
@code{.cpp}
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);

for(int i = 0; i < (int)s.imageList.size(); i++ )
{
view = imread(s.imageList[i], 1);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
char c = waitKey();
if( c == ESC_KEY || c == 'q' || c == 'Q' )
break;
}
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp show_results

The calibration and save
------------------------
@ -304,24 +169,7 @@ Therefore in the first function we just split up these two processes. Because we
of the calibration variables we'll create these variables here and pass on both of them to the
calibration and saving function. Again, I'll not show the saving part as that has little in common
with the calibration. Explore the source file in order to find out how and what:
@code{.cpp}
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;

bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
reprojErrs, totalAvgErr);
cout << (ok ? "Calibration succeeded" : "Calibration failed")
<< ". avg re projection error = " << totalAvgErr ;

if( ok ) // save only if the calibration was done with success
saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
imagePoints, totalAvgErr);
return ok;
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp run_and_save
We do the calibration with the help of the @ref cv::calibrateCamera function. It has the following
parameters:

@ -331,29 +179,7 @@ parameters:
present. Because we use a single pattern for all the input images we can calculate this just
once and multiply it for all the other input views. We calculate the corner points with the
*calcBoardCornerPositions* function as:
@code{.cpp}
void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
corners.clear();

switch(patternType)
{
case Settings::CHESSBOARD:
case Settings::CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i )
for( int j = 0; j < boardSize.width; ++j )
corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
break;

case Settings::ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
break;
}
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp board_corners
And then multiply it as:
@code{.cpp}
vector<vector<Point3f> > objectPoints(1);
@ -365,12 +191,8 @@ parameters:
circle pattern). We have already collected this from @ref cv::findChessboardCorners or @ref
cv::findCirclesGrid function. We just need to pass it on.
- The size of the image acquired from the camera, video file or the images.
- The camera matrix. If we used the fixed aspect ratio option we need to set the \f$f_x\f$ to zero:
@code{.cpp}
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;
@endcode
- The camera matrix. If we used the fixed aspect ratio option we need to set \f$f_x\f$:
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp fixed_aspect
- The distortion coefficient matrix. Initialize with zero.
@code{.cpp}
distCoeffs = Mat::zeros(8, 1, CV_64F);
@ -393,33 +215,7 @@ double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
calculate the absolute norm between the points we got with our transformation (the reprojection) and
those produced by the corner/circle finding algorithm. To find the average error we calculate the
arithmetical mean of the errors calculated for all the calibration images.
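Expressed as a formula (the symbols below are introduced only for this explanation: \f$x_{ij}\f$ is the j-th detected point in view \f$i\f$, \f$\hat{x}_{ij}\f$ its reprojection and \f$N_i\f$ the number of points in that view), the value returned below is the overall RMS error:

\f[ err_{rms} = \sqrt{\frac{\sum_{i}\sum_{j} \left \| x_{ij} - \hat{x}_{ij} \right \|^2}{\sum_{i} N_i}}\f]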
@code{.cpp}
double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());

for( i = 0; i < (int)objectPoints.size(); ++i )
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix, // project
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2); // difference

int n = (int)objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n); // save for this view
totalErr += err*err; // sum it up
totalPoints += n;
}

return std::sqrt(totalErr/totalPoints); // calculate the arithmetical mean
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp compute_errors

Results
-------
@ -461,20 +257,20 @@ the input. Here's, how a detected pattern should look:
In both cases in the specified output XML/YAML file you'll find the camera and distortion
coefficients matrices:
@code{.xml}
<Camera_Matrix type_id="opencv-matrix">
<camera_matrix type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
6.5746697944293521e+002 0. 3.1950000000000000e+002 0.
6.5746697944293521e+002 2.3950000000000000e+002 0. 0. 1.</data></Camera_Matrix>
<Distortion_Coefficients type_id="opencv-matrix">
6.5746697944293521e+002 2.3950000000000000e+002 0. 0. 1.</data></camera_matrix>
<distortion_coefficients type_id="opencv-matrix">
<rows>5</rows>
<cols>1</cols>
<dt>d</dt>
<data>
-4.1802327176423804e-001 5.0715244063187526e-001 0. 0.
-5.7843597214487474e-001</data></Distortion_Coefficients>
-5.7843597214487474e-001</data></distortion_coefficients>
@endcode
Add these values as constants to your program, call the @ref cv::initUndistortRectifyMap and the
@ref cv::remap function to remove distortion and enjoy distortion free inputs for cheap and low
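As a rough standalone sketch of that advice (assuming the result was saved to out_camera_data.xml with the tag names shown above; adjust the file and node names to your own output), loading the parameters back and undistorting a camera stream could look like this:
@code{.cpp}
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Load the saved calibration result (assumed file and node names).
    FileStorage fs("out_camera_data.xml", FileStorage::READ);
    Mat cameraMatrix, distCoeffs;
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();

    VideoCapture cap(0);
    Mat view, rview, map1, map2;
    bool mapsReady = false;
    for (;;)
    {
        cap >> view;
        if (view.empty())
            break;
        if (!mapsReady) // the remap maps depend only on the image size, so compute them once
        {
            initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, view.size(), 1, view.size(), 0),
                view.size(), CV_16SC2, map1, map2);
            mapsReady = true;
        }
        remap(view, rview, map1, map2, INTER_LINEAR);
        imshow("Undistorted", rview);
        if ((char)waitKey(30) == 27) // ESC quits
            break;
    }
    return 0;
}
@endcode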
@ -34,7 +34,8 @@ public:

void write(FileStorage& fs) const //Write serialization for this class
{
fs << "{" << "BoardSize_Width" << boardSize.width
fs << "{"
<< "BoardSize_Width" << boardSize.width
<< "BoardSize_Height" << boardSize.height
<< "Square_Size" << squareSize
<< "Calibrate_Pattern" << patternToUse
@ -43,8 +44,8 @@ public:
<< "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist
<< "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint

<< "Write_DetectedFeaturePoints" << bwritePoints
<< "Write_extrinsicParameters" << bwriteExtrinsics
<< "Write_DetectedFeaturePoints" << writePoints
<< "Write_extrinsicParameters" << writeExtrinsics
<< "Write_outputFileName" << outputFileName

<< "Show_UndistortedImage" << showUndistorsed
@ -62,8 +63,8 @@ public:
node["Square_Size"] >> squareSize;
node["Calibrate_NrOfFrameToUse"] >> nrFrames;
node["Calibrate_FixAspectRatio"] >> aspectRatio;
node["Write_DetectedFeaturePoints"] >> bwritePoints;
node["Write_extrinsicParameters"] >> bwriteExtrinsics;
node["Write_DetectedFeaturePoints"] >> writePoints;
node["Write_extrinsicParameters"] >> writeExtrinsics;
node["Write_outputFileName"] >> outputFileName;
node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist;
node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint;
@ -71,9 +72,9 @@ public:
node["Show_UndistortedImage"] >> showUndistorsed;
node["Input"] >> input;
node["Input_Delay"] >> delay;
interprate();
validate();
}
void interprate()
void validate()
{
goodInput = true;
if (boardSize.width <= 0 || boardSize.height <= 0)
@ -105,10 +106,10 @@ public:
else
{
if (readStringList(input, imageList))
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
}
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
}
else
inputType = VIDEO_FILE;
}
@ -121,7 +122,7 @@ public:
}
if (inputType == INVALID)
{
cerr << " Inexistent input: " << input;
cerr << " Input does not exist: " << input;
goodInput = false;
}

@ -136,10 +137,10 @@ public:
if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID;
if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
if (calibrationPattern == NOT_EXISTING)
{
cerr << " Inexistent camera calibration mode: " << patternToUse << endl;
goodInput = false;
}
{
cerr << " Camera calibration mode does not exist: " << patternToUse << endl;
goodInput = false;
}
atImageList = 0;

}
@ -152,7 +153,7 @@ public:
inputCapture >> view0;
view0.copyTo(result);
}
else if( atImageList < (int)imageList.size() )
else if( atImageList < imageList.size() )
result = imread(imageList[atImageList++], IMREAD_COLOR);

return result;
@ -173,26 +174,24 @@ public:
return true;
}
public:
Size boardSize; // The size of the board -> Number of items by width and height
Pattern calibrationPattern;// One of the Chessboard, circles, or asymmetric circle pattern
float squareSize; // The size of a square in your defined unit (point, millimeter,etc).
int nrFrames; // The number of frames to use from the input for calibration
float aspectRatio; // The aspect ratio
int delay; // In case of a video input
bool bwritePoints; // Write detected feature points
bool bwriteExtrinsics; // Write extrinsic parameters
bool calibZeroTangentDist; // Assume zero tangential distortion
bool calibFixPrincipalPoint;// Fix the principal point at the center
bool flipVertical; // Flip the captured images around the horizontal axis
string outputFileName; // The name of the file where to write
bool showUndistorsed; // Show undistorted images after calibration
string input; // The input ->

Size boardSize; // The size of the board -> Number of items by width and height
Pattern calibrationPattern; // One of the Chessboard, circles, or asymmetric circle pattern
float squareSize; // The size of a square in your defined unit (point, millimeter,etc).
int nrFrames; // The number of frames to use from the input for calibration
float aspectRatio; // The aspect ratio
int delay; // In case of a video input
bool writePoints; // Write detected feature points
bool writeExtrinsics; // Write extrinsic parameters
bool calibZeroTangentDist; // Assume zero tangential distortion
bool calibFixPrincipalPoint; // Fix the principal point at the center
bool flipVertical; // Flip the captured images around the horizontal axis
string outputFileName; // The name of the file where to write
bool showUndistorsed; // Show undistorted images after calibration
string input; // The input ->

int cameraID;
vector<string> imageList;
int atImageList;
size_t atImageList;
VideoCapture inputCapture;
InputType inputType;
bool goodInput;
@ -204,7 +203,7 @@ private:

};

static void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
static inline void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
{
if(node.empty())
x = default_value;
@ -212,6 +211,11 @@ static void read(const FileNode& node, Settings& x, const Settings& default_valu
x.read(node);
}

static inline void write(FileStorage& fs, const String&, const Settings& s )
{
s.write(fs);
}

enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };

bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
@ -220,6 +224,8 @@ bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat&
int main(int argc, char* argv[])
{
help();

//! [file_read]
Settings s;
const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml";
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
@ -230,6 +236,10 @@ int main(int argc, char* argv[])
}
fs["Settings"] >> s;
fs.release(); // close Settings file
//! [file_read]

//FileStorage fout("settings.yml", FileStorage::WRITE); // write config as YAML
//fout << "Settings" << s;

if (!s.goodInput)
{
@ -245,32 +255,35 @@ int main(int argc, char* argv[])
const Scalar RED(0,0,255), GREEN(0,255,0);
const char ESC_KEY = 27;

for(int i = 0;;++i)
//! [get_input]
for(;;)
{
Mat view;
bool blinkOutput = false;
Mat view;
bool blinkOutput = false;

view = s.nextImage();
view = s.nextImage();

//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
{
//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames )
{
if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If no more images then run calibration, save and stop loop.
{
if( imagePoints.size() > 0 )
}
if(view.empty()) // If there are no more images stop the loop
{
// if calibration threshold was not reached yet, calibrate now
if( mode != CALIBRATED && !imagePoints.empty() )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
break;
}

}
//! [get_input]

imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );

//! [find_pattern]
vector<Point2f> pointBuf;

bool found;
@ -290,7 +303,8 @@ int main(int argc, char* argv[])
found = false;
break;
}

//! [find_pattern]
//! [pattern_found]
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
@ -313,8 +327,9 @@ int main(int argc, char* argv[])
// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}

//! [pattern_found]
//----------------------------- Output Text ------------------------------------------------
//! [output_text]
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
@ -333,15 +348,17 @@ int main(int argc, char* argv[])

if( blinkOutput )
bitwise_not(view, view);

//! [output_text]
//------------------------- Video capture output undistorted ------------------------------
//! [output_undistorted]
if( mode == CALIBRATED && s.showUndistorsed )
{
Mat temp = view.clone();
undistort(temp, view, cameraMatrix, distCoeffs);
}

//! [output_undistorted]
//------------------------------ Show image and check for input commands -------------------
//! [await_input]
imshow("Image View", view);
char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);

@ -356,9 +373,11 @@ int main(int argc, char* argv[])
mode = CAPTURING;
imagePoints.clear();
}
//! [await_input]
}

// -----------------------Show the undistorted image for the image list ------------------------
//! [show_results]
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
{
Mat view, rview, map1, map2;
@ -366,7 +385,7 @@ int main(int argc, char* argv[])
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);

for(int i = 0; i < (int)s.imageList.size(); i++ )
for(size_t i = 0; i < s.imageList.size(); i++ )
{
view = imread(s.imageList[i], 1);
if(view.empty())
@ -378,11 +397,12 @@ int main(int argc, char* argv[])
break;
}
}

//! [show_results]

return 0;
}

//! [compute_errors]
static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
@ -390,17 +410,16 @@ static double computeReprojectionErrors( const vector<vector<Point3f> >& objectP
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
size_t totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());

for( i = 0; i < (int)objectPoints.size(); ++i )
for(size_t i = 0; i < objectPoints.size(); ++i )
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2);
projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
err = norm(imagePoints[i], imagePoints2, NORM_L2);

int n = (int)objectPoints[i].size();
size_t n = objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
totalErr += err*err;
totalPoints += n;
@ -408,7 +427,8 @@ static double computeReprojectionErrors( const vector<vector<Point3f> >& objectP

return std::sqrt(totalErr/totalPoints);
}

//! [compute_errors]
//! [board_corners]
static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
@ -420,28 +440,28 @@ static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Po
case Settings::CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i )
for( int j = 0; j < boardSize.width; ++j )
corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
corners.push_back(Point3f(j*squareSize, i*squareSize, 0));
break;

case Settings::ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
corners.push_back(Point3f((2*j + i % 2)*squareSize, i*squareSize, 0));
break;
default:
break;
}
}

//! [board_corners]
static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
vector<float>& reprojErrs, double& totalAvgErr)
{

//! [fixed_aspect]
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;

cameraMatrix.at<double>(0,0) = s.aspectRatio;
//! [fixed_aspect]
distCoeffs = Mat::zeros(8, 1, CV_64F);

vector<vector<Point3f> > objectPoints(1);
@ -475,49 +495,48 @@ static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, M
time( &tm );
struct tm *t2 = localtime( &tm );
char buf[1024];
strftime( buf, sizeof(buf)-1, "%c", t2 );
strftime( buf, sizeof(buf), "%c", t2 );

fs << "calibration_Time" << buf;
fs << "calibration_time" << buf;

if( !rvecs.empty() || !reprojErrs.empty() )
fs << "nrOfFrames" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_Width" << imageSize.width;
fs << "image_Height" << imageSize.height;
fs << "board_Width" << s.boardSize.width;
fs << "board_Height" << s.boardSize.height;
fs << "square_Size" << s.squareSize;
fs << "nr_of_frames" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
fs << "board_width" << s.boardSize.width;
fs << "board_height" << s.boardSize.height;
fs << "square_size" << s.squareSize;

if( s.flag & CALIB_FIX_ASPECT_RATIO )
fs << "FixAspectRatio" << s.aspectRatio;
fs << "fix_aspect_ratio" << s.aspectRatio;

if( s.flag )
if (s.flag)
{
sprintf( buf, "flags: %s%s%s%s",
s.flag & CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
s.flag & CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
//cvWriteComment( *fs, buf, 0 );

sprintf(buf, "flags: %s%s%s%s",
s.flag & CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CALIB_FIX_ASPECT_RATIO ? " +fix_aspect_ratio" : "",
s.flag & CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "");
cvWriteComment(*fs, buf, 0);
}

fs << "flagValue" << s.flag;
fs << "flags" << s.flag;

fs << "Camera_Matrix" << cameraMatrix;
fs << "Distortion_Coefficients" << distCoeffs;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;

fs << "Avg_Reprojection_Error" << totalAvgErr;
if( !reprojErrs.empty() )
fs << "Per_View_Reprojection_Errors" << Mat(reprojErrs);
fs << "avg_reprojection_error" << totalAvgErr;
if (s.writeExtrinsics && !reprojErrs.empty())
fs << "per_view_reprojection_errors" << Mat(reprojErrs);

if( !rvecs.empty() && !tvecs.empty() )
if(s.writeExtrinsics && !rvecs.empty() && !tvecs.empty() )
{
CV_Assert(rvecs[0].type() == tvecs[0].type());
Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
for( int i = 0; i < (int)rvecs.size(); i++ )
for( size_t i = 0; i < rvecs.size(); i++ )
{
Mat r = bigmat(Range(i, i+1), Range(0,3));
Mat t = bigmat(Range(i, i+1), Range(3,6));
Mat r = bigmat(Range(int(i), int(i+1)), Range(0,3));
Mat t = bigmat(Range(int(i), int(i+1)), Range(3,6));

CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
@ -526,35 +545,38 @@ static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, M
t = tvecs[i].t();
}
//cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "Extrinsic_Parameters" << bigmat;
fs << "extrinsic_parameters" << bigmat;
}

if( !imagePoints.empty() )
if(s.writePoints && !imagePoints.empty() )
{
Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
for( int i = 0; i < (int)imagePoints.size(); i++ )
for( size_t i = 0; i < imagePoints.size(); i++ )
{
Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
Mat r = imagePtMat.row(int(i)).reshape(2, imagePtMat.cols);
Mat imgpti(imagePoints[i]);
imgpti.copyTo(r);
}
fs << "Image_points" << imagePtMat;
fs << "image_points" << imagePtMat;
}
}

bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
//! [run_and_save]
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints)
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;

bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
reprojErrs, totalAvgErr);
bool ok = runCalibration(s, imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, reprojErrs,
totalAvgErr);
cout << (ok ? "Calibration succeeded" : "Calibration failed")
<< ". avg re projection error = " << totalAvgErr ;
<< ". avg re projection error = " << totalAvgErr << endl;

if( ok )
saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
imagePoints, totalAvgErr);
if (ok)
saveCameraParams(s, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, reprojErrs, imagePoints,
totalAvgErr);
return ok;
}
//! [run_and_save]