The calib3d module in OpenCV is split into 3 modules: 3d, calib and stereo.

The stereo module in opencv_contrib is renamed to xstereo.
This commit is contained in:
Vadim Pisarevsky 2020-12-01 23:42:15 +03:00
parent 9d2eabaaa2
commit d6c699c014
232 changed files with 7663 additions and 7606 deletions

View File

@ -1,4 +1,4 @@
file(GLOB SRCS *.cpp)
ocv_add_application(opencv_createsamples
MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_calib3d opencv_features2d opencv_videoio
MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_3d opencv_features2d opencv_videoio
SRCS ${SRCS})

View File

@ -52,7 +52,7 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/3d.hpp"
#if defined __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"

View File

@ -1,4 +1,4 @@
set(DEPS opencv_core opencv_imgproc opencv_features2d opencv_highgui opencv_calib3d opencv_videoio)
set(DEPS opencv_core opencv_imgproc opencv_features2d opencv_highgui opencv_3d opencv_calib opencv_videoio)
if(${BUILD_opencv_aruco})
list(APPEND DEPS opencv_aruco)
endif()

View File

@ -8,9 +8,12 @@
#include <cmath>
#include <ctime>
#include <opencv2/calib3d.hpp>
#include <opencv2/3d.hpp>
#include <opencv2/calib.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
double calib::calibController::estimateCoverageQuality()
{
int gridSize = 10;

View File

@ -5,7 +5,8 @@
#include "frameProcessor.hpp"
#include "rotationConverters.hpp"
#include <opencv2/calib3d.hpp>
#include <opencv2/3d.hpp>
#include <opencv2/calib.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

View File

@ -6,7 +6,7 @@
#define FRAME_PROCESSOR_HPP
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/calib.hpp>
#ifdef HAVE_OPENCV_ARUCO
#include <opencv2/aruco/charuco.hpp>
#endif

View File

@ -3,7 +3,8 @@
// of this distribution and at http://opencv.org/license.html.
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/3d.hpp>
#include <opencv2/calib.hpp>
#include <opencv2/cvconfig.h>
#include <opencv2/highgui.hpp>

View File

@ -6,12 +6,15 @@
#include <cmath>
#include <opencv2/calib3d.hpp>
#include <opencv2/3d.hpp>
#include <opencv2/calib.hpp>
#include <opencv2/core.hpp>
#define CALIB_PI 3.14159265358979323846
#define CALIB_PI_2 1.57079632679489661923
using namespace cv;
void calib::Euler(const cv::Mat& src, cv::Mat& dst, int argType)
{
if((src.rows == 3) && (src.cols == 3))

View File

@ -1,5 +1,5 @@
ocv_warnings_disable(CMAKE_CXX_FLAGS -Woverloaded-virtual -Winconsistent-missing-override -Wsuggest-override)
file(GLOB SRCS *.cpp)
ocv_add_application(opencv_traincascade
MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_calib3d opencv_features2d
MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_3d opencv_features2d
SRCS ${SRCS})

View File

@ -123,9 +123,9 @@ endif()
set(STD_OPENCV_LIBS opencv-data)
set(STD_OPENCV_DEV libopencv-dev)
foreach(module calib3d core dnn features2d flann gapi highgui
foreach(module 3d calib core dnn features2d flann gapi highgui
imgcodecs imgproc ml objdetect
photo stitching ts video videoio)
photo stereo stitching ts video videoio)
if(HAVE_opencv_${module})
list(APPEND STD_OPENCV_LIBS "libopencv-${module}4.0")
list(APPEND STD_OPENCV_DEV "libopencv-${module}-dev")

View File

@ -12,7 +12,12 @@ endif()
string(REGEX REPLACE "\\.cpp$" ".hpp" OUTPUT_HPP "${OUTPUT}")
get_filename_component(OUTPUT_HPP_NAME "${OUTPUT_HPP}" NAME)
set(nested_namespace_start "namespace ${MODULE_NAME}\n{")
if (MODULE_NAME MATCHES "^[0-9].+")
set(MANGLED_MODULE_NAME "_${MODULE_NAME}")
else()
set(MANGLED_MODULE_NAME "${MODULE_NAME}")
endif()
set(nested_namespace_start "namespace ${MANGLED_MODULE_NAME}\n{")
set(nested_namespace_end "}")
set(STR_CPP "// This file is auto-generated. Do not edit!

View File

@ -109,7 +109,12 @@ if(DOXYGEN_FOUND)
list(APPEND deps ${bib_file})
endif()
# Reference entry
set(one_ref "\t- ${m}. @ref ${m}\n")
if("${m}" MATCHES "^[0-9].+")
set(ref_m "_${m} \"${m}\"")
else()
set(ref_m "${m}")
endif()
set(one_ref "\t- ${m}. @ref ${ref_m}\n")
list(FIND OPENCV_MODULES_EXTRA ${m} _pos)
if(${_pos} EQUAL -1)
set(refs_main "${refs_main}${one_ref}")

View File

@ -5,7 +5,7 @@ Goal
----
In this section,
- We will learn to exploit calib3d module to create some 3D effects in images.
- We will learn to exploit 3d module to create some 3D effects in images.
Basics
------

View File

@ -51,4 +51,4 @@ Feature Detection and Description {#tutorial_py_table_of_contents_feature2d}
- @subpage tutorial_py_feature_homography
Now we know about feature matching. Let's mix it up with calib3d module to find objects in a complex image.
Now we know about feature matching. Let's mix it up with 3d module to find objects in a complex image.

View File

@ -55,7 +55,7 @@ plane:
\f[s\ \left [ \begin{matrix} u \\ v \\ 1 \end{matrix} \right ] = \left [ \begin{matrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{matrix} \right ] \left [ \begin{matrix} r_{11} & r_{12} & r_{13} & t_1 \\ r_{21} & r_{22} & r_{23} & t_2 \\ r_{31} & r_{32} & r_{33} & t_3 \end{matrix} \right ] \left [ \begin{matrix} X \\ Y \\ Z\\ 1 \end{matrix} \right ]\f]
The complete documentation of how to manage with this equations is in @ref calib3d .
The complete documentation of how to work with these equations is in @ref _3d "3d".
Source code
-----------

View File

@ -10,7 +10,7 @@ title: 'Google Summer of Code: Improvement of Random Sample Consensus in OpenCV'
Contribution
============
The integrated part to OpenCV `calib3d` module is RANSAC-based universal
The integrated part to OpenCV `3d` module is RANSAC-based universal
framework USAC (`namespace usac`) written in C++. The framework includes
different state-of-the-arts methods for sampling, verification or local
optimization. The main advantage of the framework is its independence to

View File

@ -138,7 +138,7 @@ cmake \
Each module is a subdirectory of the `modules` directory. It is possible to disable one module:
```.sh
cmake -DBUILD_opencv_calib3d=OFF ../opencv
cmake -DBUILD_opencv_3d=OFF ../opencv
```
The opposite option is to build only specified modules and all modules they depend on:

View File

@ -112,7 +112,7 @@ Making a project
since I plan to use the whole bunch:
opencv_core opencv_imgproc opencv_imgcodecs opencv_highgui opencv_ml opencv_videoio opencv_video opencv_features2d
opencv_calib3d opencv_objdetect opencv_flann
opencv_3d opencv_objdetect opencv_flann
![](images/a10.png)

View File

@ -52,8 +52,11 @@
#include "opencv2/core.hpp"
// Then the optional modules are checked
#ifdef HAVE_OPENCV_CALIB3D
#include "opencv2/calib3d.hpp"
#ifdef HAVE_OPENCV_3D
#include "opencv2/3d.hpp"
#endif
#ifdef HAVE_OPENCV_CALIB
#include "opencv2/calib.hpp"
#endif
#ifdef HAVE_OPENCV_FEATURES2D
#include "opencv2/features2d.hpp"
@ -82,6 +85,9 @@
#ifdef HAVE_OPENCV_PHOTO
#include "opencv2/photo.hpp"
#endif
#ifdef HAVE_OPENCV_STEREO
#include "opencv2/stereo.hpp"
#endif
#ifdef HAVE_OPENCV_STITCHING
#include "opencv2/stitching.hpp"
#endif

View File

@ -3,10 +3,10 @@ set(the_description "Camera Calibration and 3D Reconstruction")
ocv_add_dispatched_file(undistort SSE2 AVX2)
set(debug_modules "")
if(DEBUG_opencv_calib3d)
if(DEBUG_opencv_3d)
list(APPEND debug_modules opencv_highgui)
endif()
ocv_define_module(calib3d opencv_imgproc opencv_features2d opencv_flann ${debug_modules}
ocv_define_module(3d opencv_imgproc opencv_features2d opencv_flann ${debug_modules}
WRAP java objc python js
)
ocv_target_link_libraries(${the_module} ${LAPACK_LIBRARIES})

View File

@ -1,14 +1,7 @@
{
"class_ignore_list": [
"CirclesGridFinderParameters"
],
"namespaces_dict": {
"cv.fisheye": "fisheye"
},
"func_arg_fix" : {
"findFundamentalMat" : { "points1" : {"ctype" : "vector_Point2f"},
"points2" : {"ctype" : "vector_Point2f"} },
"cornerSubPix" : { "corners" : {"ctype" : "vector_Point2f"} },
"findHomography" : { "srcPoints" : {"ctype" : "vector_Point2f"},
"dstPoints" : {"ctype" : "vector_Point2f"} },
"solvePnP" : { "objectPoints" : {"ctype" : "vector_Point3f"},
@ -21,10 +14,6 @@
"dst" : {"ctype" : "vector_Point2f"} },
"projectPoints" : { "objectPoints" : {"ctype" : "vector_Point3f"},
"imagePoints" : {"ctype" : "vector_Point2f"},
"distCoeffs" : {"ctype" : "vector_double" } },
"initCameraMatrix2D" : { "objectPoints" : {"ctype" : "vector_vector_Point3f"},
"imagePoints" : {"ctype" : "vector_vector_Point2f"} },
"findChessboardCorners" : { "corners" : {"ctype" : "vector_Point2f"} },
"drawChessboardCorners" : { "corners" : {"ctype" : "vector_Point2f"} }
"distCoeffs" : {"ctype" : "vector_double" } }
}
}

View File

@ -1,8 +1,8 @@
package org.opencv.test.calib3d;
package org.opencv.test.cv3d;
import java.util.ArrayList;
import org.opencv.calib3d.Calib3d;
import org.opencv.cv3d.Cv3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
@ -15,7 +15,7 @@ import org.opencv.core.Size;
import org.opencv.test.OpenCVTestCase;
import org.opencv.imgproc.Imgproc;
public class Calib3dTest extends OpenCVTestCase {
public class Cv3dTest extends OpenCVTestCase {
Size size;
@ -26,18 +26,6 @@ public class Calib3dTest extends OpenCVTestCase {
size = new Size(3, 3);
}
public void testCalibrateCameraListOfMatListOfMatSizeMatMatListOfMatListOfMat() {
fail("Not yet implemented");
}
public void testCalibrateCameraListOfMatListOfMatSizeMatMatListOfMatListOfMatInt() {
fail("Not yet implemented");
}
public void testCalibrationMatrixValues() {
fail("Not yet implemented");
}
public void testComposeRTMatMatMatMatMatMat() {
Mat rvec1 = new Mat(3, 1, CvType.CV_32F);
rvec1.put(0, 0, 0.5302828, 0.19925919, 0.40105945);
@ -56,7 +44,7 @@ public class Calib3dTest extends OpenCVTestCase {
Mat outTvec = new Mat(3, 1, CvType.CV_32F);
outTvec.put(0, 0, 1.4560841, 1.0680628, 0.81598103);
Calib3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
Cv3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
assertMatEqual(outRvec, rvec3, EPS);
assertMatEqual(outTvec, tvec3, EPS);
@ -155,10 +143,6 @@ public class Calib3dTest extends OpenCVTestCase {
fail("Not yet implemented");
}
public void testDrawChessboardCorners() {
fail("Not yet implemented");
}
public void testEstimateAffine3DMatMatMatMat() {
fail("Not yet implemented");
}
@ -171,113 +155,6 @@ public class Calib3dTest extends OpenCVTestCase {
fail("Not yet implemented");
}
public void testFilterSpecklesMatDoubleIntDouble() {
gray_16s_1024.copyTo(dst);
Point center = new Point(gray_16s_1024.rows() / 2., gray_16s_1024.cols() / 2.);
Imgproc.circle(dst, center, 1, Scalar.all(4096));
assertMatNotEqual(gray_16s_1024, dst);
Calib3d.filterSpeckles(dst, 1024.0, 100, 0.);
assertMatEqual(gray_16s_1024, dst);
}
public void testFilterSpecklesMatDoubleIntDoubleMat() {
fail("Not yet implemented");
}
public void testFindChessboardCornersMatSizeMat() {
Size patternSize = new Size(9, 6);
MatOfPoint2f corners = new MatOfPoint2f();
Calib3d.findChessboardCorners(grayChess, patternSize, corners);
assertFalse(corners.empty());
}
public void testFindChessboardCornersMatSizeMatInt() {
Size patternSize = new Size(9, 6);
MatOfPoint2f corners = new MatOfPoint2f();
Calib3d.findChessboardCorners(grayChess, patternSize, corners, Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
+ Calib3d.CALIB_CB_FAST_CHECK);
assertFalse(corners.empty());
}
public void testFind4QuadCornerSubpix() {
Size patternSize = new Size(9, 6);
MatOfPoint2f corners = new MatOfPoint2f();
Size region_size = new Size(5, 5);
Calib3d.findChessboardCorners(grayChess, patternSize, corners);
Calib3d.find4QuadCornerSubpix(grayChess, corners, region_size);
assertFalse(corners.empty());
}
public void testFindCirclesGridMatSizeMat() {
int size = 300;
Mat img = new Mat(size, size, CvType.CV_8U);
img.setTo(new Scalar(255));
Mat centers = new Mat();
assertFalse(Calib3d.findCirclesGrid(img, new Size(5, 5), centers));
for (int i = 0; i < 5; i++)
for (int j = 0; j < 5; j++) {
Point pt = new Point(size * (2 * i + 1) / 10, size * (2 * j + 1) / 10);
Imgproc.circle(img, pt, 10, new Scalar(0), -1);
}
assertTrue(Calib3d.findCirclesGrid(img, new Size(5, 5), centers));
assertEquals(25, centers.rows());
assertEquals(1, centers.cols());
assertEquals(CvType.CV_32FC2, centers.type());
}
public void testFindCirclesGridMatSizeMatInt() {
int size = 300;
Mat img = new Mat(size, size, CvType.CV_8U);
img.setTo(new Scalar(255));
Mat centers = new Mat();
assertFalse(Calib3d.findCirclesGrid(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
| Calib3d.CALIB_CB_ASYMMETRIC_GRID));
int step = size * 2 / 15;
int offsetx = size / 6;
int offsety = (size - 4 * step) / 2;
for (int i = 0; i < 3; i++)
for (int j = 0; j < 5; j++) {
Point pt = new Point(offsetx + (2 * i + j % 2) * step, offsety + step * j);
Imgproc.circle(img, pt, 10, new Scalar(0), -1);
}
assertTrue(Calib3d.findCirclesGrid(img, new Size(3, 5), centers, Calib3d.CALIB_CB_CLUSTERING
| Calib3d.CALIB_CB_ASYMMETRIC_GRID));
assertEquals(15, centers.rows());
assertEquals(1, centers.cols());
assertEquals(CvType.CV_32FC2, centers.type());
}
public void testFindFundamentalMatListOfPointListOfPoint() {
fail("Not yet implemented");
/*
int minFundamentalMatPoints = 8;
MatOfPoint2f pts = new MatOfPoint2f();
pts.alloc(minFundamentalMatPoints);
for (int i = 0; i < minFundamentalMatPoints; i++) {
double x = Math.random() * 100 - 50;
double y = Math.random() * 100 - 50;
pts.put(i, 0, x, y); //add(new Point(x, y));
}
Mat fm = Calib3d.findFundamentalMat(pts, pts);
truth = new Mat(3, 3, CvType.CV_64F);
truth.put(0, 0, 0, -0.577, 0.288, 0.577, 0, 0.288, -0.288, -0.288, 0);
assertMatEqual(truth, fm, EPS);
*/
}
public void testFindFundamentalMatListOfPointListOfPointInt() {
fail("Not yet implemented");
}
@ -309,7 +186,7 @@ public class Calib3dTest extends OpenCVTestCase {
transformedPoints.put(i, 0, y, x);
}
Mat hmg = Calib3d.findHomography(originalPoints, transformedPoints);
Mat hmg = Cv3d.findHomography(originalPoints, transformedPoints);
truth = new Mat(3, 3, CvType.CV_64F);
truth.put(0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1);
@ -349,14 +226,6 @@ public class Calib3dTest extends OpenCVTestCase {
fail("Not yet implemented");
}
public void testInitCameraMatrix2DListOfMatListOfMatSize() {
fail("Not yet implemented");
}
public void testInitCameraMatrix2DListOfMatListOfMatSizeDouble() {
fail("Not yet implemented");
}
public void testMatMulDeriv() {
fail("Not yet implemented");
}
@ -377,124 +246,20 @@ public class Calib3dTest extends OpenCVTestCase {
fail("Not yet implemented");
}
public void testReprojectImageTo3DMatMatMat() {
Mat transformMatrix = new Mat(4, 4, CvType.CV_64F);
transformMatrix.put(0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1);
Mat disparity = new Mat(matSize, matSize, CvType.CV_32F);
float[] disp = new float[matSize * matSize];
for (int i = 0; i < matSize; i++)
for (int j = 0; j < matSize; j++)
disp[i * matSize + j] = i - j;
disparity.put(0, 0, disp);
Mat _3dPoints = new Mat();
Calib3d.reprojectImageTo3D(disparity, _3dPoints, transformMatrix);
assertEquals(CvType.CV_32FC3, _3dPoints.type());
assertEquals(matSize, _3dPoints.rows());
assertEquals(matSize, _3dPoints.cols());
truth = new Mat(matSize, matSize, CvType.CV_32FC3);
float[] _truth = new float[matSize * matSize * 3];
for (int i = 0; i < matSize; i++)
for (int j = 0; j < matSize; j++) {
_truth[(i * matSize + j) * 3 + 0] = i;
_truth[(i * matSize + j) * 3 + 1] = j;
_truth[(i * matSize + j) * 3 + 2] = i - j;
}
truth.put(0, 0, _truth);
assertMatEqual(truth, _3dPoints, EPS);
}
public void testReprojectImageTo3DMatMatMatBoolean() {
Mat transformMatrix = new Mat(4, 4, CvType.CV_64F);
transformMatrix.put(0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1);
Mat disparity = new Mat(matSize, matSize, CvType.CV_32F);
float[] disp = new float[matSize * matSize];
for (int i = 0; i < matSize; i++)
for (int j = 0; j < matSize; j++)
disp[i * matSize + j] = i - j;
disp[0] = -Float.MAX_VALUE;
disparity.put(0, 0, disp);
Mat _3dPoints = new Mat();
Calib3d.reprojectImageTo3D(disparity, _3dPoints, transformMatrix, true);
assertEquals(CvType.CV_32FC3, _3dPoints.type());
assertEquals(matSize, _3dPoints.rows());
assertEquals(matSize, _3dPoints.cols());
truth = new Mat(matSize, matSize, CvType.CV_32FC3);
float[] _truth = new float[matSize * matSize * 3];
for (int i = 0; i < matSize; i++)
for (int j = 0; j < matSize; j++) {
_truth[(i * matSize + j) * 3 + 0] = i;
_truth[(i * matSize + j) * 3 + 1] = j;
_truth[(i * matSize + j) * 3 + 2] = i - j;
}
_truth[2] = 10000;
truth.put(0, 0, _truth);
assertMatEqual(truth, _3dPoints, EPS);
}
public void testReprojectImageTo3DMatMatMatBooleanInt() {
Mat transformMatrix = new Mat(4, 4, CvType.CV_64F);
transformMatrix.put(0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1);
Mat disparity = new Mat(matSize, matSize, CvType.CV_32F);
float[] disp = new float[matSize * matSize];
for (int i = 0; i < matSize; i++)
for (int j = 0; j < matSize; j++)
disp[i * matSize + j] = i - j;
disparity.put(0, 0, disp);
Mat _3dPoints = new Mat();
Calib3d.reprojectImageTo3D(disparity, _3dPoints, transformMatrix, false, CvType.CV_16S);
assertEquals(CvType.CV_16SC3, _3dPoints.type());
assertEquals(matSize, _3dPoints.rows());
assertEquals(matSize, _3dPoints.cols());
truth = new Mat(matSize, matSize, CvType.CV_16SC3);
short[] _truth = new short[matSize * matSize * 3];
for (short i = 0; i < matSize; i++)
for (short j = 0; j < matSize; j++) {
_truth[(i * matSize + j) * 3 + 0] = i;
_truth[(i * matSize + j) * 3 + 1] = j;
_truth[(i * matSize + j) * 3 + 2] = (short) (i - j);
}
truth.put(0, 0, _truth);
assertMatEqual(truth, _3dPoints, EPS);
}
public void testRodriguesMatMat() {
Mat r = new Mat(3, 1, CvType.CV_32F);
Mat R = new Mat(3, 3, CvType.CV_32F);
r.put(0, 0, Math.PI, 0, 0);
Calib3d.Rodrigues(r, R);
Cv3d.Rodrigues(r, R);
truth = new Mat(3, 3, CvType.CV_32F);
truth.put(0, 0, 1, 0, 0, 0, -1, 0, 0, 0, -1);
assertMatEqual(truth, R, EPS);
Mat r2 = new Mat();
Calib3d.Rodrigues(R, r2);
Cv3d.Rodrigues(R, r2);
assertMatEqual(r, r2, EPS);
}
@ -542,7 +307,7 @@ public class Calib3dTest extends OpenCVTestCase {
Mat rvec = new Mat();
Mat tvec = new Mat();
Calib3d.solvePnP(points3d, points2d, intrinsics, new MatOfDouble(), rvec, tvec);
Cv3d.solvePnP(points3d, points2d, intrinsics, new MatOfDouble(), rvec, tvec);
Mat truth_rvec = new Mat(3, 1, CvType.CV_64F);
truth_rvec.put(0, 0, 0, Math.PI / 2, 0);
@ -620,38 +385,10 @@ public class Calib3dTest extends OpenCVTestCase {
Mat lines = new Mat();
Mat truth = new Mat(1, 1, CvType.CV_32FC3);
truth.put(0, 0, -0.70735186, 0.70686162, -0.70588124);
Calib3d.computeCorrespondEpilines(left, 1, fundamental, lines);
Cv3d.computeCorrespondEpilines(left, 1, fundamental, lines);
assertMatEqual(truth, lines, EPS);
}
public void testConstants()
{
// calib3d.hpp: some constants have conflict with constants from 'fisheye' namespace
assertEquals(1, Calib3d.CALIB_USE_INTRINSIC_GUESS);
assertEquals(2, Calib3d.CALIB_FIX_ASPECT_RATIO);
assertEquals(4, Calib3d.CALIB_FIX_PRINCIPAL_POINT);
assertEquals(8, Calib3d.CALIB_ZERO_TANGENT_DIST);
assertEquals(16, Calib3d.CALIB_FIX_FOCAL_LENGTH);
assertEquals(32, Calib3d.CALIB_FIX_K1);
assertEquals(64, Calib3d.CALIB_FIX_K2);
assertEquals(128, Calib3d.CALIB_FIX_K3);
assertEquals(0x0800, Calib3d.CALIB_FIX_K4);
assertEquals(0x1000, Calib3d.CALIB_FIX_K5);
assertEquals(0x2000, Calib3d.CALIB_FIX_K6);
assertEquals(0x4000, Calib3d.CALIB_RATIONAL_MODEL);
assertEquals(0x8000, Calib3d.CALIB_THIN_PRISM_MODEL);
assertEquals(0x10000, Calib3d.CALIB_FIX_S1_S2_S3_S4);
assertEquals(0x40000, Calib3d.CALIB_TILTED_MODEL);
assertEquals(0x80000, Calib3d.CALIB_FIX_TAUX_TAUY);
assertEquals(0x100000, Calib3d.CALIB_USE_QR);
assertEquals(0x200000, Calib3d.CALIB_FIX_TANGENT_DIST);
assertEquals(0x100, Calib3d.CALIB_FIX_INTRINSIC);
assertEquals(0x200, Calib3d.CALIB_SAME_FOCAL_LENGTH);
assertEquals(0x400, Calib3d.CALIB_ZERO_DISPARITY);
assertEquals((1 << 17), Calib3d.CALIB_USE_LU);
assertEquals((1 << 22), Calib3d.CALIB_USE_EXTRINSIC_GUESS);
}
public void testSolvePnPGeneric_regression_16040() {
Mat intrinsics = Mat.eye(3, 3, CvType.CV_64F);
intrinsics.put(0, 0, 400);
@ -681,7 +418,7 @@ public class Calib3dTest extends OpenCVTestCase {
Mat reprojectionError = new Mat(2, 1, CvType.CV_64FC1);
Calib3d.solvePnPGeneric(points3d, points2d, intrinsics, new MatOfDouble(), rvecs, tvecs, false, Calib3d.SOLVEPNP_IPPE, rvec, tvec, reprojectionError);
Cv3d.solvePnPGeneric(points3d, points2d, intrinsics, new MatOfDouble(), rvecs, tvecs, false, Cv3d.SOLVEPNP_IPPE, rvec, tvec, reprojectionError);
Mat truth_rvec = new Mat(3, 1, CvType.CV_64F);
truth_rvec.put(0, 0, 0, Math.PI / 2, 0);
@ -694,14 +431,14 @@ public class Calib3dTest extends OpenCVTestCase {
}
public void testGetDefaultNewCameraMatrixMat() {
Mat mtx = Calib3d.getDefaultNewCameraMatrix(gray0);
Mat mtx = Cv3d.getDefaultNewCameraMatrix(gray0);
assertFalse(mtx.empty());
assertEquals(0, Core.countNonZero(mtx));
}
public void testGetDefaultNewCameraMatrixMatSizeBoolean() {
Mat mtx = Calib3d.getDefaultNewCameraMatrix(gray0, size, true);
Mat mtx = Cv3d.getDefaultNewCameraMatrix(gray0, size, true);
assertFalse(mtx.empty());
assertFalse(0 == Core.countNonZero(mtx));
@ -723,7 +460,7 @@ public class Calib3dTest extends OpenCVTestCase {
Mat map2 = new Mat();
// TODO: complete this test
Calib3d.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, CvType.CV_32F, map1, map2);
Cv3d.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, CvType.CV_32F, map1, map2);
}
public void testInitWideAngleProjMapMatMatSizeIntIntMatMat() {
@ -742,7 +479,7 @@ public class Calib3dTest extends OpenCVTestCase {
truth.put(1, 0, 0, 0, 0);
truth.put(2, 0, 0, 3, 0);
// TODO: No documentation for this function
// Calib3d.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize,
// Cv3d.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize,
// 5, m1type, truthput1, truthput2);
}
@ -769,7 +506,7 @@ public class Calib3dTest extends OpenCVTestCase {
}
};
Calib3d.undistort(src, dst, cameraMatrix, distCoeffs);
Cv3d.undistort(src, dst, cameraMatrix, distCoeffs);
truth = new Mat(3, 3, CvType.CV_32F) {
{
@ -797,7 +534,7 @@ public class Calib3dTest extends OpenCVTestCase {
};
Mat newCameraMatrix = new Mat(3, 3, CvType.CV_32F, new Scalar(1));
Calib3d.undistort(src, dst, cameraMatrix, distCoeffs, newCameraMatrix);
Cv3d.undistort(src, dst, cameraMatrix, distCoeffs, newCameraMatrix);
truth = new Mat(3, 3, CvType.CV_32F, new Scalar(3));
assertMatEqual(truth, dst, EPS);
@ -810,7 +547,7 @@ public class Calib3dTest extends OpenCVTestCase {
Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64FC1);
Mat distCoeffs = new Mat(8, 1, CvType.CV_64FC1, new Scalar(0));
Calib3d.undistortPoints(src, dst, cameraMatrix, distCoeffs);
Cv3d.undistortPoints(src, dst, cameraMatrix, distCoeffs);
assertEquals(src.size(), dst.size());
for(int i=0; i<src.toList().size(); i++) {
@ -818,32 +555,4 @@ public class Calib3dTest extends OpenCVTestCase {
assertTrue(src.toList().get(i).equals(dst.toList().get(i)));
}
}
public void testEstimateNewCameraMatrixForUndistortRectify() {
Mat K = new Mat().eye(3, 3, CvType.CV_64FC1);
Mat K_new = new Mat().eye(3, 3, CvType.CV_64FC1);
Mat K_new_truth = new Mat().eye(3, 3, CvType.CV_64FC1);
Mat D = new Mat().zeros(4, 1, CvType.CV_64FC1);
K.put(0,0,600.4447738238429);
K.put(1,1,578.9929805505851);
K.put(0,2,992.0642578801213);
K.put(1,2,549.2682624212172);
D.put(0,0,-0.05090103223466704);
D.put(1,0,0.030944413642173308);
D.put(2,0,-0.021509225493198905);
D.put(3,0,0.0043378096628297145);
K_new_truth.put(0,0, 387.4809086880343);
K_new_truth.put(0,2, 1036.669802754649);
K_new_truth.put(1,1, 373.6375700303157);
K_new_truth.put(1,2, 538.8373261247601);
Calib3d.fisheye_estimateNewCameraMatrixForUndistortRectify(K,D,new Size(1920,1080),
new Mat().eye(3, 3, CvType.CV_64F), K_new, 0.0, new Size(1920,1080));
assertMatEqual(K_new, K_new_truth, EPS);
}
}

View File

@ -0,0 +1,240 @@
//
// Cv3dTest.swift
//
// Created by Giles Payne on 2020/05/26.
//
import XCTest
import OpenCV
class Cv3dTest: OpenCVTestCase {
var size = Size()
override func setUp() {
super.setUp()
size = Size(width: 3, height: 3)
}
override func tearDown() {
super.tearDown()
}
func testComposeRTMatMatMatMatMatMat() throws {
let rvec1 = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
try rvec1.put(row: 0, col: 0, data: [0.5302828, 0.19925919, 0.40105945] as [Float])
let tvec1 = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
try tvec1.put(row: 0, col: 0, data: [0.81438506, 0.43713298, 0.2487897] as [Float])
let rvec2 = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
try rvec2.put(row: 0, col: 0, data: [0.77310503, 0.76209372, 0.30779448] as [Float])
let tvec2 = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
try tvec2.put(row: 0, col: 0, data: [0.70243168, 0.4784472, 0.79219002] as [Float])
let rvec3 = Mat()
let tvec3 = Mat()
let outRvec = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
try outRvec.put(row: 0, col: 0, data: [1.418641, 0.88665926, 0.56020796])
let outTvec = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
try outTvec.put(row: 0, col: 0, data: [1.4560841, 1.0680628, 0.81598103])
Cv3d.composeRT(rvec1: rvec1, tvec1: tvec1, rvec2: rvec2, tvec2: tvec2, rvec3: rvec3, tvec3: tvec3)
try assertMatEqual(outRvec, rvec3, OpenCVTestCase.EPS)
try assertMatEqual(outTvec, tvec3, OpenCVTestCase.EPS)
}
func testFindHomographyListOfPointListOfPoint() throws {
let NUM:Int32 = 20
let originalPoints = MatOfPoint2f()
originalPoints.alloc(NUM)
let transformedPoints = MatOfPoint2f()
transformedPoints.alloc(NUM)
for i:Int32 in 0..<NUM {
let x:Float = Float.random(in: -50...50)
let y:Float = Float.random(in: -50...50)
try originalPoints.put(row:i, col:0, data:[x, y])
try transformedPoints.put(row:i, col:0, data:[y, x])
}
let hmg = Cv3d.findHomography(srcPoints: originalPoints, dstPoints: transformedPoints)
truth = Mat(rows: 3, cols: 3, type: CvType.CV_64F)
try truth!.put(row:0, col:0, data:[0, 1, 0, 1, 0, 0, 0, 0, 1] as [Double])
try assertMatEqual(truth!, hmg, OpenCVTestCase.EPS)
}
func testRodriguesMatMat() throws {
let r = Mat(rows: 3, cols: 1, type: CvType.CV_32F)
let R = Mat(rows: 3, cols: 3, type: CvType.CV_32F)
try r.put(row:0, col:0, data:[.pi, 0, 0] as [Float])
Cv3d.Rodrigues(src: r, dst: R)
truth = Mat(rows: 3, cols: 3, type: CvType.CV_32F)
try truth!.put(row:0, col:0, data:[1, 0, 0, 0, -1, 0, 0, 0, -1] as [Float])
try assertMatEqual(truth!, R, OpenCVTestCase.EPS)
let r2 = Mat()
Cv3d.Rodrigues(src: R, dst: r2)
try assertMatEqual(r, r2, OpenCVTestCase.EPS)
}
func testSolvePnPListOfPoint3ListOfPointMatMatMatMat() throws {
let intrinsics = Mat.eye(rows: 3, cols: 3, type: CvType.CV_64F)
try intrinsics.put(row: 0, col: 0, data: [400] as [Double])
try intrinsics.put(row: 1, col: 1, data: [400] as [Double])
try intrinsics.put(row: 0, col: 2, data: [640 / 2] as [Double])
try intrinsics.put(row: 1, col: 2, data: [480 / 2] as [Double])
let minPnpPointsNum: Int32 = 4
let points3d = MatOfPoint3f()
points3d.alloc(minPnpPointsNum)
let points2d = MatOfPoint2f()
points2d.alloc(minPnpPointsNum)
for i in 0..<minPnpPointsNum {
let x = Float.random(in: -50...50)
let y = Float.random(in: -50...50)
try points2d.put(row: i, col: 0, data: [x, y]) //add(Point(x, y))
try points3d.put(row: i, col: 0, data: [0, y, x]) // add(Point3(0, y, x))
}
let rvec = Mat()
let tvec = Mat()
Cv3d.solvePnP(objectPoints: points3d, imagePoints: points2d, cameraMatrix: intrinsics, distCoeffs: MatOfDouble(), rvec: rvec, tvec: tvec)
let truth_rvec = Mat(rows: 3, cols: 1, type: CvType.CV_64F)
try truth_rvec.put(row: 0, col: 0, data: [0, .pi / 2, 0] as [Double])
let truth_tvec = Mat(rows: 3, cols: 1, type: CvType.CV_64F)
try truth_tvec.put(row: 0, col: 0, data: [-320, -240, 400] as [Double])
try assertMatEqual(truth_rvec, rvec, OpenCVTestCase.EPS)
try assertMatEqual(truth_tvec, tvec, OpenCVTestCase.EPS)
}
func testComputeCorrespondEpilines() throws {
let fundamental = Mat(rows: 3, cols: 3, type: CvType.CV_64F)
try fundamental.put(row: 0, col: 0, data: [0, -0.577, 0.288, 0.577, 0, 0.288, -0.288, -0.288, 0])
let left = MatOfPoint2f()
left.alloc(1)
try left.put(row: 0, col: 0, data: [2, 3] as [Float]) //add(Point(x, y))
let lines = Mat()
let truth = Mat(rows: 1, cols: 1, type: CvType.CV_32FC3)
try truth.put(row: 0, col: 0, data: [-0.70735186, 0.70686162, -0.70588124])
Cv3d.computeCorrespondEpilines(points: left, whichImage: 1, F: fundamental, lines: lines)
try assertMatEqual(truth, lines, OpenCVTestCase.EPS)
}
func testSolvePnPGeneric_regression_16040() throws {
let intrinsics = Mat.eye(rows: 3, cols: 3, type: CvType.CV_64F)
try intrinsics.put(row: 0, col: 0, data: [400] as [Double])
try intrinsics.put(row: 1, col: 1, data: [400] as [Double])
try intrinsics.put(row: 0, col: 2, data: [640 / 2] as [Double])
try intrinsics.put(row: 1, col: 2, data: [480 / 2] as [Double])
let minPnpPointsNum: Int32 = 4
let points3d = MatOfPoint3f()
points3d.alloc(minPnpPointsNum)
let points2d = MatOfPoint2f()
points2d.alloc(minPnpPointsNum)
for i in 0..<minPnpPointsNum {
let x = Float.random(in: -50...50)
let y = Float.random(in: -50...50)
try points2d.put(row: i, col: 0, data: [x, y]) //add(Point(x, y))
try points3d.put(row: i, col: 0, data: [0, y, x]) // add(Point3(0, y, x))
}
var rvecs = [Mat]()
var tvecs = [Mat]()
let rvec = Mat()
let tvec = Mat()
let reprojectionError = Mat(rows: 2, cols: 1, type: CvType.CV_64FC1)
Cv3d.solvePnPGeneric(objectPoints: points3d, imagePoints: points2d, cameraMatrix: intrinsics, distCoeffs: MatOfDouble(), rvecs: &rvecs, tvecs: &tvecs, useExtrinsicGuess: false, flags: .SOLVEPNP_IPPE, rvec: rvec, tvec: tvec, reprojectionError: reprojectionError)
let truth_rvec = Mat(rows: 3, cols: 1, type: CvType.CV_64F)
try truth_rvec.put(row: 0, col: 0, data: [0, .pi / 2, 0] as [Double])
let truth_tvec = Mat(rows: 3, cols: 1, type: CvType.CV_64F)
try truth_tvec.put(row: 0, col: 0, data: [-320, -240, 400] as [Double])
try assertMatEqual(truth_rvec, rvecs[0], 10 * OpenCVTestCase.EPS)
try assertMatEqual(truth_tvec, tvecs[0], 1000 * OpenCVTestCase.EPS)
}
func testGetDefaultNewCameraMatrixMat() {
let mtx = Cv3d.getDefaultNewCameraMatrix(cameraMatrix: gray0)
XCTAssertFalse(mtx.empty())
XCTAssertEqual(0, Core.countNonZero(src: mtx))
}
func testGetDefaultNewCameraMatrixMatSizeBoolean() {
let mtx = Cv3d.getDefaultNewCameraMatrix(cameraMatrix: gray0, imgsize: size, centerPrincipalPoint: true)
XCTAssertFalse(mtx.empty())
XCTAssertFalse(0 == Core.countNonZero(src: mtx))
// TODO_: write better test
}
func testUndistortMatMatMatMat() throws {
let src = Mat(rows: 3, cols: 3, type: CvType.CV_32F, scalar: Scalar(3))
let cameraMatrix = Mat(rows: 3, cols: 3, type: CvType.CV_32F)
try cameraMatrix.put(row: 0, col: 0, data: [1, 0, 1] as [Float])
try cameraMatrix.put(row: 1, col: 0, data: [0, 1, 2] as [Float])
try cameraMatrix.put(row: 2, col: 0, data: [0, 0, 1] as [Float])
let distCoeffs = Mat(rows: 1, cols: 4, type: CvType.CV_32F)
try distCoeffs.put(row: 0, col: 0, data: [1, 3, 2, 4] as [Float])
Cv3d.undistort(src: src, dst: dst, cameraMatrix: cameraMatrix, distCoeffs: distCoeffs)
truth = Mat(rows: 3, cols: 3, type: CvType.CV_32F)
try truth!.put(row: 0, col: 0, data: [0, 0, 0] as [Float])
try truth!.put(row: 1, col: 0, data: [0, 0, 0] as [Float])
try truth!.put(row: 2, col: 0, data: [0, 3, 0] as [Float])
try assertMatEqual(truth!, dst, OpenCVTestCase.EPS)
}
// undistort with an explicit new camera matrix: with the all-ones new
// camera matrix the constant source image must come through unchanged.
func testUndistortMatMatMatMatMat() throws {
    let source = Mat(rows: 3, cols: 3, type: CvType.CV_32F, scalar: Scalar(3))
    // Camera intrinsics: fx = fy = 1, principal point at (1, 2).
    let intrinsics = Mat(rows: 3, cols: 3, type: CvType.CV_32F)
    try intrinsics.put(row: 0, col: 0, data: [1, 0, 1] as [Float])
    try intrinsics.put(row: 1, col: 0, data: [0, 1, 2] as [Float])
    try intrinsics.put(row: 2, col: 0, data: [0, 0, 1] as [Float])
    // Four distortion coefficients (k1, k2, p1, p2).
    let distortion = Mat(rows: 1, cols: 4, type: CvType.CV_32F)
    try distortion.put(row: 0, col: 0, data: [2, 1, 4, 5] as [Float])
    let replacementCamera = Mat(rows: 3, cols: 3, type: CvType.CV_32F, scalar: Scalar(1))
    Cv3d.undistort(src: source, dst: dst,
                   cameraMatrix: intrinsics,
                   distCoeffs: distortion,
                   newCameraMatrix: replacementCamera)
    truth = Mat(rows: 3, cols: 3, type: CvType.CV_32F, scalar: Scalar(3))
    try assertMatEqual(truth!, dst, OpenCVTestCase.EPS)
}
// undistortPoints(src:dst:cameraMatrix:distCoeffs:)
// With an identity camera matrix and all-zero distortion coefficients,
// undistortPoints must return the input points unchanged.
func testUndistortPointsListOfPointListOfPointMatMat() {
    let inputPoints = MatOfPoint2f(array: [Point2f(x: 1, y: 2),
                                           Point2f(x: 3, y: 4),
                                           Point2f(x: -1, y: -1)])
    let outputPoints = MatOfPoint2f()
    let identityCamera = Mat.eye(rows: 3, cols: 3, type: CvType.CV_64FC1)
    let zeroDistortion = Mat(rows: 8, cols: 1, type: CvType.CV_64FC1, scalar: Scalar(0))
    Cv3d.undistortPoints(src: inputPoints, dst: outputPoints,
                         cameraMatrix: identityCamera, distCoeffs: zeroDistortion)
    XCTAssertEqual(inputPoints.toArray(), outputPoints.toArray())
}
}

View File

@ -5,6 +5,6 @@
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/3d.hpp"
#endif

View File

@ -7,10 +7,11 @@
static inline double cbrt(double x) { return (double)cv::cubeRoot((float)x); };
#endif
namespace cv {
using namespace std;
namespace {
void solveQuartic(const double *factors, double *realRoots) {
static void solveQuartic(const double *factors, double *realRoots) {
const double &a4 = factors[0];
const double &a3 = factors[1];
const double &a2 = factors[2];
@ -63,7 +64,7 @@ void solveQuartic(const double *factors, double *realRoots) {
realRoots[3] = B_4A - sqrt_2m_rh - sqrt2;
}
void polishQuarticRoots(const double *coeffs, double *roots) {
static void polishQuarticRoots(const double *coeffs, double *roots) {
const int iterations = 2;
for (int i = 0; i < iterations; ++i) {
for (int j = 0; j < 4; ++j) {
@ -123,9 +124,7 @@ inline void mat_mult(const double a[3][3], const double b[3][3], double result[3
result[2][1] = a[2][0] * b[0][1] + a[2][1] * b[1][1] + a[2][2] * b[2][1];
result[2][2] = a[2][0] * b[0][2] + a[2][1] * b[1][2] + a[2][2] * b[2][2];
}
}
namespace cv {
void ap3p::init_inverse_parameters() {
inv_fx = 1. / fx;
inv_fy = 1. / fy;

File diff suppressed because it is too large Load Diff

View File

@ -42,13 +42,12 @@
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
#include "calib3d_c_api.h"
/************************************************************************************\
Some backward compatibility stuff, to be moved to legacy or compat module
\************************************************************************************/
using cv::Ptr;
namespace cv {
////////////////// Levenberg-Marquardt engine (the old variant) ////////////////////////
@ -258,11 +257,12 @@ bool CvLevMarq::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, d
return true;
}
namespace {
static void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<uchar>& cols,
const std::vector<uchar>& rows) {
int nonzeros_cols = cv::countNonZero(cols);
cv::Mat tmp(src.rows, nonzeros_cols, CV_64FC1);
static void subMatrix(const Mat& src, Mat& dst,
const std::vector<uchar>& cols,
const std::vector<uchar>& rows)
{
int nonzeros_cols = countNonZero(cols);
Mat tmp(src.rows, nonzeros_cols, CV_64FC1);
for (int i = 0, j = 0; i < (int)cols.size(); i++)
{
@ -283,9 +283,6 @@ static void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<uchar>
}
}
}
void CvLevMarq::step()
{
using namespace cv;
@ -322,150 +319,4 @@ void CvLevMarq::step()
param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? nonzero_param(j++) : 0);
}
CV_IMPL int cvRANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
return cv::RANSACUpdateNumIters(p, ep, modelPoints, maxIters);
}
CV_IMPL int cvFindHomography( const CvMat* _src, const CvMat* _dst, CvMat* __H, int method,
double ransacReprojThreshold, CvMat* _mask, int maxIters,
double confidence)
{
cv::Mat src = cv::cvarrToMat(_src), dst = cv::cvarrToMat(_dst);
if( src.channels() == 1 && (src.rows == 2 || src.rows == 3) && src.cols > 3 )
cv::transpose(src, src);
if( dst.channels() == 1 && (dst.rows == 2 || dst.rows == 3) && dst.cols > 3 )
cv::transpose(dst, dst);
if ( maxIters < 0 )
maxIters = 0;
if ( maxIters > 2000 )
maxIters = 2000;
if ( confidence < 0 )
confidence = 0;
if ( confidence > 1 )
confidence = 1;
const cv::Mat H = cv::cvarrToMat(__H), mask = cv::cvarrToMat(_mask);
cv::Mat H0 = cv::findHomography(src, dst, method, ransacReprojThreshold,
_mask ? cv::_OutputArray(mask) : cv::_OutputArray(), maxIters,
confidence);
if( H0.empty() )
{
cv::Mat Hz = cv::cvarrToMat(__H);
Hz.setTo(cv::Scalar::all(0));
return 0;
}
H0.convertTo(H, H.type());
return 1;
}
CV_IMPL int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fmatrix, int method,
double param1, double param2, CvMat* _mask )
{
cv::Mat m1 = cv::cvarrToMat(points1), m2 = cv::cvarrToMat(points2);
if( m1.channels() == 1 && (m1.rows == 2 || m1.rows == 3) && m1.cols > 3 )
cv::transpose(m1, m1);
if( m2.channels() == 1 && (m2.rows == 2 || m2.rows == 3) && m2.cols > 3 )
cv::transpose(m2, m2);
const cv::Mat FM = cv::cvarrToMat(fmatrix), mask = cv::cvarrToMat(_mask);
cv::Mat FM0 = cv::findFundamentalMat(m1, m2, method, param1, param2,
_mask ? cv::_OutputArray(mask) : cv::_OutputArray());
if( FM0.empty() )
{
cv::Mat FM0z = cv::cvarrToMat(fmatrix);
FM0z.setTo(cv::Scalar::all(0));
return 0;
}
CV_Assert( FM0.cols == 3 && FM0.rows % 3 == 0 && FM.cols == 3 && FM.rows % 3 == 0 && FM.channels() == 1 );
cv::Mat FM1 = FM.rowRange(0, MIN(FM0.rows, FM.rows));
FM0.rowRange(0, FM1.rows).convertTo(FM1, FM1.type());
return FM1.rows / 3;
}
CV_IMPL void cvComputeCorrespondEpilines( const CvMat* points, int pointImageID,
const CvMat* fmatrix, CvMat* _lines )
{
cv::Mat pt = cv::cvarrToMat(points), fm = cv::cvarrToMat(fmatrix);
cv::Mat lines = cv::cvarrToMat(_lines);
const cv::Mat lines0 = lines;
if( pt.channels() == 1 && (pt.rows == 2 || pt.rows == 3) && pt.cols > 3 )
cv::transpose(pt, pt);
cv::computeCorrespondEpilines(pt, pointImageID, fm, lines);
bool tflag = lines0.channels() == 1 && lines0.rows == 3 && lines0.cols > 3;
lines = lines.reshape(lines0.channels(), (tflag ? lines0.cols : lines0.rows));
if( tflag )
{
CV_Assert( lines.rows == lines0.cols && lines.cols == lines0.rows );
if( lines0.type() == lines.type() )
transpose( lines, lines0 );
else
{
transpose( lines, lines );
lines.convertTo( lines0, lines0.type() );
}
}
else
{
CV_Assert( lines.size() == lines0.size() );
if( lines.data != lines0.data )
lines.convertTo(lines0, lines0.type());
}
}
CV_IMPL void cvConvertPointsHomogeneous( const CvMat* _src, CvMat* _dst )
{
cv::Mat src = cv::cvarrToMat(_src), dst = cv::cvarrToMat(_dst);
const cv::Mat dst0 = dst;
int d0 = src.channels() > 1 ? src.channels() : MIN(src.cols, src.rows);
if( src.channels() == 1 && src.cols > d0 )
cv::transpose(src, src);
int d1 = dst.channels() > 1 ? dst.channels() : MIN(dst.cols, dst.rows);
if( d0 == d1 )
src.copyTo(dst);
else if( d0 < d1 )
cv::convertPointsToHomogeneous(src, dst);
else
cv::convertPointsFromHomogeneous(src, dst);
bool tflag = dst0.channels() == 1 && dst0.cols > d1;
dst = dst.reshape(dst0.channels(), (tflag ? dst0.cols : dst0.rows));
if( tflag )
{
CV_Assert( dst.rows == dst0.cols && dst.cols == dst0.rows );
if( dst0.type() == dst.type() )
transpose( dst, dst0 );
else
{
transpose( dst, dst );
dst.convertTo( dst0, dst0.type() );
}
}
else
{
CV_Assert( dst.size() == dst0.size() );
if( dst.data != dst0.data )
dst.convertTo(dst0, dst0.type());
}
}

View File

@ -45,7 +45,7 @@
//! @cond IGNORED
namespace cv { namespace detail {
namespace cv {
/**
Computes the matrix for the projection onto a tilted image sensor
\param tauX angular parameter rotation around x-axis
@ -115,7 +115,7 @@ void computeTiltProjectionMatrix(FLOAT tauX,
*invMatTilt = matRotXY.t()*invMatProjZ;
}
}
}} // namespace detail, cv
} // namespace detail, _3d, cv
//! @endcond

View File

@ -21,15 +21,15 @@
# include "opencv2/core/eigen.hpp"
#endif
using namespace std;
namespace cv {
dls::dls(const cv::Mat& opoints, const cv::Mat& ipoints)
dls::dls(const Mat& opoints, const Mat& ipoints)
{
N = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
p = cv::Mat(3, N, CV_64F);
z = cv::Mat(3, N, CV_64F);
mn = cv::Mat::zeros(3, 1, CV_64F);
p = Mat(3, N, CV_64F);
z = Mat(3, N, CV_64F);
mn = Mat::zeros(3, 1, CV_64F);
cost__ = 9999;
@ -40,14 +40,14 @@ dls::dls(const cv::Mat& opoints, const cv::Mat& ipoints)
if (opoints.depth() == ipoints.depth())
{
if (opoints.depth() == CV_32F)
init_points<cv::Point3f, cv::Point2f>(opoints, ipoints);
init_points<Point3f, Point2f>(opoints, ipoints);
else
init_points<cv::Point3d, cv::Point2d>(opoints, ipoints);
init_points<Point3d, Point2d>(opoints, ipoints);
}
else if (opoints.depth() == CV_32F)
init_points<cv::Point3f, cv::Point2d>(opoints, ipoints);
init_points<Point3f, Point2d>(opoints, ipoints);
else
init_points<cv::Point3d, cv::Point2f>(opoints, ipoints);
init_points<Point3d, Point2f>(opoints, ipoints);
}
dls::~dls()
@ -55,10 +55,10 @@ dls::~dls()
// TODO Auto-generated destructor stub
}
bool dls::compute_pose(cv::Mat& R, cv::Mat& t)
bool dls::compute_pose(Mat& R, Mat& t)
{
std::vector<cv::Mat> R_;
std::vector<Mat> R_;
R_.push_back(rotx(CV_PI/2));
R_.push_back(roty(CV_PI/2));
R_.push_back(rotz(CV_PI/2));
@ -67,7 +67,7 @@ bool dls::compute_pose(cv::Mat& R, cv::Mat& t)
for (int i = 0; i < 3; ++i)
{
// Make a random rotation
cv::Mat pp = R_[i] * ( p - cv::repeat(mn, 1, p.cols) );
Mat pp = R_[i] * ( p - repeat(mn, 1, p.cols) );
// clear for new data
C_est_.clear();
@ -99,13 +99,13 @@ bool dls::compute_pose(cv::Mat& R, cv::Mat& t)
return false;
}
void dls::run_kernel(const cv::Mat& pp)
void dls::run_kernel(const Mat& pp)
{
cv::Mat Mtilde(27, 27, CV_64F);
cv::Mat D = cv::Mat::zeros(9, 9, CV_64F);
Mat Mtilde(27, 27, CV_64F);
Mat D = Mat::zeros(9, 9, CV_64F);
build_coeff_matrix(pp, Mtilde, D);
cv::Mat eigenval_r, eigenval_i, eigenvec_r, eigenvec_i;
Mat eigenval_r, eigenval_i, eigenvec_r, eigenvec_i;
compute_eigenvec(Mtilde, eigenval_r, eigenval_i, eigenvec_r, eigenvec_i);
/*
@ -115,16 +115,16 @@ void dls::run_kernel(const cv::Mat& pp)
// extract the optimal solutions from the eigen decomposition of the
// Multiplication matrix
cv::Mat sols = cv::Mat::zeros(3, 27, CV_64F);
Mat sols = Mat::zeros(3, 27, CV_64F);
std::vector<double> cost;
int count = 0;
for (int k = 0; k < 27; ++k)
{
// V(:,k) = V(:,k)/V(1,k);
cv::Mat V_kA = eigenvec_r.col(k); // 27x1
cv::Mat V_kB = cv::Mat(1, 1, z.depth(), V_kA.at<double>(0)); // 1x1
cv::Mat V_k; cv::solve(V_kB.t(), V_kA.t(), V_k); // A/B = B'\A'
cv::Mat( V_k.t()).copyTo( eigenvec_r.col(k) );
Mat V_kA = eigenvec_r.col(k); // 27x1
Mat V_kB = Mat(1, 1, z.depth(), V_kA.at<double>(0)); // 1x1
Mat V_k; solve(V_kB.t(), V_kA.t(), V_k); // A/B = B'\A'
Mat( V_k.t()).copyTo( eigenvec_r.col(k) );
//if (imag(V(2,k)) == 0)
#ifdef HAVE_EIGEN
@ -138,24 +138,24 @@ void dls::run_kernel(const cv::Mat& pp)
stmp[1] = eigenvec_r.at<double>(3, k);
stmp[2] = eigenvec_r.at<double>(1, k);
cv::Mat H = Hessian(stmp);
Mat H = Hessian(stmp);
cv::Mat eigenvalues, eigenvectors;
cv::eigen(H, eigenvalues, eigenvectors);
Mat eigenvalues, eigenvectors;
eigen(H, eigenvalues, eigenvectors);
if(positive_eigenvalues(&eigenvalues))
{
// sols(:,i) = stmp;
cv::Mat stmp_mat(3, 1, CV_64F, &stmp);
Mat stmp_mat(3, 1, CV_64F, &stmp);
stmp_mat.copyTo( sols.col(count) );
cv::Mat Cbar = cayley2rotbar(stmp_mat);
cv::Mat Cbarvec = Cbar.reshape(1,1).t();
Mat Cbar = cayley2rotbar(stmp_mat);
Mat Cbarvec = Cbar.reshape(1,1).t();
// cost(i) = CbarVec' * D * CbarVec;
cv::Mat cost_mat = Cbarvec.t() * D * Cbarvec;
Mat cost_mat = Cbarvec.t() * D * Cbarvec;
cost.push_back( cost_mat.at<double>(0) );
count++;
@ -166,30 +166,30 @@ void dls::run_kernel(const cv::Mat& pp)
// extract solutions
sols = sols.clone().colRange(0, count);
std::vector<cv::Mat> C_est, t_est;
std::vector<Mat> C_est, t_est;
for (int j = 0; j < sols.cols; ++j)
{
// recover the optimal orientation
// C_est(:,:,j) = 1/(1 + sols(:,j)' * sols(:,j)) * cayley2rotbar(sols(:,j));
cv::Mat sols_j = sols.col(j);
double sols_mult = 1./(1.+cv::Mat( sols_j.t() * sols_j ).at<double>(0));
cv::Mat C_est_j = cayley2rotbar(sols_j).mul(sols_mult);
Mat sols_j = sols.col(j);
double sols_mult = 1./(1.+Mat( sols_j.t() * sols_j ).at<double>(0));
Mat C_est_j = cayley2rotbar(sols_j).mul(sols_mult);
C_est.push_back( C_est_j );
cv::Mat A2 = cv::Mat::zeros(3, 3, CV_64F);
cv::Mat b2 = cv::Mat::zeros(3, 1, CV_64F);
Mat A2 = Mat::zeros(3, 3, CV_64F);
Mat b2 = Mat::zeros(3, 1, CV_64F);
for (int i = 0; i < N; ++i)
{
cv::Mat eye = cv::Mat::eye(3, 3, CV_64F);
cv::Mat z_mul = z.col(i)*z.col(i).t();
Mat eye = Mat::eye(3, 3, CV_64F);
Mat z_mul = z.col(i)*z.col(i).t();
A2 += eye - z_mul;
b2 += (z_mul - eye) * C_est_j * pp.col(i);
}
// recover the optimal translation
cv::Mat X2; cv::solve(A2, b2, X2); // A\B
Mat X2; solve(A2, b2, X2); // A\B
t_est.push_back(X2);
}
@ -197,12 +197,12 @@ void dls::run_kernel(const cv::Mat& pp)
// check that the points are infront of the center of perspectivity
for (int k = 0; k < sols.cols; ++k)
{
cv::Mat cam_points = C_est[k] * pp + cv::repeat(t_est[k], 1, pp.cols);
cv::Mat cam_points_k = cam_points.row(2);
Mat cam_points = C_est[k] * pp + repeat(t_est[k], 1, pp.cols);
Mat cam_points_k = cam_points.row(2);
if(is_empty(&cam_points_k))
{
cv::Mat C_valid = C_est[k], t_valid = t_est[k];
Mat C_valid = C_est[k], t_valid = t_est[k];
double cost_valid = cost[k];
C_est_.push_back(C_valid);
@ -213,20 +213,20 @@ void dls::run_kernel(const cv::Mat& pp)
}
void dls::build_coeff_matrix(const cv::Mat& pp, cv::Mat& Mtilde, cv::Mat& D)
void dls::build_coeff_matrix(const Mat& pp, Mat& Mtilde, Mat& D)
{
CV_Assert(!pp.empty() && N > 0);
cv::Mat eye = cv::Mat::eye(3, 3, CV_64F);
Mat eye = Mat::eye(3, 3, CV_64F);
// build coeff matrix
// An intermediate matrix, the inverse of what is called "H" in the paper
// (see eq. 25)
cv::Mat H = cv::Mat::zeros(3, 3, CV_64F);
cv::Mat A = cv::Mat::zeros(3, 9, CV_64F);
cv::Mat pp_i(3, 1, CV_64F);
Mat H = Mat::zeros(3, 3, CV_64F);
Mat A = Mat::zeros(3, 9, CV_64F);
Mat pp_i(3, 1, CV_64F);
cv::Mat z_i(3, 1, CV_64F);
Mat z_i(3, 1, CV_64F);
for (int i = 0; i < N; ++i)
{
z.col(i).copyTo(z_i);
@ -236,10 +236,10 @@ void dls::build_coeff_matrix(const cv::Mat& pp, cv::Mat& Mtilde, cv::Mat& D)
H = eye.mul(N) - z * z.t();
// A\B
cv::solve(H, A, A, cv::DECOMP_NORMAL);
solve(H, A, A, DECOMP_NORMAL);
H.release();
cv::Mat ppi_A(3, 1, CV_64F);
Mat ppi_A(3, 1, CV_64F);
for (int i = 0; i < N; ++i)
{
z.col(i).copyTo(z_i);
@ -253,18 +253,18 @@ void dls::build_coeff_matrix(const cv::Mat& pp, cv::Mat& Mtilde, cv::Mat& D)
// generate random samples
std::vector<double> u(5);
cv::randn(u, 0, 200);
randn(u, 0, 200);
cv::Mat M2 = cayley_LS_M(f1coeff, f2coeff, f3coeff, u);
Mat M2 = cayley_LS_M(f1coeff, f2coeff, f3coeff, u);
cv::Mat M2_1 = M2(cv::Range(0,27), cv::Range(0,27));
cv::Mat M2_2 = M2(cv::Range(0,27), cv::Range(27,120));
cv::Mat M2_3 = M2(cv::Range(27,120), cv::Range(27,120));
cv::Mat M2_4 = M2(cv::Range(27,120), cv::Range(0,27));
Mat M2_1 = M2(Range(0,27), Range(0,27));
Mat M2_2 = M2(Range(0,27), Range(27,120));
Mat M2_3 = M2(Range(27,120), Range(27,120));
Mat M2_4 = M2(Range(27,120), Range(0,27));
M2.release();
// A/B = B'\A'
cv::Mat M2_5; cv::solve(M2_3.t(), M2_2.t(), M2_5);
Mat M2_5; solve(M2_3.t(), M2_2.t(), M2_5);
M2_2.release(); M2_3.release();
// construct the multiplication matrix via schur compliment of the Macaulay
@ -273,13 +273,13 @@ void dls::build_coeff_matrix(const cv::Mat& pp, cv::Mat& Mtilde, cv::Mat& D)
}
void dls::compute_eigenvec(const cv::Mat& Mtilde, cv::Mat& eigenval_real, cv::Mat& eigenval_imag,
cv::Mat& eigenvec_real, cv::Mat& eigenvec_imag)
void dls::compute_eigenvec(const Mat& Mtilde, Mat& eigenval_real, Mat& eigenval_imag,
Mat& eigenvec_real, Mat& eigenvec_imag)
{
#ifdef HAVE_EIGEN
Eigen::MatrixXd Mtilde_eig, zeros_eig;
cv::cv2eigen(Mtilde, Mtilde_eig);
cv::cv2eigen(cv::Mat::zeros(27, 27, CV_64F), zeros_eig);
cv2eigen(Mtilde, Mtilde_eig);
cv2eigen(Mat::zeros(27, 27, CV_64F), zeros_eig);
Eigen::MatrixXcd Mtilde_eig_cmplx(27, 27);
Mtilde_eig_cmplx.real() = Mtilde_eig;
@ -293,20 +293,20 @@ void dls::compute_eigenvec(const cv::Mat& Mtilde, cv::Mat& eigenval_real, cv::Ma
Eigen::MatrixXd eigvec_real = ces.eigenvectors().real();
Eigen::MatrixXd eigvec_imag = ces.eigenvectors().imag();
cv::eigen2cv(eigval_real, eigenval_real);
cv::eigen2cv(eigval_imag, eigenval_imag);
cv::eigen2cv(eigvec_real, eigenvec_real);
cv::eigen2cv(eigvec_imag, eigenvec_imag);
eigen2cv(eigval_real, eigenval_real);
eigen2cv(eigval_imag, eigenval_imag);
eigen2cv(eigvec_real, eigenvec_real);
eigen2cv(eigvec_imag, eigenvec_imag);
#else
EigenvalueDecomposition es(Mtilde);
eigenval_real = es.eigenvalues();
eigenvec_real = es.eigenvectors();
eigenval_imag = eigenvec_imag = cv::Mat();
eigenval_imag = eigenvec_imag = Mat();
#endif
}
void dls::fill_coeff(const cv::Mat * D_mat)
void dls::fill_coeff(const Mat * D_mat)
{
// TODO: shift D and coefficients one position to left
@ -394,9 +394,9 @@ void dls::fill_coeff(const cv::Mat * D_mat)
}
cv::Mat dls::LeftMultVec(const cv::Mat& v)
Mat dls::LeftMultVec(const Mat& v)
{
cv::Mat mat_ = cv::Mat::zeros(3, 9, CV_64F);
Mat mat_ = Mat::zeros(3, 9, CV_64F);
for (int i = 0; i < 3; ++i)
{
@ -407,12 +407,12 @@ cv::Mat dls::LeftMultVec(const cv::Mat& v)
return mat_;
}
cv::Mat dls::cayley_LS_M(const std::vector<double>& a, const std::vector<double>& b, const std::vector<double>& c, const std::vector<double>& u)
Mat dls::cayley_LS_M(const std::vector<double>& a, const std::vector<double>& b, const std::vector<double>& c, const std::vector<double>& u)
{
// TODO: input matrix pointer
// TODO: shift coefficients one position to left
cv::Mat M = cv::Mat::zeros(120, 120, CV_64F);
Mat M = Mat::zeros(120, 120, CV_64F);
M.at<double>(0,0)=u[1]; M.at<double>(0,35)=a[1]; M.at<double>(0,83)=b[1]; M.at<double>(0,118)=c[1];
M.at<double>(1,0)=u[4]; M.at<double>(1,1)=u[1]; M.at<double>(1,34)=a[1]; M.at<double>(1,35)=a[10]; M.at<double>(1,54)=b[1]; M.at<double>(1,83)=b[10]; M.at<double>(1,99)=c[1]; M.at<double>(1,118)=c[10];
@ -538,7 +538,7 @@ cv::Mat dls::cayley_LS_M(const std::vector<double>& a, const std::vector<double>
return M.t();
}
cv::Mat dls::Hessian(const double s[])
Mat dls::Hessian(const double s[])
{
// the vector of monomials is
// m = [ const ; s1^2 * s2 ; s1 * s2 ; s1 * s3 ; s2 * s3 ; s2^2 * s3 ; s2^3 ; ...
@ -577,73 +577,73 @@ cv::Mat dls::Hessian(const double s[])
Hs3[14]=0; Hs3[15]=3*s[2]*s[2]; Hs3[16]=s[0]*s[1]; Hs3[17]=0; Hs3[18]=s[0]*s[0]; Hs3[19]=0;
// fill Hessian matrix
cv::Mat H(3, 3, CV_64F);
H.at<double>(0,0) = cv::Mat(cv::Mat(f1coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs1)).at<double>(0,0);
H.at<double>(0,1) = cv::Mat(cv::Mat(f1coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs2)).at<double>(0,0);
H.at<double>(0,2) = cv::Mat(cv::Mat(f1coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs3)).at<double>(0,0);
Mat H(3, 3, CV_64F);
H.at<double>(0,0) = Mat(Mat(f1coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs1)).at<double>(0,0);
H.at<double>(0,1) = Mat(Mat(f1coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs2)).at<double>(0,0);
H.at<double>(0,2) = Mat(Mat(f1coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs3)).at<double>(0,0);
H.at<double>(1,0) = cv::Mat(cv::Mat(f2coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs1)).at<double>(0,0);
H.at<double>(1,1) = cv::Mat(cv::Mat(f2coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs2)).at<double>(0,0);
H.at<double>(1,2) = cv::Mat(cv::Mat(f2coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs3)).at<double>(0,0);
H.at<double>(1,0) = Mat(Mat(f2coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs1)).at<double>(0,0);
H.at<double>(1,1) = Mat(Mat(f2coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs2)).at<double>(0,0);
H.at<double>(1,2) = Mat(Mat(f2coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs3)).at<double>(0,0);
H.at<double>(2,0) = cv::Mat(cv::Mat(f3coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs1)).at<double>(0,0);
H.at<double>(2,1) = cv::Mat(cv::Mat(f3coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs2)).at<double>(0,0);
H.at<double>(2,2) = cv::Mat(cv::Mat(f3coeff).rowRange(1,21).t()*cv::Mat(20, 1, CV_64F, &Hs3)).at<double>(0,0);
H.at<double>(2,0) = Mat(Mat(f3coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs1)).at<double>(0,0);
H.at<double>(2,1) = Mat(Mat(f3coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs2)).at<double>(0,0);
H.at<double>(2,2) = Mat(Mat(f3coeff).rowRange(1,21).t()*Mat(20, 1, CV_64F, &Hs3)).at<double>(0,0);
return H;
}
cv::Mat dls::cayley2rotbar(const cv::Mat& s)
Mat dls::cayley2rotbar(const Mat& s)
{
double s_mul1 = cv::Mat(s.t()*s).at<double>(0,0);
cv::Mat s_mul2 = s*s.t();
cv::Mat eye = cv::Mat::eye(3, 3, CV_64F);
double s_mul1 = Mat(s.t()*s).at<double>(0,0);
Mat s_mul2 = s*s.t();
Mat eye = Mat::eye(3, 3, CV_64F);
return cv::Mat( eye.mul(1.-s_mul1) + skewsymm(&s).mul(2.) + s_mul2.mul(2.) ).t();
return Mat( eye.mul(1.-s_mul1) + skewsymm(&s).mul(2.) + s_mul2.mul(2.) ).t();
}
cv::Mat dls::skewsymm(const cv::Mat * X1)
Mat dls::skewsymm(const Mat * X1)
{
cv::MatConstIterator_<double> it = X1->begin<double>();
return (cv::Mat_<double>(3,3) << 0, -*(it+2), *(it+1),
MatConstIterator_<double> it = X1->begin<double>();
return (Mat_<double>(3,3) << 0, -*(it+2), *(it+1),
*(it+2), 0, -*(it+0),
-*(it+1), *(it+0), 0);
}
cv::Mat dls::rotx(const double t)
Mat dls::rotx(const double t)
{
// rotx: rotation about y-axis
double ct = cos(t);
double st = sin(t);
return (cv::Mat_<double>(3,3) << 1, 0, 0, 0, ct, -st, 0, st, ct);
return (Mat_<double>(3,3) << 1, 0, 0, 0, ct, -st, 0, st, ct);
}
cv::Mat dls::roty(const double t)
Mat dls::roty(const double t)
{
// roty: rotation about y-axis
double ct = cos(t);
double st = sin(t);
return (cv::Mat_<double>(3,3) << ct, 0, st, 0, 1, 0, -st, 0, ct);
return (Mat_<double>(3,3) << ct, 0, st, 0, 1, 0, -st, 0, ct);
}
cv::Mat dls::rotz(const double t)
Mat dls::rotz(const double t)
{
// rotz: rotation about y-axis
double ct = cos(t);
double st = sin(t);
return (cv::Mat_<double>(3,3) << ct, -st, 0, st, ct, 0, 0, 0, 1);
return (Mat_<double>(3,3) << ct, -st, 0, st, ct, 0, 0, 0, 1);
}
cv::Mat dls::mean(const cv::Mat& M)
Mat dls::mean(const Mat& M)
{
cv::Mat m = cv::Mat::zeros(3, 1, CV_64F);
Mat m = Mat::zeros(3, 1, CV_64F);
for (int i = 0; i < M.cols; ++i) m += M.col(i);
return m.mul(1./(double)M.cols);
}
bool dls::is_empty(const cv::Mat * M)
bool dls::is_empty(const Mat * M)
{
cv::MatConstIterator_<double> it = M->begin<double>(), it_end = M->end<double>();
MatConstIterator_<double> it = M->begin<double>(), it_end = M->end<double>();
for(; it != it_end; ++it)
{
if(*it < 0) return false;
@ -651,9 +651,11 @@ bool dls::is_empty(const cv::Mat * M)
return true;
}
bool dls::positive_eigenvalues(const cv::Mat * eigenvalues)
bool dls::positive_eigenvalues(const Mat * eigenvalues)
{
CV_Assert(eigenvalues && !eigenvalues->empty());
cv::MatConstIterator_<double> it = eigenvalues->begin<double>();
MatConstIterator_<double> it = eigenvalues->begin<double>();
return *(it) > 0 && *(it+1) > 0 && *(it+2) > 0;
}
}

View File

@ -2,11 +2,9 @@
#define DLS_H_
#include "precomp.hpp"
#include <iostream>
using namespace std;
using namespace cv;
namespace cv {
class dls
{
@ -770,4 +768,6 @@ public:
Mat eigenvectors() { return _eigenvectors; }
};
}
#endif // DLS_H

View File

@ -1,9 +1,12 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <iostream>
#include "precomp.hpp"
#include "epnp.h"
namespace cv
{
namespace cv {
epnp::epnp(const Mat& cameraMatrix, const Mat& opoints, const Mat& ipoints)
{

View File

@ -1,11 +1,14 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef epnp_h
#define epnp_h
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
namespace cv
{
namespace cv {
class epnp {
public:

View File

@ -30,11 +30,9 @@
*/
#include "precomp.hpp"
#include "usac.hpp"
namespace cv
{
namespace cv {
class EMEstimatorCallback CV_FINAL : public PointSetRegistrator::Callback
{
@ -128,7 +126,7 @@ public:
}
Mat Bz(3, 3, CV_64F, bz);
cv::Mat xy1;
Mat xy1;
SVD::solveZ(Bz, xy1);
if (fabs(xy1.at<double>(2)) < 1e-10) continue;
@ -136,7 +134,7 @@ public:
ys.push_back(xy1.at<double>(1) / xy1.at<double>(2));
zs.push_back(z1);
cv::Mat Evec = EE.col(0) * xs.back() + EE.col(1) * ys.back() + EE.col(2) * zs.back() + EE.col(3);
Mat Evec = EE.col(0) * xs.back() + EE.col(1) * ys.back() + EE.col(2) * zs.back() + EE.col(3);
Evec /= norm(Evec);
memcpy(e + count * 9, Evec.ptr(), 9 * sizeof(double));
@ -401,10 +399,8 @@ protected:
}
};
}
// Input should be a vector of n 2D points or a Nx2 matrix
cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
Mat findEssentialMat( InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
int method, double prob, double threshold, OutputArray _mask)
{
CV_INSTRUMENT_REGION();
@ -455,16 +451,16 @@ cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, InputArr
return E;
}
cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, double focal, Point2d pp,
Mat findEssentialMat( InputArray _points1, InputArray _points2, double focal, Point2d pp,
int method, double prob, double threshold, OutputArray _mask)
{
CV_INSTRUMENT_REGION();
Mat cameraMatrix = (Mat_<double>(3,3) << focal, 0, pp.x, 0, focal, pp.y, 0, 0, 1);
return cv::findEssentialMat(_points1, _points2, cameraMatrix, method, prob, threshold, _mask);
return findEssentialMat(_points1, _points2, cameraMatrix, method, prob, threshold, _mask);
}
cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2,
Mat findEssentialMat( InputArray _points1, InputArray _points2,
InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
int method, double prob, double threshold, OutputArray _mask)
@ -493,7 +489,7 @@ cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2,
return findEssentialMat(_pointsUntistorted1, _pointsUntistorted2, cm0, method, prob, threshold, _mask);
}
cv::Mat cv::findEssentialMat( InputArray points1, InputArray points2,
Mat findEssentialMat( InputArray points1, InputArray points2,
InputArray cameraMatrix1, InputArray cameraMatrix2,
InputArray dist_coeff1, InputArray dist_coeff2, OutputArray mask, const UsacParams &params) {
Ptr<usac::Model> model;
@ -507,7 +503,7 @@ cv::Mat cv::findEssentialMat( InputArray points1, InputArray points2,
}
int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2,
int recoverPose( InputArray E, InputArray _points1, InputArray _points2,
InputArray _cameraMatrix, OutputArray _R, OutputArray _t, double distanceThresh,
InputOutputArray _mask, OutputArray triangulatedPoints)
{
@ -677,20 +673,20 @@ int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2,
}
}
int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
int recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
OutputArray _R, OutputArray _t, InputOutputArray _mask)
{
return cv::recoverPose(E, _points1, _points2, _cameraMatrix, _R, _t, 50, _mask);
return recoverPose(E, _points1, _points2, _cameraMatrix, _R, _t, 50, _mask);
}
int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2, OutputArray _R,
int recoverPose( InputArray E, InputArray _points1, InputArray _points2, OutputArray _R,
OutputArray _t, double focal, Point2d pp, InputOutputArray _mask)
{
Mat cameraMatrix = (Mat_<double>(3,3) << focal, 0, pp.x, 0, focal, pp.y, 0, 0, 1);
return cv::recoverPose(E, _points1, _points2, cameraMatrix, _R, _t, _mask);
return recoverPose(E, _points1, _points2, cameraMatrix, _R, _t, _mask);
}
void cv::decomposeEssentialMat( InputArray _E, OutputArray _R1, OutputArray _R2, OutputArray _t )
void decomposeEssentialMat( InputArray _E, OutputArray _R1, OutputArray _R2, OutputArray _t )
{
CV_INSTRUMENT_REGION();
@ -715,3 +711,5 @@ void cv::decomposeEssentialMat( InputArray _E, OutputArray _R1, OutputArray _R2,
R2.copyTo(_R2);
t.copyTo(_t);
}
}

View File

@ -46,8 +46,7 @@
#include "usac.hpp"
namespace cv
{
namespace cv {
/**
* This class estimates a homography \f$H\in \mathbb{R}^{3\times 3}\f$
@ -269,9 +268,7 @@ public:
Mat src, dst;
};
} // end namesapce cv
namespace cv{
static bool createAndRunRHORegistrator(double confidence,
int maxIters,
double ransacReprojThreshold,
@ -346,10 +343,8 @@ static bool createAndRunRHORegistrator(double confidence,
return result;
}
}
cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
Mat findHomography( InputArray _points1, InputArray _points2,
int method, double ransacReprojThreshold, OutputArray _mask,
const int maxIters, const double confidence)
{
@ -441,14 +436,14 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
return H;
}
cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
Mat findHomography( InputArray _points1, InputArray _points2,
OutputArray _mask, int method, double ransacReprojThreshold )
{
return cv::findHomography(_points1, _points2, method, ransacReprojThreshold, _mask);
return findHomography(_points1, _points2, method, ransacReprojThreshold, _mask);
}
cv::Mat cv::findHomography(InputArray srcPoints, InputArray dstPoints, OutputArray mask,
Mat findHomography(InputArray srcPoints, InputArray dstPoints, OutputArray mask,
const UsacParams &params) {
Ptr<usac::Model> model;
usac::setParameters(model, usac::EstimationMethod::Homography, params, mask.needed());
@ -470,9 +465,6 @@ cv::Mat cv::findHomography(InputArray srcPoints, InputArray dstPoints, OutputArr
that can be found at http://www-sop.inria.fr/robotvis/personnel/zzhang/zzhang-eng.html */
/************************************** 7-point algorithm *******************************/
namespace cv
{
/**
* Compute the fundamental matrix using the 7-point algorithm.
*
@ -826,9 +818,7 @@ public:
}
};
}
cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
Mat findFundamentalMat( InputArray _points1, InputArray _points2,
int method, double ransacReprojThreshold, double confidence,
int maxIters, OutputArray _mask )
{
@ -897,20 +887,20 @@ cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
return F;
}
cv::Mat cv::findFundamentalMat( cv::InputArray points1, cv::InputArray points2,
Mat findFundamentalMat( InputArray points1, InputArray points2,
int method, double ransacReprojThreshold, double confidence,
cv::OutputArray mask )
OutputArray mask )
{
return cv::findFundamentalMat(points1, points2, method, ransacReprojThreshold, confidence, 1000, mask);
return findFundamentalMat(points1, points2, method, ransacReprojThreshold, confidence, 1000, mask);
}
cv::Mat cv::findFundamentalMat( cv::InputArray points1, cv::InputArray points2, cv::OutputArray mask,
Mat findFundamentalMat( InputArray points1, InputArray points2, OutputArray mask,
int method, double ransacReprojThreshold, double confidence )
{
return cv::findFundamentalMat(points1, points2, method, ransacReprojThreshold, confidence, 1000, mask);
return findFundamentalMat(points1, points2, method, ransacReprojThreshold, confidence, 1000, mask);
}
cv::Mat cv::findFundamentalMat( InputArray points1, InputArray points2,
Mat findFundamentalMat( InputArray points1, InputArray points2,
OutputArray mask, const UsacParams &params) {
Ptr<usac::Model> model;
setParameters(model, usac::EstimationMethod::Fundamental, params, mask.needed());
@ -926,7 +916,7 @@ cv::Mat cv::findFundamentalMat( InputArray points1, InputArray points2,
void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
void computeCorrespondEpilines( InputArray _points, int whichImage,
InputArray _Fmat, OutputArray _lines )
{
CV_INSTRUMENT_REGION();
@ -1010,7 +1000,7 @@ static inline float scaleFor(float x){
}
void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
void convertPointsFromHomogeneous( InputArray _src, OutputArray _dst, int dtype )
{
CV_INSTRUMENT_REGION();
@ -1026,7 +1016,14 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
}
CV_Assert( npoints >= 0 && (depth == CV_32S || depth == CV_32F || depth == CV_64F));
int dtype = CV_MAKETYPE(depth <= CV_32F ? CV_32F : CV_64F, cn-1);
int ddepth;
if (dtype < 0)
ddepth = depth <= CV_32F ? CV_32F : CV_64F;
else
ddepth = CV_MAT_DEPTH(dtype);
CV_Assert(ddepth == CV_32F || ddepth == CV_64F);
dtype = CV_MAKETYPE(ddepth, cn-1);
_dst.create(npoints, 1, dtype);
Mat dst = _dst.getMat();
if( !dst.isContinuous() )
@ -1042,22 +1039,38 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
if( cn == 3 )
{
const Point3i* sptr = src.ptr<Point3i>();
Point2f* dptr = dst.ptr<Point2f>();
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i].z != 0 ? 1.f/sptr[i].z : 1.f;
dptr[i] = Point2f(sptr[i].x*scale, sptr[i].y*scale);
}
Point2f* fdptr = (Point2f*)dst.data;
Point2d* ddptr = (Point2d*)dst.data;
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i].z != 0 ? 1.f/sptr[i].z : 1.f;
fdptr[i] = Point2f(sptr[i].x*scale, sptr[i].y*scale);
}
else
for( i = 0; i < npoints; i++ )
{
double scale = sptr[i].z != 0 ? 1./sptr[i].z : 1.;
ddptr[i] = Point2d(sptr[i].x*scale, sptr[i].y*scale);
}
}
else
{
const Vec4i* sptr = src.ptr<Vec4i>();
Point3f* dptr = dst.ptr<Point3f>();
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i][3] != 0 ? 1.f/sptr[i][3] : 1.f;
dptr[i] = Point3f(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
Point3f* fdptr = (Point3f*)dst.data;
Point3d* ddptr = (Point3d*)dst.data;
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
{
float scale = sptr[i][3] != 0 ? 1.f/sptr[i][3] : 1.f;
fdptr[i] = Point3f(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
else
for( i = 0; i < npoints; i++ )
{
double scale = sptr[i][3] != 0 ? 1./sptr[i][3] : 1.;
ddptr[i] = Point3d(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
}
}
else if( depth == CV_32F )
@ -1065,22 +1078,38 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
if( cn == 3 )
{
const Point3f* sptr = src.ptr<Point3f>();
Point2f* dptr = dst.ptr<Point2f>();
for( i = 0; i < npoints; i++ )
{
float scale = scaleFor(sptr[i].z);
dptr[i] = Point2f(sptr[i].x*scale, sptr[i].y*scale);
}
Point2f* fdptr = (Point2f*)dst.data;
Point2d* ddptr = (Point2d*)dst.data;
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
{
float scale = scaleFor(sptr[i].z);
fdptr[i] = Point2f(sptr[i].x*scale, sptr[i].y*scale);
}
else
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor((double)sptr[i].z);
ddptr[i] = Point2d(sptr[i].x*scale, sptr[i].y*scale);
}
}
else
{
const Vec4f* sptr = src.ptr<Vec4f>();
Point3f* dptr = dst.ptr<Point3f>();
for( i = 0; i < npoints; i++ )
{
float scale = scaleFor(sptr[i][3]);
dptr[i] = Point3f(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
Point3f* fdptr = (Point3f*)dst.data;
Point3d* ddptr = (Point3d*)dst.data;
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
{
float scale = scaleFor(sptr[i][3]);
fdptr[i] = Point3f(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
else
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor((double)sptr[i][3]);
ddptr[i] = Point3d(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
}
}
else if( depth == CV_64F )
@ -1088,22 +1117,38 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
if( cn == 3 )
{
const Point3d* sptr = src.ptr<Point3d>();
Point2d* dptr = dst.ptr<Point2d>();
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor(sptr[i].z);
dptr[i] = Point2d(sptr[i].x*scale, sptr[i].y*scale);
}
Point2f* fdptr = (Point2f*)dst.data;
Point2d* ddptr = (Point2d*)dst.data;
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor(sptr[i].z);
fdptr[i] = Point2f((float)(sptr[i].x*scale), (float)(sptr[i].y*scale));
}
else
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor(sptr[i].z);
ddptr[i] = Point2d(sptr[i].x*scale, sptr[i].y*scale);
}
}
else
{
const Vec4d* sptr = src.ptr<Vec4d>();
Point3d* dptr = dst.ptr<Point3d>();
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor(sptr[i][3]);
dptr[i] = Point3d(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
Point3f* fdptr = (Point3f*)dst.data;
Point3d* ddptr = (Point3d*)dst.data;
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor(sptr[i][3]);
fdptr[i] = Point3f((float)(sptr[i][0]*scale), (float)(sptr[i][1]*scale), (float)(sptr[i][2]*scale));
}
else
for( i = 0; i < npoints; i++ )
{
double scale = scaleFor(sptr[i][3]);
ddptr[i] = Point3d(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
}
}
}
else
@ -1111,7 +1156,7 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
}
void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
void convertPointsToHomogeneous( InputArray _src, OutputArray _dst, int dtype )
{
CV_INSTRUMENT_REGION();
@ -1127,7 +1172,15 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
}
CV_Assert( npoints >= 0 && (depth == CV_32S || depth == CV_32F || depth == CV_64F));
int dtype = CV_MAKETYPE(depth, cn+1);
int ddepth;
if (dtype < 0)
ddepth = depth;
else
{
ddepth = CV_MAT_DEPTH(dtype);
CV_Assert(ddepth == CV_32F || ddepth == CV_64F || (ddepth == depth));
}
dtype = CV_MAKETYPE(ddepth, cn+1);
_dst.create(npoints, 1, dtype);
Mat dst = _dst.getMat();
if( !dst.isContinuous() )
@ -1138,63 +1191,91 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
}
CV_Assert( dst.isContinuous() );
if( depth == CV_32S )
if( cn == 2 )
{
if( cn == 2 )
Point3i* idptr = (Point3i*)dst.data;
Point3f* fdptr = (Point3f*)dst.data;
Point3d* ddptr = (Point3d*)dst.data;
if( depth == CV_32S )
{
const Point2i* sptr = src.ptr<Point2i>();
Point3i* dptr = dst.ptr<Point3i>();
for( i = 0; i < npoints; i++ )
dptr[i] = Point3i(sptr[i].x, sptr[i].y, 1);
if (ddepth == CV_32S)
for( i = 0; i < npoints; i++ )
idptr[i] = Point3i(sptr[i].x, sptr[i].y, 1);
else if(ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
fdptr[i] = Point3f((float)sptr[i].x, (float)sptr[i].y, 1.f);
else
for( i = 0; i < npoints; i++ )
ddptr[i] = Point3d(sptr[i].x, sptr[i].y, 1.);
}
else
{
const Point3i* sptr = src.ptr<Point3i>();
Vec4i* dptr = dst.ptr<Vec4i>();
for( i = 0; i < npoints; i++ )
dptr[i] = Vec4i(sptr[i].x, sptr[i].y, sptr[i].z, 1);
}
}
else if( depth == CV_32F )
{
if( cn == 2 )
else if( depth == CV_32F )
{
const Point2f* sptr = src.ptr<Point2f>();
Point3f* dptr = dst.ptr<Point3f>();
for( i = 0; i < npoints; i++ )
dptr[i] = Point3f(sptr[i].x, sptr[i].y, 1.f);
if(ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
fdptr[i] = Point3f((float)sptr[i].x, (float)sptr[i].y, 1.f);
else
for( i = 0; i < npoints; i++ )
ddptr[i] = Point3d(sptr[i].x, sptr[i].y, 1.);
}
else
{
const Point3f* sptr = src.ptr<Point3f>();
Vec4f* dptr = dst.ptr<Vec4f>();
for( i = 0; i < npoints; i++ )
dptr[i] = Vec4f(sptr[i].x, sptr[i].y, sptr[i].z, 1.f);
const Point2d* sptr = src.ptr<Point2d>();
if(ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
fdptr[i] = Point3f((float)sptr[i].x, (float)sptr[i].y, 1.f);
else
for( i = 0; i < npoints; i++ )
ddptr[i] = Point3d(sptr[i].x, sptr[i].y, 1.);
}
}
else if( depth == CV_64F )
else
{
if( cn == 2 )
Vec4i* idptr = (Vec4i*)dst.data;
Vec4f* fdptr = (Vec4f*)dst.data;
Vec4d* ddptr = (Vec4d*)dst.data;
if (depth == CV_32S)
{
const Point2d* sptr = src.ptr<Point2d>();
Point3d* dptr = dst.ptr<Point3d>();
for( i = 0; i < npoints; i++ )
dptr[i] = Point3d(sptr[i].x, sptr[i].y, 1.);
const Point3i* sptr = src.ptr<Point3i>();
if (ddepth == CV_32S)
for( i = 0; i < npoints; i++ )
idptr[i] = Vec4i(sptr[i].x, sptr[i].y, sptr[i].z, 1);
else if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
fdptr[i] = Vec4f((float)sptr[i].x, (float)sptr[i].y, (float)sptr[i].z, 1.f);
else
for( i = 0; i < npoints; i++ )
ddptr[i] = Vec4d(sptr[i].x, sptr[i].y, sptr[i].z, 1.);
}
else if (depth == CV_32F)
{
const Point3f* sptr = src.ptr<Point3f>();
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
fdptr[i] = Vec4f(sptr[i].x, sptr[i].y, sptr[i].z, 1.f);
else
for( i = 0; i < npoints; i++ )
ddptr[i] = Vec4d(sptr[i].x, sptr[i].y, sptr[i].z, 1.);
}
else
{
const Point3d* sptr = src.ptr<Point3d>();
Vec4d* dptr = dst.ptr<Vec4d>();
for( i = 0; i < npoints; i++ )
dptr[i] = Vec4d(sptr[i].x, sptr[i].y, sptr[i].z, 1.);
if (ddepth == CV_32F)
for( i = 0; i < npoints; i++ )
fdptr[i] = Vec4f((float)sptr[i].x, (float)sptr[i].y, (float)sptr[i].z, 1.f);
else
for( i = 0; i < npoints; i++ )
ddptr[i] = Vec4d(sptr[i].x, sptr[i].y, sptr[i].z, 1.);
}
}
else
CV_Error(Error::StsUnsupportedFormat, "");
}
void cv::convertPointsHomogeneous( InputArray _src, OutputArray _dst )
void convertPointsHomogeneous( InputArray _src, OutputArray _dst )
{
CV_INSTRUMENT_REGION();
@ -1207,7 +1288,7 @@ void cv::convertPointsHomogeneous( InputArray _src, OutputArray _dst )
convertPointsToHomogeneous(_src, _dst);
}
double cv::sampsonDistance(InputArray _pt1, InputArray _pt2, InputArray _F)
double sampsonDistance(InputArray _pt1, InputArray _pt2, InputArray _F)
{
CV_INSTRUMENT_REGION();
@ -1230,4 +1311,6 @@ double cv::sampsonDistance(InputArray _pt1, InputArray _pt2, InputArray _F)
return v*v / (F_pt1[0] + F_pt1[1] + Ft_pt2[0] + Ft_pt2[1]);
}
}
/* End of file. */

View File

@ -50,8 +50,7 @@
#include "precomp.hpp"
#include <memory>
namespace cv
{
namespace cv {
namespace HomographyDecomposition
{

View File

@ -1097,4 +1097,4 @@ void homographyHO(InputArray _srcPoints, InputArray _targPoints, Matx33d& H)
H = H * h22_inv;
}
}
} //namespace cv
} //namespace cv::_3d

View File

@ -255,5 +255,5 @@ void homographyHO(InputArray srcPoints, InputArray targPoints, Matx33d& H);
void normalizeDataIsotropic(InputArray Data, OutputArray DataN, OutputArray T, OutputArray Ti);
}
} //namespace cv
} //namespace cv::_3d
#endif

View File

@ -74,8 +74,7 @@
POSSIBILITY OF SUCH DAMAGE.
*/
namespace cv
{
namespace cv {
class LMSolverImpl CV_FINAL : public LMSolver
{

View File

@ -1,3 +1,7 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <cstring>
#include <cmath>
#include <iostream>
@ -5,6 +9,8 @@
#include "polynom_solver.h"
#include "p3p.h"
namespace cv {
void p3p::init_inverse_parameters()
{
inv_fx = 1. / fx;
@ -464,3 +470,5 @@ bool p3p::jacobi_4x4(double * A, double * D, double * U)
return false;
}
}

View File

@ -1,9 +1,14 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef P3P_H
#define P3P_H
#include "precomp.hpp"
namespace cv {
class p3p
{
public:
@ -68,4 +73,6 @@ class p3p
double inv_fx, inv_fy, cx_fx, cy_fy;
};
}
#endif // P3P_H

View File

@ -1,9 +1,15 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "polynom_solver.h"
#include <math.h>
#include <iostream>
namespace cv {
int solve_deg2(double a, double b, double c, double & x1, double & x2)
{
double delta = b * b - 4 * a * c;
@ -168,3 +174,5 @@ int solve_deg4(double a, double b, double c, double d, double e,
return nb_real_roots;
}
}

View File

@ -1,6 +1,12 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef POLYNOM_SOLVER_H
#define POLYNOM_SOLVER_H
namespace cv {
int solve_deg2(double a, double b, double c, double & x1, double & x2);
int solve_deg3(double a, double b, double c, double d,
@ -9,4 +15,6 @@ int solve_deg3(double a, double b, double c, double d,
int solve_deg4(double a, double b, double c, double d, double e,
double & x0, double & x1, double & x2, double & x3);
}
#endif // POLYNOM_SOLVER_H

143
modules/3d/src/precomp.hpp Normal file
View File

@ -0,0 +1,143 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/core/ocl.hpp"
#define GET_OPTIMIZED(func) (func)
namespace cv {
/**
* Compute the number of iterations given the confidence, outlier ratio, number
* of model points and the maximum iteration number.
*
* @param p confidence value
* @param ep outlier ratio
* @param modelPoints number of model points required for estimation
* @param maxIters maximum number of iterations
* @return
* \f[
* \frac{\ln(1-p)}{\ln\left(1-(1-ep)^\mathrm{modelPoints}\right)}
* \f]
*
* If the computed number of iterations is larger than maxIters, then maxIters is returned.
*/
int RANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters );
class CV_EXPORTS PointSetRegistrator : public Algorithm
{
public:
class CV_EXPORTS Callback
{
public:
virtual ~Callback() {}
virtual int runKernel(InputArray m1, InputArray m2, OutputArray model) const = 0;
virtual void computeError(InputArray m1, InputArray m2, InputArray model, OutputArray err) const = 0;
virtual bool checkSubset(InputArray, InputArray, int) const { return true; }
};
virtual void setCallback(const Ptr<PointSetRegistrator::Callback>& cb) = 0;
virtual bool run(InputArray m1, InputArray m2, OutputArray model, OutputArray mask) const = 0;
};
CV_EXPORTS Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& cb,
int modelPoints, double threshold,
double confidence=0.99, int maxIters=1000 );
CV_EXPORTS Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& cb,
int modelPoints, double confidence=0.99, int maxIters=1000 );
template<typename T> inline int compressElems( T* ptr, const uchar* mask, int mstep, int count )
{
int i, j;
for( i = j = 0; i < count; i++ )
if( mask[i*mstep] )
{
if( i > j )
ptr[j] = ptr[i];
j++;
}
return j;
}
static inline bool haveCollinearPoints( const Mat& m, int count )
{
int j, k, i = count-1;
const Point2f* ptr = m.ptr<Point2f>();
// check that the i-th selected point does not belong
// to a line connecting some previously selected points
// also checks that points are not too close to each other
for( j = 0; j < i; j++ )
{
double dx1 = ptr[j].x - ptr[i].x;
double dy1 = ptr[j].y - ptr[i].y;
for( k = 0; k < j; k++ )
{
double dx2 = ptr[k].x - ptr[i].x;
double dy2 = ptr[k].y - ptr[i].y;
if( fabs(dx2*dy1 - dy2*dx1) <= FLT_EPSILON*(fabs(dx1) + fabs(dy1) + fabs(dx2) + fabs(dy2)))
return true;
}
}
return false;
}
void findExtrinsicCameraParams2( const Mat& objectPoints,
const Mat& imagePoints, const Mat& A,
const Mat& distCoeffs, Mat& rvec, Mat& tvec,
int useExtrinsicGuess );
} // namespace cv
#endif

View File

@ -49,8 +49,7 @@
#include "usac.hpp"
namespace cv
{
namespace cv {
int RANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
@ -931,7 +930,7 @@ Mat estimateAffine2D(InputArray _from, InputArray _to, OutputArray _inliers,
{
if (method >= USAC_DEFAULT && method <= USAC_MAGSAC)
return cv::usac::estimateAffine2D(_from, _to, _inliers, method,
return usac::estimateAffine2D(_from, _to, _inliers, method,
ransacReprojThreshold, (int)maxIters, confidence, (int)refineIters);
Mat from = _from.getMat(), to = _to.getMat();

View File

@ -57,12 +57,8 @@
#include "rho.h"
#include "opencv2/core/utils/buffer_area.private.hpp"
/* For the sake of cv:: namespace ONLY: */
namespace cv{/* For C support, replace with extern "C" { */
namespace cv {
/* Constants */
const size_t HSIZE = (3*3*sizeof(float));

View File

@ -47,18 +47,11 @@
#ifndef __OPENCV_RHO_H__
#define __OPENCV_RHO_H__
/* Includes */
#include <opencv2/core.hpp>
/* Defines */
/* Flags */
#ifndef RHO_FLAG_NONE
#define RHO_FLAG_NONE (0U<<0)
@ -73,10 +66,8 @@
#define RHO_FLAG_ENABLE_FINAL_REFINEMENT (1U<<2)
#endif
/* Namespace cv */
namespace cv{
namespace cv {
/* Data structures */
@ -255,13 +246,7 @@ unsigned rhoHest(Ptr<RHO_HEST> p, /* Homography estimation context. */
const float* guessH, /* Extrinsic guess, NULL if none provided */
float* finalH); /* Final result. */
/* End Namespace cv */
}
#endif

View File

@ -48,14 +48,14 @@
#include "ap3p.h"
#include "ippe.hpp"
#include "sqpnp.hpp"
#include "calib3d_c_api.h"
#include "usac.hpp"
#include <opencv2/core/utils/logger.hpp>
namespace cv
{
namespace cv {
using namespace std;
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
static bool isPlanarObjectPoints(InputArray _objectPoints, double threshold)
{
@ -790,7 +790,7 @@ void solvePnPRefineVVS(InputArray _objectPoints, InputArray _imagePoints,
int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
bool useExtrinsicGuess, SolvePnPMethod flags,
bool useExtrinsicGuess, int flags,
InputArray _rvec, InputArray _tvec,
OutputArray reprojectionError) {
CV_INSTRUMENT_REGION();
@ -869,13 +869,8 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
tvec.create(3, 1, CV_64FC1);
}
CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
(c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );
findExtrinsicCameraParams2(opoints, ipoints, cameraMatrix, distCoeffs,
rvec, tvec, useExtrinsicGuess );
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}

View File

@ -39,8 +39,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "precomp.hpp"
#include "sqpnp.hpp"
#include <opencv2/calib3d.hpp>
namespace cv {
namespace sqpnp {

View File

@ -42,6 +42,8 @@
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
namespace cv {
// cvCorrectMatches function is Copyright (C) 2009, Jostein Austvik Jacobsen.
// cvTriangulatePoints function is derived from icvReconstructPointsFor3View, originally by Valery Mosyagin.
@ -343,7 +345,7 @@ icvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points
cvConvert( points2, new_points2 );
}
void cv::triangulatePoints( InputArray _projMatr1, InputArray _projMatr2,
void triangulatePoints( InputArray _projMatr1, InputArray _projMatr2,
InputArray _projPoints1, InputArray _projPoints2,
OutputArray _points4D )
{
@ -368,7 +370,7 @@ void cv::triangulatePoints( InputArray _projMatr1, InputArray _projMatr2,
icvTriangulatePoints(&cvMatr1, &cvMatr2, &cvPoints1, &cvPoints2, &cvPoints4D);
}
void cv::correctMatches( InputArray _F, InputArray _points1, InputArray _points2,
void correctMatches( InputArray _F, InputArray _points1, InputArray _points2,
OutputArray _newPoints1, OutputArray _newPoints2 )
{
CV_INSTRUMENT_REGION();
@ -386,3 +388,5 @@ void cv::correctMatches( InputArray _F, InputArray _points1, InputArray _points2
icvCorrectMatches(&cvF, &cvPoints1, &cvPoints2, &cvNewPoints1, &cvNewPoints2);
}
}

View File

@ -43,13 +43,10 @@
#include "precomp.hpp"
#include "distortion_model.hpp"
#include "calib3d_c_api.h"
#include "undistort.simd.hpp"
#include "undistort.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv
{
namespace cv {
Mat getDefaultNewCameraMatrix( InputArray _cameraMatrix, Size imgsize,
bool centerPrincipalPoint )
@ -68,8 +65,7 @@ Mat getDefaultNewCameraMatrix( InputArray _cameraMatrix, Size imgsize,
return newCameraMatrix;
}
namespace {
Ptr<ParallelLoopBody> getInitUndistortRectifyMapComputer(Size _size, Mat &_map1, Mat &_map2, int _m1type,
static Ptr<ParallelLoopBody> getInitUndistortRectifyMapComputer(Size _size, Mat &_map1, Mat &_map2, int _m1type,
const double* _ir, Matx33d &_matTilt,
double _u0, double _v0, double _fx, double _fy,
double _k1, double _k2, double _p1, double _p2,
@ -81,7 +77,6 @@ Ptr<ParallelLoopBody> getInitUndistortRectifyMapComputer(Size _size, Mat &_map1,
CV_CPU_DISPATCH(getInitUndistortRectifyMapComputer, (_size, _map1, _map2, _m1type, _ir, _matTilt, _u0, _v0, _fx, _fy, _k1, _k2, _p1, _p2, _k3, _k4, _k5, _k6, _s1, _s2, _s3, _s4),
CV_CPU_DISPATCH_MODES_ALL);
}
}
void initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
InputArray _matR, InputArray _newCameraMatrix,
@ -157,7 +152,7 @@ void initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
// Matrix for trapezoidal distortion of tilted image sensor
Matx33d matTilt = Matx33d::eye();
detail::computeTiltProjectionMatrix(tauX, tauY, &matTilt);
computeTiltProjectionMatrix(tauX, tauY, &matTilt);
parallel_for_(Range(0, size.height), *getInitUndistortRectifyMapComputer(
size, map1, map2, m1type, ir, matTilt, u0, v0,
@ -212,126 +207,74 @@ void undistort( InputArray _src, OutputArray _dst, InputArray _cameraMatrix,
}
}
}
CV_IMPL void
cvUndistort2( const CvArr* srcarr, CvArr* dstarr, const CvMat* Aarr, const CvMat* dist_coeffs, const CvMat* newAarr )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs), newA;
if( newAarr )
newA = cv::cvarrToMat(newAarr);
CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
cv::undistort( src, dst, A, distCoeffs, newA );
}
CV_IMPL void cvInitUndistortMap( const CvMat* Aarr, const CvMat* dist_coeffs,
CvArr* mapxarr, CvArr* mapyarr )
{
cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);
cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;
if( mapyarr )
mapy0 = mapy = cv::cvarrToMat(mapyarr);
cv::initUndistortRectifyMap( A, distCoeffs, cv::Mat(), A,
mapx.size(), mapx.type(), mapx, mapy );
CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
}
void
cvInitUndistortRectifyMap( const CvMat* Aarr, const CvMat* dist_coeffs,
const CvMat *Rarr, const CvMat* ArArr, CvArr* mapxarr, CvArr* mapyarr )
{
cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs, R, Ar;
cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;
if( mapyarr )
mapy0 = mapy = cv::cvarrToMat(mapyarr);
if( dist_coeffs )
distCoeffs = cv::cvarrToMat(dist_coeffs);
if( Rarr )
R = cv::cvarrToMat(Rarr);
if( ArArr )
Ar = cv::cvarrToMat(ArArr);
cv::initUndistortRectifyMap( A, distCoeffs, R, Ar, mapx.size(), mapx.type(), mapx, mapy );
CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
}
static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
const CvMat* _distCoeffs,
const CvMat* matR, const CvMat* matP, cv::TermCriteria criteria)
static void undistortPointsInternal( const Mat& _src, Mat& _dst, const Mat& _cameraMatrix,
const Mat& _distCoeffs, const Mat& matR, const Mat& matP, TermCriteria criteria)
{
CV_Assert(criteria.isValid());
double A[3][3], RR[3][3], k[14]={0,0,0,0,0,0,0,0,0,0,0,0,0,0};
CvMat matA=cvMat(3, 3, CV_64F, A), _Dk;
CvMat _RR=cvMat(3, 3, CV_64F, RR);
Mat matA(3, 3, CV_64F, A), _Dk;
Mat _RR(3, 3, CV_64F, RR);
cv::Matx33d invMatTilt = cv::Matx33d::eye();
cv::Matx33d matTilt = cv::Matx33d::eye();
bool haveDistCoeffs = !_distCoeffs.empty();
CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
(_src->rows == 1 || _src->cols == 1) &&
(_dst->rows == 1 || _dst->cols == 1) &&
_src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
(CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
(CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));
CV_Assert( (_src.rows == 1 || _src.cols == 1) &&
(_dst.rows == 1 || _dst.cols == 1) &&
_src.cols + _src.rows - 1 == _dst.rows + _dst.cols - 1 &&
(_src.type() == CV_32FC2 || _src.type() == CV_64FC2) &&
(_dst.type() == CV_32FC2 || _dst.type() == CV_64FC2));
CV_Assert( CV_IS_MAT(_cameraMatrix) &&
_cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 );
CV_Assert( _cameraMatrix.rows == 3 && _cameraMatrix.cols == 3 && _cameraMatrix.channels() == 1 );
_cameraMatrix.convertTo(matA, CV_64F);
cvConvert( _cameraMatrix, &matA );
if( _distCoeffs )
if( haveDistCoeffs )
{
CV_Assert( CV_IS_MAT(_distCoeffs) &&
(_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
(_distCoeffs->rows*_distCoeffs->cols == 4 ||
_distCoeffs->rows*_distCoeffs->cols == 5 ||
_distCoeffs->rows*_distCoeffs->cols == 8 ||
_distCoeffs->rows*_distCoeffs->cols == 12 ||
_distCoeffs->rows*_distCoeffs->cols == 14));
CV_Assert(
(_distCoeffs.rows == 1 || _distCoeffs.cols == 1) &&
(_distCoeffs.rows*_distCoeffs.cols == 4 ||
_distCoeffs.rows*_distCoeffs.cols == 5 ||
_distCoeffs.rows*_distCoeffs.cols == 8 ||
_distCoeffs.rows*_distCoeffs.cols == 12 ||
_distCoeffs.rows*_distCoeffs.cols == 14));
_Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);
cvConvert( _distCoeffs, &_Dk );
_Dk = Mat( _distCoeffs.rows, _distCoeffs.cols,
CV_MAKETYPE(CV_64F,_distCoeffs.channels()), k);
_distCoeffs.convertTo(_Dk, CV_64F);
CV_Assert(_Dk.ptr<double>() == k);
if (k[12] != 0 || k[13] != 0)
{
cv::detail::computeTiltProjectionMatrix<double>(k[12], k[13], NULL, NULL, NULL, &invMatTilt);
cv::detail::computeTiltProjectionMatrix<double>(k[12], k[13], &matTilt, NULL, NULL);
computeTiltProjectionMatrix<double>(k[12], k[13], NULL, NULL, NULL, &invMatTilt);
computeTiltProjectionMatrix<double>(k[12], k[13], &matTilt, NULL, NULL);
}
}
if( matR )
if( !matR.empty() )
{
CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 );
cvConvert( matR, &_RR );
CV_Assert( matR.rows == 3 && matR.cols == 3 && matR.channels() == 1 );
matR.convertTo(_RR, CV_64F);
CV_Assert(_RR.ptr<double>() == &RR[0][0]);
}
else
cvSetIdentity(&_RR);
setIdentity(_RR);
if( matP )
if( !matP.empty() )
{
double PP[3][3];
CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP );
cvMatMul( &_PP, &_RR, &_RR );
Mat _PP(3, 3, CV_64F, PP);
CV_Assert( matP.rows == 3 && (matP.cols == 3 || matP.cols == 4));
matP.colRange(0, 3).convertTo(_PP, CV_64F);
CV_Assert(_PP.ptr<double>() == &PP[0][0]);
_RR = _PP*_RR;
}
const CvPoint2D32f* srcf = (const CvPoint2D32f*)_src->data.ptr;
const CvPoint2D64f* srcd = (const CvPoint2D64f*)_src->data.ptr;
CvPoint2D32f* dstf = (CvPoint2D32f*)_dst->data.ptr;
CvPoint2D64f* dstd = (CvPoint2D64f*)_dst->data.ptr;
int stype = CV_MAT_TYPE(_src->type);
int dtype = CV_MAT_TYPE(_dst->type);
int sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
int dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);
const Point2f* srcf = (const Point2f*)_src.data;
const Point2d* srcd = (const Point2d*)_src.data;
Point2f* dstf = (Point2f*)_dst.data;
Point2d* dstd = (Point2d*)_dst.data;
int stype = _src.type();
int dtype = _dst.type();
int sstep = _src.rows == 1 ? 1 : (int)(_src.step/_src.elemSize());
int dstep = _dst.rows == 1 ? 1 : (int)(_dst.step/_dst.elemSize());
double fx = A[0][0];
double fy = A[1][1];
@ -340,7 +283,7 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM
double cx = A[0][2];
double cy = A[1][2];
int n = _src->rows + _src->cols - 1;
int n = _src.rows + _src.cols - 1;
for( int i = 0; i < n; i++ )
{
double x, y, x0 = 0, y0 = 0, u, v;
@ -358,7 +301,7 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM
x = (x - cx)*ifx;
y = (y - cy)*ify;
if( _distCoeffs ) {
if( haveDistCoeffs ) {
// compensate tilt distortion
cv::Vec3d vecUntilt = invMatTilt * cv::Vec3d(x, y, 1);
double invProj = vecUntilt(2) ? 1./vecUntilt(2) : 1;
@ -370,9 +313,9 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM
for( int j = 0; ; j++ )
{
if ((criteria.type & cv::TermCriteria::COUNT) && j >= criteria.maxCount)
if ((criteria.type & TermCriteria::COUNT) && j >= criteria.maxCount)
break;
if ((criteria.type & cv::TermCriteria::EPS) && error < criteria.epsilon)
if ((criteria.type & TermCriteria::EPS) && error < criteria.epsilon)
break;
double r2 = x*x + y*y;
double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
@ -387,11 +330,11 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM
x = (x0 - deltaX)*icdist;
y = (y0 - deltaY)*icdist;
if(criteria.type & cv::TermCriteria::EPS)
if(criteria.type & TermCriteria::EPS)
{
double r4, r6, a1, a2, a3, cdist, icdist2;
double xd, yd, xd0, yd0;
cv::Vec3d vecTilt;
Vec3d vecTilt;
r2 = x*x + y*y;
r4 = r2*r2;
@ -436,25 +379,6 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM
}
}
void cvUndistortPoints(const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
const CvMat* _distCoeffs,
const CvMat* matR, const CvMat* matP)
{
cvUndistortPointsInternal(_src, _dst, _cameraMatrix, _distCoeffs, matR, matP,
cv::TermCriteria(cv::TermCriteria::COUNT, 5, 0.01));
}
namespace cv {
void undistortPoints(InputArray _src, OutputArray _dst,
InputArray _cameraMatrix,
InputArray _distCoeffs,
InputArray _Rmat,
InputArray _Pmat)
{
undistortPoints(_src, _dst, _cameraMatrix, _distCoeffs, _Rmat, _Pmat, TermCriteria(TermCriteria::MAX_ITER, 5, 0.01));
}
void undistortPoints(InputArray _src, OutputArray _dst,
InputArray _cameraMatrix,
InputArray _distCoeffs,
@ -477,15 +401,7 @@ void undistortPoints(InputArray _src, OutputArray _dst,
_dst.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
Mat dst = _dst.getMat();
CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _ccameraMatrix = cvMat(cameraMatrix);
CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
if( !R.empty() )
pR = &(matR = cvMat(R));
if( !P.empty() )
pP = &(matP = cvMat(P));
if( !distCoeffs.empty() )
pD = &(_cdistCoeffs = cvMat(distCoeffs));
cvUndistortPointsInternal(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP, criteria);
undistortPointsInternal(src, dst, cameraMatrix, distCoeffs, R, P, criteria);
}
static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, enum UndistortTypes projType)
@ -599,8 +515,8 @@ float initWideAngleProjMap(InputArray _cameraMatrix0, InputArray _distCoeffs0,
Mat mapxy(dsize, CV_32FC2);
double k1 = k[0], k2 = k[1], k3 = k[2], p1 = k[3], p2 = k[4], k4 = k[5], k5 = k[6], k6 = k[7], s1 = k[8], s2 = k[9], s3 = k[10], s4 = k[11];
double fx = cameraMatrix.at<double>(0,0), fy = cameraMatrix.at<double>(1,1), cx = scenter.x, cy = scenter.y;
cv::Matx33d matTilt;
cv::detail::computeTiltProjectionMatrix(k[12], k[13], &matTilt);
Matx33d matTilt;
computeTiltProjectionMatrix(k[12], k[13], &matTilt);
for( int y = 0; y < dsize.height; y++ )
{
@ -641,5 +557,5 @@ float initWideAngleProjMap(InputArray _cameraMatrix0, InputArray _distCoeffs0,
return scale;
}
} // namespace
}
/* End of file */

View File

@ -337,4 +337,4 @@ public:
Ptr<EssentialNonMinimalSolver> EssentialNonMinimalSolver::create (const Mat &points_) {
return makePtr<EssentialNonMinimalSolverImpl>(points_);
}
}}
}}

View File

@ -335,4 +335,4 @@ public:
Ptr<AffineNonMinimalSolver> AffineNonMinimalSolver::create(const Mat &points_) {
return makePtr<AffineNonMinimalSolverImpl>(points_);
}
}}
}}

View File

@ -377,4 +377,4 @@ public:
Ptr<P3PSolver> P3PSolver::create(const Mat &points_, const Mat &calib_norm_pts, const Mat &K) {
return makePtr<P3PSolverImpl>(points_, calib_norm_pts, K);
}
}}
}}

View File

@ -584,4 +584,4 @@ Ptr<SPRT> SPRT::create (int state, const Ptr<Error> &err_, int points_size_,
return makePtr<SPRTImpl>(state, err_, points_size_, inlier_threshold_,
prob_pt_of_good_model, prob_pt_of_bad_model, time_sample, avg_num_models, score_type_);
}
}}
}}

View File

@ -530,4 +530,4 @@ Ptr<GridNeighborhoodGraph> GridNeighborhoodGraph::create(const Mat &points,
return makePtr<GridNeighborhoodGraphImpl>(points, points_size,
cell_size_x_img1_, cell_size_y_img1_, cell_size_x_img2_, cell_size_y_img2_, max_neighbors);
}
}}
}}

View File

@ -41,7 +41,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/3d.hpp"
namespace opencv_test { namespace {

View File

@ -41,7 +41,6 @@
#include "test_precomp.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/calib3d/calib3d_c.h"
namespace cvtest {
@ -826,13 +825,13 @@ void CV_FundamentalMatTest::get_test_array_types_and_sizes( int /*test_case_idx*
dims = cvtest::randInt(rng) % 2 + 2;
method = 1 << (cvtest::randInt(rng) % 4);
if( method == CV_FM_7POINT )
if( method == FM_7POINT )
pt_count = 7;
else
{
pt_count = MAX( pt_count, 8 + (method == CV_FM_8POINT) );
pt_count = MAX( pt_count, 8 + (method == FM_8POINT) );
if( pt_count >= 8 && cvtest::randInt(rng) % 2 )
method |= CV_FM_8POINT;
method |= FM_8POINT;
}
types[INPUT][0] = CV_MAKETYPE(pt_depth, 1);
@ -1012,7 +1011,7 @@ void CV_FundamentalMatTest::prepare_to_validation( int test_case_idx )
cvtest::convert(Fsrc, F, F.type());
if( method <= CV_FM_8POINT )
if( method <= FM_8POINT )
memset( status, 1, pt_count );
for( i = 0; i < pt_count; i++ )
@ -1125,7 +1124,7 @@ void CV_EssentialMatTest::get_test_array_types_and_sizes( int /*test_case_idx*/,
dims = cvtest::randInt(rng) % 2 + 2;
dims = 2;
method = CV_LMEDS << (cvtest::randInt(rng) % 2);
method = LMEDS << (cvtest::randInt(rng) % 2);
types[INPUT][0] = CV_MAKETYPE(pt_depth, 1);
@ -1389,7 +1388,7 @@ void CV_EssentialMatTest::prepare_to_validation( int test_case_idx )
pose_prop1[0] = 0;
// No check for CV_LMeDS on translation. Since it
// involves with some degraded problem, when data is exact inliers.
pose_prop2[0] = method == CV_LMEDS || pt_count == 5 ? 0 : MIN(terr1, terr2);
pose_prop2[0] = method == LMEDS || pt_count == 5 ? 0 : MIN(terr1, terr2);
// int inliers_count = countNonZero(test_mat[TEMP][1]);

View File

@ -42,6 +42,7 @@
//M*/
#include "test_precomp.hpp"
#include <opencv2/features2d.hpp>
namespace opencv_test { namespace {

View File

@ -8,7 +8,7 @@
#include <numeric>
#include "opencv2/ts.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/3d.hpp"
namespace cvtest
{

View File

@ -711,7 +711,7 @@ void CV_InitUndistortRectifyMapTest::run_func()
cv::Mat R = zero_R ? cv::Mat() : test_mat[INPUT][2];
cv::Mat new_cam = zero_new_cam ? cv::Mat() : test_mat[INPUT][3];
cv::Mat& mapx = test_mat[OUTPUT][0], &mapy = test_mat[OUTPUT][1];
cv::initUndistortRectifyMap(camera_mat,dist,R,new_cam,img_size,map_type,mapx,mapy);
initUndistortRectifyMap(camera_mat,dist,R,new_cam,img_size,map_type,mapx,mapy);
}
double CV_InitUndistortRectifyMapTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )

View File

@ -20,7 +20,7 @@ foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MOD
endforeach()
ocv_list_sort(OPENCV_MODULES_MAIN)
ocv_list_sort(OPENCV_MODULES_EXTRA)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect dnn ml flann photo stitching)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video 3d stereo features2d calib objdetect dnn ml flann photo stitching)
list(REMOVE_ITEM OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES})
set(OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES} ${OPENCV_MODULES_MAIN})

View File

@ -0,0 +1,9 @@
set(the_description "Camera Calibration and 3D Reconstruction")
set(debug_modules "")
if(DEBUG_opencv_calib)
list(APPEND debug_modules opencv_highgui)
endif()
ocv_define_module(calib opencv_imgproc opencv_features2d opencv_flann opencv_3d opencv_stereo ${debug_modules}
WRAP java objc python js
)

View File

@ -0,0 +1,12 @@
@inproceedings{strobl2011iccv,
title={More accurate pinhole camera calibration with imperfect planar target},
author={Strobl, Klaus H. and Hirzinger, Gerd},
booktitle={2011 IEEE International Conference on Computer Vision (ICCV)},
pages={1068-1075},
month={Nov},
year={2011},
address={Barcelona, Spain},
publisher={IEEE},
url={https://elib.dlr.de/71888/1/strobl_2011iccv.pdf},
doi={10.1109/ICCVW.2011.6130369}
}

View File

Before

Width:  |  Height:  |  Size: 33 KiB

After

Width:  |  Height:  |  Size: 33 KiB

Some files were not shown because too many files have changed in this diff Show More