Merge remote-tracking branch 'upstream/3.4' into merge-3.4
Commit 166ecaeda8 (mirror of https://github.com/opencv/opencv.git)
@@ -252,8 +252,8 @@ OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BL
 OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" WITH_CUDA
   VISIBLE_IF WITH_CUDA
   VERIFY HAVE_NVCUVID)
-OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" (NOT CV_DISABLE_OPTIMIZATION)
-  VISIBLE_IF NOT WINRT AND NOT CMAKE_CROSSCOMPILING
+OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" (NOT CV_DISABLE_OPTIMIZATION AND NOT CMAKE_CROSSCOMPILING)
+  VISIBLE_IF NOT WINRT
   VERIFY HAVE_EIGEN)
 OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" (NOT ANDROID)
   VISIBLE_IF NOT IOS AND NOT WINRT
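For reference, a minimal sketch of how an option like WITH_EIGEN surfaces in C++ code once the VERIFY step defines the corresponding HAVE_* macro (illustrative only; the guarded includes are an assumption about a typical consumer translation unit):

// Minimal sketch: code guarded by the HAVE_EIGEN definition that the
// WITH_EIGEN option above ultimately controls (via VERIFY HAVE_EIGEN).
#ifdef HAVE_EIGEN
#include <Eigen/Core>
#include <opencv2/core/eigen.hpp>  // conversion helpers become available
#endif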
@@ -153,14 +153,14 @@ void CvHaarEvaluator::generateFeatures()
 {
     features.push_back( Feature( offset, false,
         x, y, dx*3, dy, -1,
-        x+dx, y, dx , dy, +3 ) );
+        x+dx, y, dx , dy, +2 ) );
 }
 // haar_y3
 if ( (x+dx <= winSize.width) && (y+dy*3 <= winSize.height) )
 {
     features.push_back( Feature( offset, false,
         x, y, dx, dy*3, -1,
-        x, y+dy, dx, dy, +3 ) );
+        x, y+dy, dx, dy, +2 ) );
 }
 if( mode != CvHaarFeatureParams::BASIC )
 {
@@ -31,27 +31,33 @@ macro(ocv_lapack_check)
   else()
     # adding proxy opencv_lapack.h header
     set(CBLAS_H_PROXY_PATH ${CMAKE_BINARY_DIR}/opencv_lapack.h)
-    if((APPLE OR OPENCV_SKIP_LAPACK_EXTERN_C) AND NOT OPENCV_FORCE_LAPACK_EXTERN_C)
-      set(_lapack_include_str_extern_C "")
-      set(_lapack_include_str_extern_C_end "")
-    else()
-      set(_lapack_include_str_extern_C "extern \"C\" {\n")
-      set(_lapack_include_str_extern_C_end "}\n")
+
+    set(_lapack_add_extern_c NOT (APPLE OR OPENCV_SKIP_LAPACK_EXTERN_C) OR OPENCV_FORCE_LAPACK_EXTERN_C)
+
+    set(_lapack_content "// This file is auto-generated\n")
+    if(${_lapack_add_extern_c})
+      list(APPEND _lapack_content "extern \"C\" {")
     endif()
-    set(_lapack_include_str "${_lapack_include_str_extern_C}\#include \"${OPENCV_CBLAS_H_PATH_${_lapack_impl}}\"")
+    if(NOT OPENCV_SKIP_LAPACK_MSVC_FIX)
+      list(APPEND _lapack_content "
+#ifdef _MSC_VER
+#include <complex.h>
+#define lapack_complex_float _Fcomplex
+#define lapack_complex_double _Dcomplex
+#endif
+")
+    endif()
+    list(APPEND _lapack_content "#include \"${OPENCV_CBLAS_H_PATH_${_lapack_impl}}\"")
     if(NOT "${OPENCV_CBLAS_H_PATH_${_lapack_impl}}" STREQUAL "${OPENCV_LAPACKE_H_PATH_${_lapack_impl}}")
-      set(_lapack_include_str "${_lapack_include_str}\n#include \"${OPENCV_LAPACKE_H_PATH_${_lapack_impl}}\"")
+      list(APPEND _lapack_content "#include \"${OPENCV_LAPACKE_H_PATH_${_lapack_impl}}\"")
     endif()
-    set(_lapack_include_str "${_lapack_include_str}\n${_lapack_include_str_extern_C_end}")
-    # update file contents (if required)
-    set(__content_str "")
-    if(EXISTS "${CBLAS_H_PROXY_PATH}")
-      file(READ "${CBLAS_H_PROXY_PATH}" __content_str)
-    endif()
-    if(NOT " ${__content_str}" STREQUAL " ${_lapack_include_str}")
-      file(WRITE "${CBLAS_H_PROXY_PATH}" "${_lapack_include_str}")
+    if(${_lapack_add_extern_c})
+      list(APPEND _lapack_content "}")
     endif()
+
+    string(REPLACE ";" "\n" _lapack_content "${_lapack_content}")
+    ocv_update_file("${CBLAS_H_PROXY_PATH}" "${_lapack_content}")
+
     try_compile(__VALID_LAPACK
         "${OpenCV_BINARY_DIR}"
         "${OpenCV_SOURCE_DIR}/cmake/checks/lapack_check.cpp"
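To make the generation above concrete, this is roughly what the emitted opencv_lapack.h proxy looks like on a non-Apple build with the MSVC fix enabled; cblas.h and lapacke.h are stand-ins for whatever the OPENCV_CBLAS_H_PATH_*/OPENCV_LAPACKE_H_PATH_* variables resolved to:

// This file is auto-generated
extern "C" {
#ifdef _MSC_VER
#include <complex.h>
#define lapack_complex_float _Fcomplex
#define lapack_complex_double _Dcomplex
#endif
#include "cblas.h"    // stand-in for ${OPENCV_CBLAS_H_PATH_${_lapack_impl}}
#include "lapacke.h"  // stand-in for ${OPENCV_LAPACKE_H_PATH_${_lapack_impl}}
}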
@@ -64,7 +64,7 @@ if(WITH_GTK AND NOT HAVE_QT)
   if(WITH_OPENGL AND NOT HAVE_GTK3)
     ocv_check_modules(GTKGLEXT gtkglext-1.0)
     if(HAVE_GTKGLEXT)
-      ocv_append_build_options(GTKGLEXT GTHREAD)
+      ocv_append_build_options(HIGHGUI GTKGLEXT)
     endif()
   endif()
 endif()
@@ -40,19 +40,67 @@ To eliminate this warning remove WITH_CUDA=ON CMake configuration option.
 endif(WITH_CUDA)

 # --- Eigen ---
-if(WITH_EIGEN)
-  find_path(EIGEN_INCLUDE_PATH "Eigen/Core"
-            PATHS /usr/local /opt /usr $ENV{EIGEN_ROOT}/include ENV ProgramFiles ENV ProgramW6432
-            PATH_SUFFIXES include/eigen3 include/eigen2 Eigen/include/eigen3 Eigen/include/eigen2
-            DOC "The path to Eigen3/Eigen2 headers"
-            CMAKE_FIND_ROOT_PATH_BOTH)
-  if(EIGEN_INCLUDE_PATH)
-    ocv_include_directories(${EIGEN_INCLUDE_PATH})
-    ocv_parse_header("${EIGEN_INCLUDE_PATH}/Eigen/src/Core/util/Macros.h" EIGEN_VERSION_LINES EIGEN_WORLD_VERSION EIGEN_MAJOR_VERSION EIGEN_MINOR_VERSION)
-    set(HAVE_EIGEN 1)
-  endif()
-endif(WITH_EIGEN)
+if(WITH_EIGEN AND NOT HAVE_EIGEN)
+  find_package(Eigen3 QUIET)
+
+  if(Eigen3_FOUND)
+    if(TARGET Eigen3::Eigen)
+      # Use Eigen3 imported target if possible
+      list(APPEND OPENCV_LINKER_LIBS Eigen3::Eigen)
+      set(HAVE_EIGEN 1)
+    else()
+      if(DEFINED EIGEN3_INCLUDE_DIRS)
+        set(EIGEN_INCLUDE_PATH ${EIGEN3_INCLUDE_DIRS})
+        set(HAVE_EIGEN 1)
+      elseif(DEFINED EIGEN3_INCLUDE_DIR)
+        set(EIGEN_INCLUDE_PATH ${EIGEN3_INCLUDE_DIR})
+        set(HAVE_EIGEN 1)
+      endif()
+    endif()
+    if(HAVE_EIGEN)
+      if(DEFINED EIGEN3_WORLD_VERSION)  # CMake module
+        set(EIGEN_WORLD_VERSION ${EIGEN3_WORLD_VERSION})
+        set(EIGEN_MAJOR_VERSION ${EIGEN3_MAJOR_VERSION})
+        set(EIGEN_MINOR_VERSION ${EIGEN3_MINOR_VERSION})
+      else()  # Eigen config file
+        set(EIGEN_WORLD_VERSION ${EIGEN3_VERSION_MAJOR})
+        set(EIGEN_MAJOR_VERSION ${EIGEN3_VERSION_MINOR})
+        set(EIGEN_MINOR_VERSION ${EIGEN3_VERSION_PATCH})
+      endif()
+    endif()
+  endif()
+
+  if(NOT HAVE_EIGEN)
+    if(NOT EIGEN_INCLUDE_PATH OR NOT EXISTS "${EIGEN_INCLUDE_PATH}")
+      set(__find_paths "")
+      set(__find_path_extra_options "")
+      if(NOT CMAKE_CROSSCOMPILING)
+        list(APPEND __find_paths /opt)
+      endif()
+      if(DEFINED ENV{EIGEN_ROOT})
+        set(__find_paths "$ENV{EIGEN_ROOT}/include")
+        list(APPEND __find_path_extra_options NO_DEFAULT_PATH)
+      else()
+        set(__find_paths ENV ProgramFiles ENV ProgramW6432)
+      endif()
+      find_path(EIGEN_INCLUDE_PATH "Eigen/Core"
+                PATHS ${__find_paths}
+                PATH_SUFFIXES include/eigen3 include/eigen2 Eigen/include/eigen3 Eigen/include/eigen2
+                DOC "The path to Eigen3/Eigen2 headers"
+                ${__find_path_extra_options}
+      )
+    endif()
+    if(EIGEN_INCLUDE_PATH AND EXISTS "${EIGEN_INCLUDE_PATH}")
+      ocv_parse_header("${EIGEN_INCLUDE_PATH}/Eigen/src/Core/util/Macros.h" EIGEN_VERSION_LINES EIGEN_WORLD_VERSION EIGEN_MAJOR_VERSION EIGEN_MINOR_VERSION)
+      set(HAVE_EIGEN 1)
+    endif()
+  endif()
+endif()
+if(HAVE_EIGEN)
+  if(EIGEN_INCLUDE_PATH AND EXISTS "${EIGEN_INCLUDE_PATH}")
+    ocv_include_directories(SYSTEM ${EIGEN_INCLUDE_PATH})
+  endif()
+endif()

 # --- Clp ---
 # Ubuntu: sudo apt-get install coinor-libclp-dev coinor-libcoinutils-dev
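Once HAVE_EIGEN is set by the detection logic above, the typical consumer-side pattern is converting between cv::Mat and Eigen types. A hedged, self-contained sketch (the conversions shown are the standard cv::cv2eigen / cv::eigen2cv helpers):

#include <Eigen/Core>
#include <opencv2/core.hpp>
#include <opencv2/core/eigen.hpp>  // requires the Eigen headers to be included first

int main()
{
    cv::Mat m = cv::Mat::eye(3, 3, CV_64F);
    Eigen::Matrix3d e;
    cv::cv2eigen(m, e);    // OpenCV -> Eigen copy
    cv::Mat back;
    cv::eigen2cv(e, back); // Eigen -> OpenCV copy
    return 0;
}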
@@ -209,7 +209,21 @@
 hal_id = {inria-00350283},
 hal_version = {v1},
 }

+@article{Collins14,
+  year = {2014},
+  issn = {0920-5691},
+  journal = {International Journal of Computer Vision},
+  volume = {109},
+  number = {3},
+  doi = {10.1007/s11263-014-0725-5},
+  title = {Infinitesimal Plane-Based Pose Estimation},
+  url = {http://dx.doi.org/10.1007/s11263-014-0725-5},
+  publisher = {Springer US},
+  keywords = {Plane; Pose; SfM; PnP; Homography},
+  author = {Collins, Toby and Bartoli, Adrien},
+  pages = {252-286},
+  language = {English}
+}
 @article{Daniilidis98,
 author = {Konstantinos Daniilidis},
 title = {Hand-Eye Calibration Using Dual Quaternions},
@@ -21,7 +21,6 @@ train_data, and next 250 samples as test_data. So let's prepare them first.
 @code{.py}
 import numpy as np
 import cv2 as cv
-from matplotlib import pyplot as plt

 img = cv.imread('digits.png')
 gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
@@ -89,7 +88,6 @@ alphabets directly.
 @code{.py}
 import cv2 as cv
 import numpy as np
-import matplotlib.pyplot as plt

 # Load the data, converters convert the letter to a number
 data= np.loadtxt('letter-recognition.data', dtype= 'float32', delimiter = ',',
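The tutorial snippets above use the Python bindings. As a cross-reference, here is a hedged C++ sketch of the same kNN train/predict flow with cv::ml::KNearest; the matrix shapes are assumptions based on the 20x20 digit images the tutorial describes:

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>

int main()
{
    // Assumed shapes: 2500 samples, each a flattened 20x20 patch (400 features).
    cv::Mat trainData(2500, 400, CV_32F);
    cv::Mat trainLabels(2500, 1, CV_32S);
    cv::Mat testData(2500, 400, CV_32F);
    // ... fill the matrices from digits.png, as the tutorial does in Python ...

    cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create();
    knn->train(trainData, cv::ml::ROW_SAMPLE, trainLabels);

    cv::Mat results;
    knn->findNearest(testData, 5, results);  // k = 5; one predicted label per row
    return 0;
}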
@@ -231,13 +231,25 @@ enum { LMEDS = 4, //!< least-median of squares algorithm
        RHO = 16 //!< RHO algorithm
      };

-enum { SOLVEPNP_ITERATIVE = 0,
-       SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
-       SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
-       SOLVEPNP_DLS = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
-       SOLVEPNP_UPNP = 4, //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
-       SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17
-       SOLVEPNP_MAX_COUNT //!< Used for count
+enum SolvePnPMethod {
+    SOLVEPNP_ITERATIVE = 0,
+    SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
+    SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
+    SOLVEPNP_DLS = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
+    SOLVEPNP_UPNP = 4, //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
+    SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17
+    SOLVEPNP_IPPE = 6, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
+                       //!< Object points must be coplanar.
+    SOLVEPNP_IPPE_SQUARE = 7, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
+                       //!< This is a special case suitable for marker pose estimation.\n
+                       //!< 4 coplanar object points must be defined in the following order:
+                       //!< - point 0: [-squareLength / 2, squareLength / 2, 0]
+                       //!< - point 1: [ squareLength / 2, squareLength / 2, 0]
+                       //!< - point 2: [ squareLength / 2, -squareLength / 2, 0]
+                       //!< - point 3: [-squareLength / 2, -squareLength / 2, 0]
+#ifndef CV_DOXYGEN
+    SOLVEPNP_MAX_COUNT //!< Used for count
+#endif
 };

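To make the SOLVEPNP_IPPE_SQUARE corner ordering concrete, a small hypothetical helper (not part of this patch) that builds the four object points for a marker of side squareLength:

#include <opencv2/core.hpp>
#include <vector>

// Hypothetical helper: the 4 corners of a square marker, in the exact order
// SOLVEPNP_IPPE_SQUARE expects (points 0..3 as documented in the enum above).
static std::vector<cv::Point3f> squareObjectPoints(float squareLength)
{
    const float l = squareLength / 2.f;
    return {
        cv::Point3f(-l,  l, 0),  // point 0
        cv::Point3f( l,  l, 0),  // point 1
        cv::Point3f( l, -l, 0),  // point 2
        cv::Point3f(-l, -l, 0)   // point 3
    };
}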
enum { CALIB_CB_ADAPTIVE_THRESH = 1,
@@ -610,6 +622,17 @@ Check @ref tutorial_homography "the corresponding tutorial" for more details
 */

 /** @brief Finds an object pose from 3D-2D point correspondences.
+This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
+coordinate frame to the camera coordinate frame, using different methods:
+- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution.
+- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
+- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
+Number of input points must be 4. Object points must be defined in the following order:
+  - point 0: [-squareLength / 2, squareLength / 2, 0]
+  - point 1: [ squareLength / 2, squareLength / 2, 0]
+  - point 2: [ squareLength / 2, -squareLength / 2, 0]
+  - point 3: [-squareLength / 2, -squareLength / 2, 0]
+- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.

 @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can be also passed here.
@@ -620,14 +643,14 @@ where N is the number of points. vector\<Point2f\> can be also passed here.
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
 assumed.
-@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec , brings points from
+@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
 the model coordinate system to the camera coordinate system.
 @param tvec Output translation vector.
 @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
 the provided rvec and tvec values as initial approximations of the rotation and translation
 vectors, respectively, and further optimizes them.
 @param flags Method for solving a PnP problem:
--   **SOLVEPNP_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In
+-   **SOLVEPNP_ITERATIVE** Iterative method is based on a Levenberg-Marquardt optimization. In
 this case the function finds such a pose that minimizes reprojection error, that is the sum
 of squared distances between the observed projections imagePoints and the projected (using
 projectPoints ) objectPoints .
@@ -637,18 +660,24 @@ In this case the function requires exactly four object and image points.
 -   **SOLVEPNP_AP3P** Method is based on the paper of T. Ke, S. Roumeliotis
 "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
 In this case the function requires exactly four object and image points.
--   **SOLVEPNP_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
+-   **SOLVEPNP_EPNP** Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
 paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
--   **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
+-   **SOLVEPNP_DLS** Method is based on the paper of J. Hesch and S. Roumeliotis.
 "A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
--   **SOLVEPNP_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
-F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
+-   **SOLVEPNP_UPNP** Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
+F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
 Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
 assuming that both have the same value. Then the cameraMatrix is updated with the estimated
 focal length.
--   **SOLVEPNP_AP3P** Method is based on the paper of Tong Ke and Stergios I. Roumeliotis.
-"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17). In this case the
-function requires exactly four object and image points.
+-   **SOLVEPNP_IPPE** Method is based on the paper of T. Collins and A. Bartoli.
+"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points.
+-   **SOLVEPNP_IPPE_SQUARE** Method is based on the paper of Toby Collins and Adrien Bartoli.
+"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation.
+It requires 4 coplanar object points defined in the following order:
+  - point 0: [-squareLength / 2, squareLength / 2, 0]
+  - point 1: [ squareLength / 2, squareLength / 2, 0]
+  - point 2: [ squareLength / 2, -squareLength / 2, 0]
+  - point 3: [-squareLength / 2, -squareLength / 2, 0]

 The function estimates the object pose given a set of object points, their corresponding image
 projections, as well as the camera matrix and the distortion coefficients, see the figure below
@@ -704,7 +733,7 @@ using the perspective projection model \f$ \Pi \f$ and the camera intrinsic para
 \end{align*}
 \f]

-The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow to transform
+The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming
 a 3D point expressed in the world frame into the camera frame:

 \f[
@@ -765,6 +794,13 @@ a 3D point expressed in the world frame into the camera frame:
 -   With **SOLVEPNP_ITERATIVE** method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
 are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
 global solution to converge.
+-   With **SOLVEPNP_IPPE** input points must be >= 4 and object points must be coplanar.
+-   With **SOLVEPNP_IPPE_SQUARE** this is a special case suitable for marker pose estimation.
+Number of input points must be 4. Object points must be defined in the following order:
+  - point 0: [-squareLength / 2, squareLength / 2, 0]
+  - point 1: [ squareLength / 2, squareLength / 2, 0]
+  - point 2: [ squareLength / 2, -squareLength / 2, 0]
+  - point 3: [-squareLength / 2, -squareLength / 2, 0]
 */
 CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
                             InputArray cameraMatrix, InputArray distCoeffs,
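As a usage illustration of the documentation above, a minimal hedged sketch of calling solvePnP with the new IPPE flag; the intrinsics and point sets are made-up placeholders:

#include <opencv2/calib3d.hpp>
#include <vector>

int main()
{
    // Placeholder data: 4 coplanar object points and their detected projections.
    std::vector<cv::Point3f> objectPoints = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    std::vector<cv::Point2f> imagePoints  = { {320,240}, {420,242}, {418,338}, {322,336} };

    // Hypothetical intrinsics; in practice these come from camera calibration.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                             0, 800, 240,
                                             0,   0,   1);

    cv::Mat rvec, tvec;
    bool ok = cv::solvePnP(objectPoints, imagePoints, K, cv::noArray(),
                           rvec, tvec, false, cv::SOLVEPNP_IPPE);
    return ok ? 0 : 1;
}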
@@ -782,10 +818,10 @@ where N is the number of points. vector\<Point2f\> can be also passed here.
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
 assumed.
-@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
+@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
 the model coordinate system to the camera coordinate system.
 @param tvec Output translation vector.
-@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
+@param useExtrinsicGuess Parameter used for @ref SOLVEPNP_ITERATIVE. If true (1), the function uses
 the provided rvec and tvec values as initial approximations of the rotation and translation
 vectors, respectively, and further optimizes them.
 @param iterationsCount Number of iterations.
@@ -794,12 +830,12 @@ is the maximum allowed distance between the observed and computed point projecti
 an inlier.
 @param confidence The probability that the algorithm produces a useful result.
 @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
-@param flags Method for solving a PnP problem (see solvePnP ).
+@param flags Method for solving a PnP problem (see @ref solvePnP ).

 The function estimates an object pose given a set of object points, their corresponding image
 projections, as well as the camera matrix and the distortion coefficients. This function finds such
 a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
-projections imagePoints and the projected (using projectPoints ) objectPoints. The use of RANSAC
+projections imagePoints and the projected (using @ref projectPoints ) objectPoints. The use of RANSAC
 makes the function resistant to outliers.

 @note
@@ -819,6 +855,7 @@ CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoint
                                   bool useExtrinsicGuess = false, int iterationsCount = 100,
                                   float reprojectionError = 8.0, double confidence = 0.99,
                                   OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
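Continuing that sketch with the RANSAC variant documented above (same placeholder objectPoints, imagePoints and K); the argument values mirror the documented defaults:

// Hedged fragment, not a complete program: reuses the placeholders from the
// previous sketch and tolerates outliers among the correspondences.
cv::Mat rvec, tvec;
std::vector<int> inliers;
bool ok = cv::solvePnPRansac(objectPoints, imagePoints, K, cv::noArray(),
                             rvec, tvec,
                             false,   // useExtrinsicGuess
                             100,     // iterationsCount
                             8.0f,    // reprojectionError, in pixels
                             0.99,    // confidence
                             inliers, // indices of consistent correspondences
                             cv::SOLVEPNP_ITERATIVE);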
/** @brief Finds an object pose from 3 3D-2D point correspondences.

 @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
@@ -830,17 +867,20 @@ CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoint
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
 assumed.
-@param rvecs Output rotation vectors (see Rodrigues ) that, together with tvecs , brings points from
+@param rvecs Output rotation vectors (see @ref Rodrigues ) that, together with tvecs, brings points from
 the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
 @param tvecs Output translation vectors.
 @param flags Method for solving a P3P problem:
 -   **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
 "Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
--   **SOLVEPNP_AP3P** Method is based on the paper of Tong Ke and Stergios I. Roumeliotis.
+-   **SOLVEPNP_AP3P** Method is based on the paper of T. Ke and S. Roumeliotis.
 "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).

 The function estimates the object pose given 3 object points, their corresponding image
 projections, as well as the camera matrix and the distortion coefficients.
+
+@note
+The solutions are sorted by reprojection errors (lowest to highest).
 */
 CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints,
                            InputArray cameraMatrix, InputArray distCoeffs,
@@ -859,7 +899,7 @@ where N is the number of points. vector\<Point2f\> can also be passed here.
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
 assumed.
-@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec , brings points from
+@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
 the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 @param tvec Input/Output translation vector. Input values are used as an initial solution.
 @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
@@ -887,12 +927,12 @@ where N is the number of points. vector\<Point2f\> can also be passed here.
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
 assumed.
-@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec , brings points from
+@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
 the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
 @param tvec Input/Output translation vector. Input values are used as an initial solution.
 @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
 @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \f$\alpha\f$
-gain in the Gauss-Newton formulation.
+gain in the Damped Gauss-Newton formulation.

 The function refines the object pose given at least 3 object points, their corresponding image
 projections, an initial solution for the rotation and translation vector,
@@ -906,6 +946,202 @@ CV_EXPORTS_W void solvePnPRefineVVS( InputArray objectPoints, InputArray imagePo
                                      TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON),
                                      double VVSlambda = 1);
/** @brief Finds an object pose from 3D-2D point correspondences.
This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
couple), depending on the number of input points and the chosen method:
- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  - point 0: [-squareLength / 2, squareLength / 2, 0]
  - point 1: [ squareLength / 2, squareLength / 2, 0]
  - point 2: [ squareLength / 2, -squareLength / 2, 0]
  - point 3: [-squareLength / 2, -squareLength / 2, 0]
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
Only 1 solution is returned.

@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can be also passed here.
@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector\<Point2f\> can be also passed here.
@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvecs Vector of output rotation vectors (see @ref Rodrigues ) that, together with tvecs, brings points from
the model coordinate system to the camera coordinate system.
@param tvecs Vector of output translation vectors.
@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param flags Method for solving a PnP problem:
-   **SOLVEPNP_ITERATIVE** Iterative method is based on a Levenberg-Marquardt optimization. In
this case the function finds such a pose that minimizes reprojection error, that is the sum
of squared distances between the observed projections imagePoints and the projected (using
projectPoints ) objectPoints .
-   **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
In this case the function requires exactly four object and image points.
-   **SOLVEPNP_AP3P** Method is based on the paper of T. Ke, S. Roumeliotis
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
In this case the function requires exactly four object and image points.
-   **SOLVEPNP_EPNP** Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
-   **SOLVEPNP_DLS** Method is based on the paper of J. Hesch and S. Roumeliotis.
"A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
-   **SOLVEPNP_UPNP** Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
assuming that both have the same value. Then the cameraMatrix is updated with the estimated
focal length.
-   **SOLVEPNP_IPPE** Method is based on the paper of T. Collins and A. Bartoli.
"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points.
-   **SOLVEPNP_IPPE_SQUARE** Method is based on the paper of Toby Collins and Adrien Bartoli.
"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation.
It requires 4 coplanar object points defined in the following order:
  - point 0: [-squareLength / 2, squareLength / 2, 0]
  - point 1: [ squareLength / 2, squareLength / 2, 0]
  - point 2: [ squareLength / 2, -squareLength / 2, 0]
  - point 3: [-squareLength / 2, -squareLength / 2, 0]
@param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
@param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
@param reprojectionError Optional vector of reprojection error, that is the RMS error
(\f$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \f$) between the input image points
and the 3D object points projected with the estimated pose.

The function estimates the object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients, see the figure below
(more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
and the Z-axis forward).

![](pnp.jpg)

Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$
using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:

\f[
\begin{align*}
\begin{bmatrix} u \\ v \\ 1 \end{bmatrix} &=
\bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix} \\
\begin{bmatrix} u \\ v \\ 1 \end{bmatrix} &=
\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix}
\begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}
\begin{bmatrix} r_{11} & r_{12} & r_{13} & t_x \\ r_{21} & r_{22} & r_{23} & t_y \\ r_{31} & r_{32} & r_{33} & t_z \\ 0 & 0 & 0 & 1 \end{bmatrix}
\begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix}
\end{align*}
\f]

The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:

\f[
\begin{align*}
\begin{bmatrix} X_c \\ Y_c \\ Z_c \\ 1 \end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix} \\
\begin{bmatrix} X_c \\ Y_c \\ Z_c \\ 1 \end{bmatrix} &=
\begin{bmatrix} r_{11} & r_{12} & r_{13} & t_x \\ r_{21} & r_{22} & r_{23} & t_y \\ r_{31} & r_{32} & r_{33} & t_z \\ 0 & 0 & 0 & 1 \end{bmatrix}
\begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix}
\end{align*}
\f]

@note
-   An example of how to use solvePnP for planar augmented reality can be found at
opencv_source_code/samples/python/plane_ar.py
-   If you are using Python:
    - Numpy array slices won't work as input because solvePnP requires contiguous
    arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
    modules/calib3d/src/solvepnp.cpp version 2.4.9)
    - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
    to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
    which requires 2-channel information.
    - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
    it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
    np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
-   The methods **SOLVEPNP_DLS** and **SOLVEPNP_UPNP** cannot be used as the current implementations are
unstable and sometimes give completely wrong results. If you pass one of these two
flags, **SOLVEPNP_EPNP** method will be used instead.
-   The minimum number of points is 4 in the general case. In the case of **SOLVEPNP_P3P** and **SOLVEPNP_AP3P**
methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
-   With **SOLVEPNP_ITERATIVE** method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
-   With **SOLVEPNP_IPPE** input points must be >= 4 and object points must be coplanar.
-   With **SOLVEPNP_IPPE_SQUARE** this is a special case suitable for marker pose estimation.
Number of input points must be 4. Object points must be defined in the following order:
  - point 0: [-squareLength / 2, squareLength / 2, 0]
  - point 1: [ squareLength / 2, squareLength / 2, 0]
  - point 2: [ squareLength / 2, -squareLength / 2, 0]
  - point 3: [-squareLength / 2, -squareLength / 2, 0]
*/
CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoints,
                                  InputArray cameraMatrix, InputArray distCoeffs,
                                  OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
                                  bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE,
                                  InputArray rvec = noArray(), InputArray tvec = noArray(),
                                  OutputArray reprojectionError = noArray() );
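A hedged sketch of retrieving every candidate pose through solvePnPGeneric, again reusing the placeholder inputs (objectPoints, imagePoints, K) from the earlier snippets:

// Hedged fragment: collects all solutions instead of a single best pose.
std::vector<cv::Mat> rvecs, tvecs;
cv::Mat reprojErrors;
int n = cv::solvePnPGeneric(objectPoints, imagePoints, K, cv::noArray(),
                            rvecs, tvecs, false, cv::SOLVEPNP_IPPE,
                            cv::noArray(), cv::noArray(), reprojErrors);
// n solutions are returned; reprojErrors holds the RMS error of each one.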
/** @brief Finds an initial camera matrix from 3D-2D point correspondences.

@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
@@ -1041,7 +1277,7 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSiz
 @param distCoeffs Input vector of distortion coefficients
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
-@param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec , brings points from
+@param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
 the model coordinate system to the camera coordinate system.
 @param tvec Translation vector.
 @param length Length of the painted axes in the same unit as tvec (usually in meters).
@@ -1,3 +1,4 @@
+#include "precomp.hpp"
 #include "ap3p.h"

 #include <cmath>
@@ -154,10 +155,11 @@ ap3p::ap3p(double _fx, double _fy, double _cx, double _cy) {
 // worldPoints: The positions of the 3 feature points stored as column vectors
 // solutionsR: 4 possible solutions of rotation matrix of the world w.r.t the camera frame
 // solutionsT: 4 possible solutions of translation of the world origin w.r.t the camera frame
-int ap3p::computePoses(const double featureVectors[3][3],
-                       const double worldPoints[3][3],
+int ap3p::computePoses(const double featureVectors[3][4],
+                       const double worldPoints[3][4],
                        double solutionsR[4][3][3],
-                       double solutionsT[4][3]) {
+                       double solutionsT[4][3],
+                       bool p4p) {

     //world point vectors
     double w1[3] = {worldPoints[0][0], worldPoints[1][0], worldPoints[2][0]};
@@ -246,6 +248,13 @@ int ap3p::computePoses(const double featureVectors[3][3],
     double b3p[3];
     vect_scale((delta / k3b3), b3, b3p);

+    double X3 = worldPoints[0][3];
+    double Y3 = worldPoints[1][3];
+    double Z3 = worldPoints[2][3];
+    double mu3 = featureVectors[0][3];
+    double mv3 = featureVectors[1][3];
+    double reproj_errors[4];
+
     int nb_solutions = 0;
     for (int i = 0; i < 4; ++i) {
         double ctheta1p = s[i];
@@ -290,9 +299,29 @@ int ap3p::computePoses(const double featureVectors[3][3],
         solutionsR[nb_solutions][1][2] = R[2][1];
         solutionsR[nb_solutions][2][2] = R[2][2];

+        if (p4p) {
+            double X3p = solutionsR[nb_solutions][0][0] * X3 + solutionsR[nb_solutions][0][1] * Y3 + solutionsR[nb_solutions][0][2] * Z3 + solutionsT[nb_solutions][0];
+            double Y3p = solutionsR[nb_solutions][1][0] * X3 + solutionsR[nb_solutions][1][1] * Y3 + solutionsR[nb_solutions][1][2] * Z3 + solutionsT[nb_solutions][1];
+            double Z3p = solutionsR[nb_solutions][2][0] * X3 + solutionsR[nb_solutions][2][1] * Y3 + solutionsR[nb_solutions][2][2] * Z3 + solutionsT[nb_solutions][2];
+            double mu3p = X3p / Z3p;
+            double mv3p = Y3p / Z3p;
+            reproj_errors[nb_solutions] = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
+        }
+
         nb_solutions++;
     }

+    //sort the solutions
+    if (p4p) {
+        for (int i = 1; i < nb_solutions; i++) {
+            for (int j = i; j > 0 && reproj_errors[j-1] > reproj_errors[j]; j--) {
+                std::swap(reproj_errors[j], reproj_errors[j-1]);
+                std::swap(solutionsR[j], solutionsR[j-1]);
+                std::swap(solutionsT[j], solutionsT[j-1]);
+            }
+        }
+    }
+
     return nb_solutions;
 }
@@ -311,9 +340,10 @@ bool ap3p::solve(cv::Mat &R, cv::Mat &tvec, const cv::Mat &opoints, const cv::Ma
     else
         extract_points<cv::Point3d, cv::Point2f>(opoints, ipoints, points);

-    bool result = solve(rotation_matrix, translation, points[0], points[1], points[2], points[3], points[4], points[5],
-                        points[6], points[7], points[8], points[9], points[10], points[11], points[12], points[13],
-                        points[14],
+    bool result = solve(rotation_matrix, translation,
+                        points[0], points[1], points[2], points[3], points[4],
+                        points[5], points[6], points[7], points[8], points[9],
+                        points[10], points[11], points[12], points[13], points[14],
                         points[15], points[16], points[17], points[18], points[19]);
     cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);
     cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R);
@@ -335,10 +365,13 @@ int ap3p::solve(std::vector<cv::Mat> &Rs, std::vector<cv::Mat> &tvecs, const cv:
     else
         extract_points<cv::Point3d, cv::Point2f>(opoints, ipoints, points);

+    const bool p4p = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)) == 4;
     int solutions = solve(rotation_matrix, translation,
                           points[0], points[1], points[2], points[3], points[4],
                           points[5], points[6], points[7], points[8], points[9],
-                          points[10], points[11], points[12], points[13], points[14]);
+                          points[10], points[11], points[12], points[13], points[14],
+                          points[15], points[16], points[17], points[18], points[19],
+                          p4p);

     for (int i = 0; i < solutions; i++) {
         cv::Mat R, tvec;
@@ -353,42 +386,33 @@ int ap3p::solve(std::vector<cv::Mat> &Rs, std::vector<cv::Mat> &tvecs, const cv:
 }

 bool
-ap3p::solve(double R[3][3], double t[3], double mu0, double mv0, double X0, double Y0, double Z0, double mu1,
-            double mv1,
-            double X1, double Y1, double Z1, double mu2, double mv2, double X2, double Y2, double Z2, double mu3,
-            double mv3, double X3, double Y3, double Z3) {
+ap3p::solve(double R[3][3], double t[3],
+            double mu0, double mv0, double X0, double Y0, double Z0,
+            double mu1, double mv1, double X1, double Y1, double Z1,
+            double mu2, double mv2, double X2, double Y2, double Z2,
+            double mu3, double mv3, double X3, double Y3, double Z3) {
     double Rs[4][3][3], ts[4][3];

-    int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2);
+    const bool p4p = true;
+    int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2, mu3, mv3, X3, Y3, Z3, p4p);
     if (n == 0)
         return false;

-    int ns = 0;
-    double min_reproj = 0;
-    for (int i = 0; i < n; i++) {
-        double X3p = Rs[i][0][0] * X3 + Rs[i][0][1] * Y3 + Rs[i][0][2] * Z3 + ts[i][0];
-        double Y3p = Rs[i][1][0] * X3 + Rs[i][1][1] * Y3 + Rs[i][1][2] * Z3 + ts[i][1];
-        double Z3p = Rs[i][2][0] * X3 + Rs[i][2][1] * Y3 + Rs[i][2][2] * Z3 + ts[i][2];
-        double mu3p = cx + fx * X3p / Z3p;
-        double mv3p = cy + fy * Y3p / Z3p;
-        double reproj = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
-        if (i == 0 || min_reproj > reproj) {
-            ns = i;
-            min_reproj = reproj;
-        }
-    }
-
     for (int i = 0; i < 3; i++) {
         for (int j = 0; j < 3; j++)
-            R[i][j] = Rs[ns][i][j];
-        t[i] = ts[ns][i];
+            R[i][j] = Rs[0][i][j];
+        t[i] = ts[0][i];
     }

     return true;
 }

-int ap3p::solve(double R[4][3][3], double t[4][3], double mu0, double mv0, double X0, double Y0, double Z0, double mu1,
-                double mv1, double X1, double Y1, double Z1, double mu2, double mv2, double X2, double Y2, double Z2) {
+int ap3p::solve(double R[4][3][3], double t[4][3],
+                double mu0, double mv0, double X0, double Y0, double Z0,
+                double mu1, double mv1, double X1, double Y1, double Z1,
+                double mu2, double mv2, double X2, double Y2, double Z2,
+                double mu3, double mv3, double X3, double Y3, double Z3,
+                bool p4p) {
     double mk0, mk1, mk2;
     double norm;

@@ -413,13 +437,17 @@ int ap3p::solve(double R[4][3][3], double t[4][3], double mu0, double mv0, doubl
     mu2 *= mk2;
     mv2 *= mk2;

-    double featureVectors[3][3] = {{mu0, mu1, mu2},
-                                   {mv0, mv1, mv2},
-                                   {mk0, mk1, mk2}};
-    double worldPoints[3][3] = {{X0, X1, X2},
-                                {Y0, Y1, Y2},
-                                {Z0, Z1, Z2}};
+    mu3 = inv_fx * mu3 - cx_fx;
+    mv3 = inv_fy * mv3 - cy_fy;
+    double mk3 = 1; //not used
+
+    double featureVectors[3][4] = {{mu0, mu1, mu2, mu3},
+                                   {mv0, mv1, mv2, mv3},
+                                   {mk0, mk1, mk2, mk3}};
+    double worldPoints[3][4] = {{X0, X1, X2, X3},
+                                {Y0, Y1, Y2, Y3},
+                                {Z0, Z1, Z2, Z3}};

-    return computePoses(featureVectors, worldPoints, R, t);
+    return computePoses(featureVectors, worldPoints, R, t, p4p);
 }
 }

@@ -1,7 +1,7 @@
 #ifndef P3P_P3P_H
 #define P3P_P3P_H

-#include "precomp.hpp"
+#include <opencv2/core.hpp>

 namespace cv {
 class ap3p {
@@ -18,7 +18,7 @@ private:
     void extract_points(const cv::Mat &opoints, const cv::Mat &ipoints, std::vector<double> &points) {
         points.clear();
         int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
-        points.resize(5*npoints);
+        points.resize(5*4); //resize vector to fit for p4p case
         for (int i = 0; i < npoints; i++) {
             points[i * 5] = ipoints.at<IpointType>(i).x * fx + cx;
             points[i * 5 + 1] = ipoints.at<IpointType>(i).y * fy + cy;
@@ -26,6 +26,12 @@ private:
             points[i * 5 + 3] = opoints.at<OpointType>(i).y;
             points[i * 5 + 4] = opoints.at<OpointType>(i).z;
         }
+        //Fill vectors with unused values for p3p case
+        for (int i = npoints; i < 4; i++) {
+            for (int j = 0; j < 5; j++) {
+                points[i * 5 + j] = 0;
+            }
+        }
     }

     void init_inverse_parameters();
@@ -45,7 +51,9 @@ public:
     int solve(double R[4][3][3], double t[4][3],
               double mu0, double mv0, double X0, double Y0, double Z0,
               double mu1, double mv1, double X1, double Y1, double Z1,
-              double mu2, double mv2, double X2, double Y2, double Z2);
+              double mu2, double mv2, double X2, double Y2, double Z2,
+              double mu3, double mv3, double X3, double Y3, double Z3,
+              bool p4p);

     bool solve(double R[3][3], double t[3],
                double mu0, double mv0, double X0, double Y0, double Z0,
@@ -59,8 +67,8 @@ public:
     // worldPoints: Positions of the 3 feature points stored as column vectors
     // solutionsR: 4 possible solutions of rotation matrix of the world w.r.t the camera frame
     // solutionsT: 4 possible solutions of translation of the world origin w.r.t the camera frame
-    int computePoses(const double featureVectors[3][3], const double worldPoints[3][3], double solutionsR[4][3][3],
-                     double solutionsT[4][3]);
+    int computePoses(const double featureVectors[3][4], const double worldPoints[3][4], double solutionsR[4][3][3],
+                     double solutionsT[4][3], bool p4p);

 };
 }

modules/calib3d/src/ippe.cpp (new file, 1100 lines)
File diff suppressed because it is too large

modules/calib3d/src/ippe.hpp (new file, 259 lines)
@@ -0,0 +1,259 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

// This file is based on file issued with the following license:

/*============================================================================

Copyright 2017 Toby Collins
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this
   list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may
   be used to endorse or promote products derived from this software without
   specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef OPENCV_CALIB3D_IPPE_HPP
#define OPENCV_CALIB3D_IPPE_HPP

#include <opencv2/core.hpp>

namespace cv {
namespace IPPE {

class PoseSolver {
public:
    /**
     * @brief PoseSolver constructor
     */
    PoseSolver();

    /**
     * @brief Finds the two possible poses of a planar object given a set of correspondences and their respective reprojection errors.
     * The poses are sorted with the first having the lowest reprojection error.
     * @param objectPoints Array of 4 or more coplanar object points defined in object coordinates.
     * 1xN/Nx1 3-channel (float or double) where N is the number of points
     * @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. Points are in normalized pixel coordinates.
     * @param rvec1 First rotation solution (3x1 rotation vector)
     * @param tvec1 First translation solution (3x1 vector)
     * @param reprojErr1 Reprojection error of first solution
     * @param rvec2 Second rotation solution (3x1 rotation vector)
     * @param tvec2 Second translation solution (3x1 vector)
     * @param reprojErr2 Reprojection error of second solution
     */
    void solveGeneric(InputArray objectPoints, InputArray imagePoints, OutputArray rvec1, OutputArray tvec1,
                      float& reprojErr1, OutputArray rvec2, OutputArray tvec2, float& reprojErr2);

    /**
     * @brief Finds the two possible poses of a square planar object and their respective reprojection errors using IPPE.
     * The poses are sorted so that the first one is the one with the lowest reprojection error.
     *
     * @param objectPoints Array of 4 coplanar object points defined in the following object coordinates:
     *   - point 0: [-squareLength / 2.0, squareLength / 2.0, 0]
     *   - point 1: [squareLength / 2.0, squareLength / 2.0, 0]
     *   - point 2: [squareLength / 2.0, -squareLength / 2.0, 0]
     *   - point 3: [-squareLength / 2.0, -squareLength / 2.0, 0]
     * 1xN/Nx1 3-channel (float or double) where N is the number of points
     * @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. Points are in normalized pixel coordinates.
     * @param rvec1 First rotation solution (3x1 rotation vector)
     * @param tvec1 First translation solution (3x1 vector)
     * @param reprojErr1 Reprojection error of first solution
     * @param rvec2 Second rotation solution (3x1 rotation vector)
     * @param tvec2 Second translation solution (3x1 vector)
     * @param reprojErr2 Reprojection error of second solution
     */
    void solveSquare(InputArray objectPoints, InputArray imagePoints, OutputArray rvec1, OutputArray tvec1,
                     float& reprojErr1, OutputArray rvec2, OutputArray tvec2, float& reprojErr2);

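    // Illustrative only (hypothetical caller code, not part of the original
    // header): a hedged sketch of driving solveSquare, assuming the image
    // points have already been undistorted and normalized by the caller:
    //
    //   cv::IPPE::PoseSolver solver;
    //   cv::Mat rvec1, tvec1, rvec2, tvec2;
    //   float err1 = 0.f, err2 = 0.f;
    //   solver.solveSquare(objectPoints, normalizedImagePoints,
    //                      rvec1, tvec1, err1, rvec2, tvec2, err2);
    //   // rvec1/tvec1 hold the pose with the lower reprojection error.
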
private:
|
||||
/**
|
||||
* @brief Finds the two possible poses of a planar object given a set of correspondences in normalized pixel coordinates.
|
||||
* These poses are **NOT** sorted on reprojection error. Note that the returned poses are object-to-camera transforms, and not camera-to-object transforms.
|
||||
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double).
|
||||
* @param normalizedImagePoints Array of corresponding image points in normalized pixel coordinates, 1xN/Nx1 2-channel (float or double).
|
||||
* @param Ma First pose solution (unsorted)
|
||||
* @param Mb Second pose solution (unsorted)
|
||||
*/
|
||||
void solveGeneric(InputArray objectPoints, InputArray normalizedImagePoints, OutputArray Ma, OutputArray Mb);
|
||||
|
||||
/**
|
||||
* @brief Finds the two possible poses of a planar object in its canonical position, given a set of correspondences in normalized pixel coordinates.
|
||||
* These poses are **NOT** sorted on reprojection error. Note that the returned poses are object-to-camera transforms, and not camera-to-object transforms.
|
||||
* @param canonicalObjPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (double) where N is the number of points
|
||||
* @param normalizedInputPoints Array of corresponding image points in normalized pixel coordinates, 1xN/Nx1 2-channel (double) where N is the number of points
|
||||
* @param H Homography mapping canonicalObjPoints to normalizedInputPoints.
|
||||
* @param Ma
|
||||
* @param Mb
|
||||
*/
|
||||
void solveCanonicalForm(InputArray canonicalObjPoints, InputArray normalizedInputPoints, const Matx33d& H,
|
||||
OutputArray Ma, OutputArray Mb);
|
||||
|
||||
/**
|
||||
* @brief Computes the translation solution for a given rotation solution
|
||||
* @param objectPoints Array of corresponding object points, 1xN/Nx1 3-channel where N is the number of points
|
||||
* @param normalizedImagePoints Array of corresponding image points (undistorted), 1xN/Nx1 2-channel where N is the number of points
|
||||
* @param R Rotation solution (3x1 rotation vector)
|
||||
* @param t Translation solution (3x1 rotation vector)
|
||||
*/
|
||||
void computeTranslation(InputArray objectPoints, InputArray normalizedImgPoints, InputArray R, OutputArray t);
|
||||
|
||||
/**
|
||||
* @brief Computes the two rotation solutions from the Jacobian of a homography matrix H at a point (ux,uy) on the object plane.
|
||||
* For highest accuracy the Jacobian should be computed at the centroid of the point correspondences (see the IPPE paper for the explanation of this).
|
||||
* For a point (ux,uy) on the object plane, suppose the homography H maps (ux,uy) to a point (p,q) in the image (in normalized pixel coordinates).
|
||||
* The Jacobian matrix [J00, J01; J10,J11] is the Jacobian of the mapping evaluated at (ux,uy).
|
||||
* @param j00 Homography jacobian coefficent at (ux,uy)
|
||||
* @param j01 Homography jacobian coefficent at (ux,uy)
|
||||
* @param j10 Homography jacobian coefficent at (ux,uy)
|
||||
* @param j11 Homography jacobian coefficent at (ux,uy)
|
||||
* @param p The x coordinate of point (ux,uy) mapped into the image (undistorted and normalized position)
|
||||
* @param q The y coordinate of point (ux,uy) mapped into the image (undistorted and normalized position)
|
||||
*/
|
||||
void computeRotations(double j00, double j01, double j10, double j11, double p, double q, OutputArray _R1, OutputArray _R2);
|
||||
|
||||
/**
|
||||
* @brief Closed-form solution for the homography mapping with four corner correspondences of a square (it maps source points to target points).
|
||||
* The source points are the four corners of a zero-centred squared defined by:
|
||||
* - point 0: [-squareLength / 2.0, squareLength / 2.0]
|
||||
* - point 1: [squareLength / 2.0, squareLength / 2.0]
|
||||
* - point 2: [squareLength / 2.0, -squareLength / 2.0]
|
||||
* - point 3: [-squareLength / 2.0, -squareLength / 2.0]
|
||||
*
|
||||
* @param targetPoints Array of four corresponding target points, 1x4/4x1 2-channel. Note that the points should be ordered to correspond with points 0, 1, 2 and 3.
|
||||
* @param halfLength The square's half length (i.e. squareLength/2.0)
|
||||
* @param H Homograhy mapping the source points to the target points, 3x3 single channel
|
||||
*/
|
||||
void homographyFromSquarePoints(InputArray targetPoints, double halfLength, OutputArray H);
|
||||
|
||||
/**
|
||||
* @brief Fast conversion from a rotation matrix to a rotation vector using Rodrigues' formula
|
||||
* @param R Input rotation matrix, 3x3 1-channel (double)
|
||||
* @param r Output rotation vector, 3x1/1x3 1-channel (double)
|
||||
*/
|
||||
void rot2vec(InputArray R, OutputArray r);

/**
* @brief Takes a set of planar object points and transforms them to 'canonical' object coordinates, i.e. so that they have zero mean and lie on the plane z=0
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param canonicalObjectPoints Object points in canonical coordinates 1xN/Nx1 2-channel (double)
* @param MobjectPoints2Canonical Transform matrix mapping _objectPoints to _canonicalObjectPoints: 4x4 1-channel (double)
*/
void makeCanonicalObjectPoints(InputArray objectPoints, OutputArray canonicalObjectPoints, OutputArray MobjectPoints2Canonical);

/**
* @brief Evaluates the Root Mean Squared (RMS) reprojection error of a pose solution.
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. This can either be in pixel coordinates or normalized pixel coordinates.
* @param M Pose matrix from 3D object to camera coordinates: 4x4 1-channel (double)
* @param err RMS reprojection error
*/
void evalReprojError(InputArray objectPoints, InputArray imagePoints, InputArray M, float& err);

/**
* @brief Sorts two pose solutions according to their RMS reprojection error (lowest first).
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. This can either be in pixel coordinates or normalized pixel coordinates.
* @param Ma Pose matrix 1: 4x4 1-channel
* @param Mb Pose matrix 2: 4x4 1-channel
* @param M1 Member of {Ma, Mb} with the lowest RMS reprojection error. Performs a deep copy.
* @param M2 Member of {Ma, Mb} with the highest RMS reprojection error. Performs a deep copy.
* @param err1 RMS reprojection error of _M1
* @param err2 RMS reprojection error of _M2
*/
void sortPosesByReprojError(InputArray objectPoints, InputArray imagePoints, InputArray Ma, InputArray Mb, OutputArray M1, OutputArray M2, float& err1, float& err2);

/**
* @brief Finds the rotation _Ra that rotates a vector _a to the z axis (0,0,1)
* @param a vector: 3x1 mat (double)
* @param Ra Rotation: 3x3 mat (double)
*/
void rotateVec2ZAxis(const Matx31d& a, Matx33d& Ra);
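
One standard construction for such a rotation is axis-angle via cv::Rodrigues; a minimal sketch, assuming a is non-zero and not anti-parallel to z (that degenerate case needs a separate 180-degree rotation):

    cv::Vec3d an = cv::normalize(cv::Vec3d(0.3, -0.2, 0.93)); // illustrative input a, unit length
    cv::Vec3d axis = an.cross(cv::Vec3d(0, 0, 1));            // rotation axis: a x z
    double s = cv::norm(axis), c = an[2];                     // sin and cos of the angle
    cv::Mat Ra = cv::Mat::eye(3, 3, CV_64F);
    if (s > 1e-12)
        cv::Rodrigues(axis * (std::atan2(s, c) / s), Ra);     // Ra maps a onto +z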

/**
* @brief Computes the rotation _R that rotates the object points to the plane z=0. This uses the cross-product method with the first three object points.
* @param objectPoints Array of N>=3 coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param R Rotation Mat: 3x3 (double)
* @return Success (true) or failure (false)
*/
bool computeObjextSpaceR3Pts(InputArray objectPoints, Matx33d& R);

/**
* @brief computeObjextSpaceRSvD Computes the rotation _R that rotates the object points to the plane z=0. This uses an SVD decomposition of the zero-meaned object points.
* @param objectPointsZeroMean Zero-meaned coplanar object points: 3xN matrix (double) where N>=3
* @param R Rotation Mat: 3x3 (double)
*/
void computeObjextSpaceRSvD(InputArray objectPointsZeroMean, OutputArray R);

/**
* @brief Generates the 4 object points of a square planar object
* @param squareLength The square's length (which is also its width) in object coordinate units (e.g. millimeters, meters, etc.)
* @param objectPoints Set of 4 object points (1x4 3-channel double)
*/
void generateSquareObjectCorners3D(double squareLength, OutputArray objectPoints);

/**
* @brief Generates the 4 object points of a square planar object, without including the z-component (which is z=0 for all points).
* @param squareLength The square's length (which is also its width) in object coordinate units (e.g. millimeters, meters, etc.)
* @param objectPoints Set of 4 object points (1x4 2-channel double)
*/
void generateSquareObjectCorners2D(double squareLength, OutputArray objectPoints);

/**
* @brief Computes the average depth of an object given its pose in camera coordinates
* @param objectPoints Object points defined in 3D object space
* @param rvec Rotation component of pose
* @param tvec Translation component of pose
* @return average depth of the object
*/
double meanSceneDepth(InputArray objectPoints, InputArray rvec, InputArray tvec);
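
The quantity is simply the mean z after mapping the points into camera coordinates; a self-contained sketch (hypothetical helper, not the internal implementation):

    double meanDepthSketch(const std::vector<cv::Point3d>& obj,
                           const cv::Vec3d& rvec, const cv::Vec3d& tvec)
    {
        cv::Matx33d R;
        cv::Rodrigues(rvec, R);
        double z = 0;
        for (const cv::Point3d& X : obj)              // z-row of [R|t] applied to each point
            z += R(2, 0) * X.x + R(2, 1) * X.y + R(2, 2) * X.z + tvec[2];
        return z / (double)obj.size();
    }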

//! a small constant used to test 'small' values close to zero.
double IPPE_SMALL;
};
} //namespace IPPE

namespace HomographyHO {

/**
* @brief Computes the best-fitting homography matrix from source to target points using Harker and O'Leary's method:
* Harker, M., O'Leary, P., Computation of Homographies, Proceedings of the British Machine Vision Conference 2005, Oxford, England.
* This is not the author's implementation.
* @param srcPoints Array of source points: 1xN/Nx1 2-channel (float or double) where N is the number of points
* @param targPoints Array of target points: 1xN/Nx1 2-channel (float or double)
* @param H Homography from source to target: 3x3 1-channel (double)
*/
void homographyHO(InputArray srcPoints, InputArray targPoints, Matx33d& H);
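
For comparison only (this is not the HO method), the public API also fits a least-squares homography to point correspondences:

    cv::Mat H0 = cv::findHomography(srcPoints, targPoints, 0); // method 0 = plain least squares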

/**
* @brief Performs data normalization before homography estimation. For details see Hartley, R., Zisserman, A., Multiple View Geometry in Computer Vision,
* Cambridge University Press, Cambridge, 2001
* @param Data Array of source data points: 1xN/Nx1 2-channel (float or double) where N is the number of points
* @param DataN Normalized data points: 1xN/Nx1 2-channel (float or double) where N is the number of points
* @param T Homogeneous transform from source to normalized: 3x3 1-channel (double)
* @param Ti Homogeneous transform from normalized to source: 3x3 1-channel (double)
*/
void normalizeDataIsotropic(InputArray Data, OutputArray DataN, OutputArray T, OutputArray Ti);
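
A minimal sketch of the standard isotropic normalization (assuming 2-D double points in pts): move the centroid to the origin and scale so the mean distance from it becomes sqrt(2).

    cv::Scalar mu = cv::mean(pts);                        // centroid; pts: std::vector<cv::Point2d>
    double meanDist = 0;
    for (const cv::Point2d& p : pts)
        meanDist += std::hypot(p.x - mu[0], p.y - mu[1]);
    meanDist /= (double)pts.size();
    double s = std::sqrt(2.0) / meanDist;
    cv::Matx33d T(s, 0, -s * mu[0],
                  0, s, -s * mu[1],
                  0, 0, 1);                               // source -> normalized; Ti = T.inv()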

}
} //namespace cv

#endif

@ -49,9 +49,11 @@ bool p3p::solve(cv::Mat& R, cv::Mat& tvec, const cv::Mat& opoints, const cv::Mat
else
extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);

bool result = solve(rotation_matrix, translation, points[0], points[1], points[2], points[3], points[4], points[5],
points[6], points[7], points[8], points[9], points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19]);
bool result = solve(rotation_matrix, translation,
points[0], points[1], points[2], points[3], points[4],
points[5], points[6], points[7], points[8], points[9],
points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19]);
cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);
cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R);
return result;
@ -75,10 +77,13 @@ int p3p::solve(std::vector<cv::Mat>& Rs, std::vector<cv::Mat>& tvecs, const cv::
else
extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);

const bool p4p = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)) == 4;
int solutions = solve(rotation_matrix, translation,
points[0], points[1], points[2], points[3], points[4],
points[5], points[6], points[7], points[8], points[9],
points[10], points[11], points[12], points[13], points[14]);
points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19],
p4p);

for (int i = 0; i < solutions; i++) {
cv::Mat R, tvec;
@ -100,39 +105,27 @@ bool p3p::solve(double R[3][3], double t[3],
{
double Rs[4][3][3], ts[4][3];

int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2);
const bool p4p = true;
int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2, mu3, mv3, X3, Y3, Z3, p4p);

if (n == 0)
return false;

int ns = 0;
double min_reproj = 0;
for(int i = 0; i < n; i++) {
double X3p = Rs[i][0][0] * X3 + Rs[i][0][1] * Y3 + Rs[i][0][2] * Z3 + ts[i][0];
double Y3p = Rs[i][1][0] * X3 + Rs[i][1][1] * Y3 + Rs[i][1][2] * Z3 + ts[i][1];
double Z3p = Rs[i][2][0] * X3 + Rs[i][2][1] * Y3 + Rs[i][2][2] * Z3 + ts[i][2];
double mu3p = cx + fx * X3p / Z3p;
double mv3p = cy + fy * Y3p / Z3p;
double reproj = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
if (i == 0 || min_reproj > reproj) {
ns = i;
min_reproj = reproj;
}
}

for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++)
R[i][j] = Rs[ns][i][j];
t[i] = ts[ns][i];
R[i][j] = Rs[0][i][j];
t[i] = ts[0][i];
}

return true;
}

int p3p::solve(double R[4][3][3], double t[4][3],
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2)
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3,
bool p4p)
{
double mk0, mk1, mk2;
double norm;
@ -152,6 +145,9 @@ int p3p::solve(double R[4][3][3], double t[4][3],
norm = sqrt(mu2 * mu2 + mv2 * mv2 + 1);
mk2 = 1. / norm; mu2 *= mk2; mv2 *= mk2;

mu3 = inv_fx * mu3 - cx_fx;
mv3 = inv_fy * mv3 - cy_fy;

double distances[3];
distances[0] = sqrt( (X1 - X2) * (X1 - X2) + (Y1 - Y2) * (Y1 - Y2) + (Z1 - Z2) * (Z1 - Z2) );
distances[1] = sqrt( (X0 - X2) * (X0 - X2) + (Y0 - Y2) * (Y0 - Y2) + (Z0 - Z2) * (Z0 - Z2) );
@ -167,6 +163,7 @@ int p3p::solve(double R[4][3][3], double t[4][3],
int n = solve_for_lengths(lengths, distances, cosines);

int nb_solutions = 0;
double reproj_errors[4];
for(int i = 0; i < n; i++) {
double M_orig[3][3];

@ -185,9 +182,29 @@ int p3p::solve(double R[4][3][3], double t[4][3],
if (!align(M_orig, X0, Y0, Z0, X1, Y1, Z1, X2, Y2, Z2, R[nb_solutions], t[nb_solutions]))
continue;

if (p4p) {
double X3p = R[nb_solutions][0][0] * X3 + R[nb_solutions][0][1] * Y3 + R[nb_solutions][0][2] * Z3 + t[nb_solutions][0];
double Y3p = R[nb_solutions][1][0] * X3 + R[nb_solutions][1][1] * Y3 + R[nb_solutions][1][2] * Z3 + t[nb_solutions][1];
double Z3p = R[nb_solutions][2][0] * X3 + R[nb_solutions][2][1] * Y3 + R[nb_solutions][2][2] * Z3 + t[nb_solutions][2];
double mu3p = X3p / Z3p;
double mv3p = Y3p / Z3p;
reproj_errors[nb_solutions] = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
}

nb_solutions++;
}

if (p4p) {
//sort the solutions
for (int i = 1; i < nb_solutions; i++) {
for (int j = i; j > 0 && reproj_errors[j-1] > reproj_errors[j]; j--) {
std::swap(reproj_errors[j], reproj_errors[j-1]);
std::swap(R[j], R[j-1]);
std::swap(t[j], t[j-1]);
}
}
}

return nb_solutions;
}

@ -15,7 +15,9 @@ class p3p
int solve(double R[4][3][3], double t[4][3],
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2);
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3,
bool p4p);
bool solve(double R[3][3], double t[3],
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
@ -36,7 +38,7 @@ class p3p
{
points.clear();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
points.resize(5*npoints);
points.resize(5*4); //resize vector to fit for p4p case
for(int i = 0; i < npoints; i++)
{
points[i*5] = ipoints.at<IpointType>(i).x*fx + cx;
@ -45,6 +47,12 @@ class p3p
points[i*5+3] = opoints.at<OpointType>(i).y;
points[i*5+4] = opoints.at<OpointType>(i).z;
}
//Fill vectors with unused values for p3p case
for (int i = npoints; i < 4; i++) {
for (int j = 0; j < 5; j++) {
points[i * 5 + j] = 0;
}
}
}
void init_inverse_parameters();
int solve_for_lengths(double lengths[4][3], double distances[3], double cosines[3]);

@ -46,12 +46,44 @@
#include "epnp.h"
#include "p3p.h"
#include "ap3p.h"
#include "ippe.hpp"
#include "calib3d_c_api.h"

#include <iostream>

namespace cv
{
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
static bool isPlanarObjectPoints(InputArray _objectPoints, double threshold)
{
CV_CheckType(_objectPoints.type(), _objectPoints.type() == CV_32FC3 || _objectPoints.type() == CV_64FC3,
"Type of _objectPoints must be CV_32FC3 or CV_64FC3");
Mat objectPoints;
if (_objectPoints.type() == CV_32FC3)
{
_objectPoints.getMat().convertTo(objectPoints, CV_64F);
}
else
{
objectPoints = _objectPoints.getMat();
}

Scalar meanValues = mean(objectPoints);
int nbPts = objectPoints.checkVector(3, CV_64F);
Mat objectPointsCentred = objectPoints - meanValues;
objectPointsCentred = objectPointsCentred.reshape(1, nbPts);

Mat w, u, vt;
Mat MM = objectPointsCentred.t() * objectPointsCentred;
SVDecomp(MM, w, u, vt);

return (w.at<double>(2) < w.at<double>(1) * threshold);
}

static bool approxEqual(double a, double b, double eps)
{
return std::fabs(a-b) < eps;
}
#endif

void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
InputArray rvec, InputArray tvec, float length, int thickness)
{
@ -80,120 +112,24 @@ void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray d
line(image, imagePoints[0], imagePoints[3], Scalar(255, 0, 0), thickness);
}

bool solvePnP( InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
bool solvePnP( InputArray opoints, InputArray ipoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, int flags )
{
CV_INSTRUMENT_REGION();

Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
vector<Mat> rvecs, tvecs;
int solutions = solvePnPGeneric(opoints, ipoints, cameraMatrix, distCoeffs, rvecs, tvecs, useExtrinsicGuess, (SolvePnPMethod)flags, rvec, tvec);

Mat rvec, tvec;
if( flags != SOLVEPNP_ITERATIVE )
useExtrinsicGuess = false;

if( useExtrinsicGuess )
if (solutions > 0)
{
int rtype = _rvec.type(), ttype = _tvec.type();
Size rsize = _rvec.size(), tsize = _tvec.size();
CV_Assert( (rtype == CV_32F || rtype == CV_64F) &&
(ttype == CV_32F || ttype == CV_64F) );
CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
(tsize == Size(1, 3) || tsize == Size(3, 1)) );
int rdepth = rvec.empty() ? CV_64F : rvec.depth();
int tdepth = tvec.empty() ? CV_64F : tvec.depth();
rvecs[0].convertTo(rvec, rdepth);
tvecs[0].convertTo(tvec, tdepth);
}
else
{
int mtype = CV_64F;
// use CV_32F if all PnP inputs are CV_32F and outputs are empty
if (_ipoints.depth() == _cameraMatrix.depth() && _ipoints.depth() == _opoints.depth() &&
_rvec.empty() && _tvec.empty())
mtype = _opoints.depth();

_rvec.create(3, 1, mtype);
_tvec.create(3, 1, mtype);
}
rvec = _rvec.getMat();
tvec = _tvec.getMat();

Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
Mat distCoeffs = Mat_<double>(distCoeffs0);
bool result = false;

if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
epnp PnP(cameraMatrix, opoints, undistortedPoints);

Mat R;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
result = true;
}
else if (flags == SOLVEPNP_P3P)
{
CV_Assert( npoints == 4);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
p3p P3Psolver(cameraMatrix);

Mat R;
result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
Rodrigues(R, rvec);
}
else if (flags == SOLVEPNP_AP3P)
{
CV_Assert( npoints == 4);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
ap3p P3Psolver(cameraMatrix);

Mat R;
result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
Rodrigues(R, rvec);
}
else if (flags == SOLVEPNP_ITERATIVE)
{
CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
(c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );
result = true;
}
/*else if (flags == SOLVEPNP_DLS)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

dls PnP(opoints, undistortedPoints);

Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
bool result = PnP.compute_pose(R, tvec);
if (result)
Rodrigues(R, rvec);
return result;
}
else if (flags == SOLVEPNP_UPNP)
{
upnp PnP(cameraMatrix, opoints, ipoints);

Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
return true;
}*/
else
CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
return result;
return solutions > 0;
}
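
A minimal usage sketch of the rewritten entry point (all values illustrative; the default flag is SOLVEPNP_ITERATIVE):

    std::vector<cv::Point3f> obj = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    std::vector<cv::Point2f> img = { {320,240}, {400,238}, {402,318}, {322,320} };
    cv::Mat K = (cv::Mat_<double>(3,3) << 800, 0, 320,  0, 800, 240,  0, 0, 1);
    cv::Mat rvec, tvec;
    bool ok = cv::solvePnP(obj, img, K, cv::noArray(), rvec, tvec); // best solution only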

class PnPRansacCallback CV_FINAL : public PointSetRegistrator::Callback
@ -258,10 +194,10 @@ public:
};

bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
int iterationsCount, float reprojectionError, double confidence,
OutputArray _inliers, int flags)
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
int iterationsCount, float reprojectionError, double confidence,
OutputArray _inliers, int flags)
{
CV_INSTRUMENT_REGION();

@ -410,7 +346,8 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,

Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( npoints == 3 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
CV_Assert( npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
CV_Assert( npoints == 3 || npoints == 4 );
CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );

Mat cameraMatrix0 = _cameraMatrix.getMat();
@ -420,7 +357,7 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,

Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
std::vector<Mat> Rs, ts;
std::vector<Mat> Rs, ts, rvecs;

int solutions = 0;
if (flags == SOLVEPNP_P3P)
@ -438,19 +375,91 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
return 0;
}

if (_rvecs.needed()) {
_rvecs.create(solutions, 1, CV_64F);
Mat objPts, imgPts;
opoints.convertTo(objPts, CV_64F);
ipoints.convertTo(imgPts, CV_64F);
if (imgPts.cols > 1)
{
imgPts = imgPts.reshape(1);
imgPts = imgPts.t();
}
else
imgPts = imgPts.reshape(1, 2*imgPts.rows);

if (_tvecs.needed()) {
_tvecs.create(solutions, 1, CV_64F);
}

for (int i = 0; i < solutions; i++) {
vector<double> reproj_errors(solutions);
for (size_t i = 0; i < reproj_errors.size(); i++)
{
Mat rvec;
Rodrigues(Rs[i], rvec);
_tvecs.getMatRef(i) = ts[i];
_rvecs.getMatRef(i) = rvec;
rvecs.push_back(rvec);

Mat projPts;
projectPoints(objPts, rvec, ts[i], _cameraMatrix, _distCoeffs, projPts);

projPts = projPts.reshape(1, 2*projPts.rows);
Mat err = imgPts - projPts;

err = err.t() * err;
reproj_errors[i] = err.at<double>(0,0);
}

//sort the solutions
for (int i = 1; i < solutions; i++)
{
for (int j = i; j > 0 && reproj_errors[j-1] > reproj_errors[j]; j--)
{
std::swap(reproj_errors[j], reproj_errors[j-1]);
std::swap(rvecs[j], rvecs[j-1]);
std::swap(ts[j], ts[j-1]);
}
}

int depthRot = _rvecs.fixedType() ? _rvecs.depth() : CV_64F;
int depthTrans = _tvecs.fixedType() ? _tvecs.depth() : CV_64F;
_rvecs.create(solutions, 1, CV_MAKETYPE(depthRot, _rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
_tvecs.create(solutions, 1, CV_MAKETYPE(depthTrans, _tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));

for (int i = 0; i < solutions; i++)
{
Mat rvec0, tvec0;
if (depthRot == CV_64F)
rvec0 = rvecs[i];
else
rvecs[i].convertTo(rvec0, depthRot);

if (depthTrans == CV_64F)
tvec0 = ts[i];
else
ts[i].convertTo(tvec0, depthTrans);

if (_rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR)
{
Mat rref = _rvecs.getMat_();

if (_rvecs.depth() == CV_32F)
rref.at<Vec3f>(0,i) = Vec3f(rvec0.at<float>(0,0), rvec0.at<float>(1,0), rvec0.at<float>(2,0));
else
rref.at<Vec3d>(0,i) = Vec3d(rvec0.at<double>(0,0), rvec0.at<double>(1,0), rvec0.at<double>(2,0));
}
else
{
_rvecs.getMatRef(i) = rvec0;
}

if (_tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR)
{
Mat tref = _tvecs.getMat_();

if (_tvecs.depth() == CV_32F)
tref.at<Vec3f>(0,i) = Vec3f(tvec0.at<float>(0,0), tvec0.at<float>(1,0), tvec0.at<float>(2,0));
else
tref.at<Vec3d>(0,i) = Vec3d(tvec0.at<double>(0,0), tvec0.at<double>(1,0), tvec0.at<double>(2,0));
}
else
{
_tvecs.getMatRef(i) = tvec0;
}
}

return solutions;
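
With the sorting above, callers now get the candidates ordered best-first; a usage sketch (illustrative values, intrinsics assumed known):

    std::vector<cv::Point3f> obj3 = { {0,0,0}, {1,0,0}, {0,1,0} };
    std::vector<cv::Point2f> img3 = { {320,240}, {404,242}, {318,322} };
    cv::Mat K = (cv::Mat_<double>(3,3) << 800, 0, 320,  0, 800, 240,  0, 0, 1);
    std::vector<cv::Mat> rvecs, tvecs;
    int n = cv::solveP3P(obj3, img3, K, cv::noArray(), rvecs, tvecs, cv::SOLVEPNP_P3P);
    // rvecs[0]/tvecs[0] hold the candidate with the lowest reprojection error.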
@ -723,4 +732,314 @@ void solvePnPRefineVVS(InputArray _objectPoints, InputArray _imagePoints,
solvePnPRefine(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, SOLVEPNP_REFINE_VVS, _criteria, _VVSlambda);
}

int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
bool useExtrinsicGuess, SolvePnPMethod flags,
InputArray _rvec, InputArray _tvec,
OutputArray reprojectionError) {
CV_INSTRUMENT_REGION();

Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

if( flags != SOLVEPNP_ITERATIVE )
useExtrinsicGuess = false;

if (useExtrinsicGuess)
CV_Assert( !_rvec.empty() && !_tvec.empty() );

if( useExtrinsicGuess )
{
int rtype = _rvec.type(), ttype = _tvec.type();
Size rsize = _rvec.size(), tsize = _tvec.size();
CV_Assert( (rtype == CV_32FC1 || rtype == CV_64FC1) &&
(ttype == CV_32FC1 || ttype == CV_64FC1) );
CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
(tsize == Size(1, 3) || tsize == Size(3, 1)) );
}

Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
Mat distCoeffs = Mat_<double>(distCoeffs0);

vector<Mat> vec_rvecs, vec_tvecs;
if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
epnp PnP(cameraMatrix, opoints, undistortedPoints);

Mat rvec, tvec, R;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);

vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}
else if (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P)
{
vector<Mat> rvecs, tvecs;
solveP3P(_opoints, _ipoints, _cameraMatrix, _distCoeffs, rvecs, tvecs, flags);
vec_rvecs.insert(vec_rvecs.end(), rvecs.begin(), rvecs.end());
vec_tvecs.insert(vec_tvecs.end(), tvecs.begin(), tvecs.end());
}
else if (flags == SOLVEPNP_ITERATIVE)
{
Mat rvec, tvec;
if (useExtrinsicGuess)
{
rvec = _rvec.getMat();
tvec = _tvec.getMat();
}
else
{
rvec.create(3, 1, CV_64FC1);
tvec.create(3, 1, CV_64FC1);
}

CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
(c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );

vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}
else if (flags == SOLVEPNP_IPPE)
{
CV_DbgAssert(isPlanarObjectPoints(opoints, 1e-3));
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

IPPE::PoseSolver poseSolver;
Mat rvec1, tvec1, rvec2, tvec2;
float reprojErr1, reprojErr2;
try
{
poseSolver.solveGeneric(opoints, undistortedPoints, rvec1, tvec1, reprojErr1, rvec2, tvec2, reprojErr2);

if (reprojErr1 < reprojErr2)
{
vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);

vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);
}
else
{
vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);

vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);
}
}
catch (...) { }
}
else if (flags == SOLVEPNP_IPPE_SQUARE)
{
CV_Assert(npoints == 4);

#if defined _DEBUG || defined CV_STATIC_ANALYSIS
double Xs[4][3];
if (opoints.depth() == CV_32F)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 3; j++)
{
Xs[i][j] = opoints.ptr<Vec3f>(0)[i](j);
}
}
}
else
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 3; j++)
{
Xs[i][j] = opoints.ptr<Vec3d>(0)[i](j);
}
}
}

const double equalThreshold = 1e-9;
//Z must be zero
for (int i = 0; i < 4; i++)
{
CV_DbgCheck(Xs[i][2], approxEqual(Xs[i][2], 0, equalThreshold), "Z object point coordinate must be zero!");
}
//Y0 == Y1 && Y2 == Y3
CV_DbgCheck(Xs[0][1], approxEqual(Xs[0][1], Xs[1][1], equalThreshold), "Object points must be: Y0 == Y1!");
CV_DbgCheck(Xs[2][1], approxEqual(Xs[2][1], Xs[3][1], equalThreshold), "Object points must be: Y2 == Y3!");
//X0 == X3 && X1 == X2
CV_DbgCheck(Xs[0][0], approxEqual(Xs[0][0], Xs[3][0], equalThreshold), "Object points must be: X0 == X3!");
CV_DbgCheck(Xs[1][0], approxEqual(Xs[1][0], Xs[2][0], equalThreshold), "Object points must be: X1 == X2!");
//X1 == Y1 && X3 == Y3
CV_DbgCheck(Xs[1][0], approxEqual(Xs[1][0], Xs[1][1], equalThreshold), "Object points must be: X1 == Y1!");
CV_DbgCheck(Xs[3][0], approxEqual(Xs[3][0], Xs[3][1], equalThreshold), "Object points must be: X3 == Y3!");
#endif

Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

IPPE::PoseSolver poseSolver;
Mat rvec1, tvec1, rvec2, tvec2;
float reprojErr1, reprojErr2;
try
{
poseSolver.solveSquare(opoints, undistortedPoints, rvec1, tvec1, reprojErr1, rvec2, tvec2, reprojErr2);

if (reprojErr1 < reprojErr2)
{
vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);

vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);
}
else
{
vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);

vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);
}
} catch (...) { }
}
/*else if (flags == SOLVEPNP_DLS)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

dls PnP(opoints, undistortedPoints);

Mat rvec, tvec, R;
bool result = PnP.compute_pose(R, tvec);
if (result)
{
Rodrigues(R, rvec);
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}
}
else if (flags == SOLVEPNP_UPNP)
{
upnp PnP(cameraMatrix, opoints, ipoints);

Mat rvec, tvec, R;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}*/
else
CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");

CV_Assert(vec_rvecs.size() == vec_tvecs.size());

int solutions = static_cast<int>(vec_rvecs.size());

int depthRot = _rvecs.fixedType() ? _rvecs.depth() : CV_64F;
int depthTrans = _tvecs.fixedType() ? _tvecs.depth() : CV_64F;
_rvecs.create(solutions, 1, CV_MAKETYPE(depthRot, _rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
_tvecs.create(solutions, 1, CV_MAKETYPE(depthTrans, _tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));

for (int i = 0; i < solutions; i++)
{
Mat rvec0, tvec0;
if (depthRot == CV_64F)
rvec0 = vec_rvecs[i];
else
vec_rvecs[i].convertTo(rvec0, depthRot);

if (depthTrans == CV_64F)
tvec0 = vec_tvecs[i];
else
vec_tvecs[i].convertTo(tvec0, depthTrans);

if (_rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR)
{
Mat rref = _rvecs.getMat_();

if (_rvecs.depth() == CV_32F)
rref.at<Vec3f>(0,i) = Vec3f(rvec0.at<float>(0,0), rvec0.at<float>(1,0), rvec0.at<float>(2,0));
else
rref.at<Vec3d>(0,i) = Vec3d(rvec0.at<double>(0,0), rvec0.at<double>(1,0), rvec0.at<double>(2,0));
}
else
{
_rvecs.getMatRef(i) = rvec0;
}

if (_tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR)
{
Mat tref = _tvecs.getMat_();

if (_tvecs.depth() == CV_32F)
tref.at<Vec3f>(0,i) = Vec3f(tvec0.at<float>(0,0), tvec0.at<float>(1,0), tvec0.at<float>(2,0));
else
tref.at<Vec3d>(0,i) = Vec3d(tvec0.at<double>(0,0), tvec0.at<double>(1,0), tvec0.at<double>(2,0));
}
else
{
_tvecs.getMatRef(i) = tvec0;
}
}

if (reprojectionError.needed())
{
int type = reprojectionError.type();
reprojectionError.create(solutions, 1, type);
CV_CheckType(reprojectionError.type(), type == CV_32FC1 || type == CV_64FC1,
"Type of reprojectionError must be CV_32FC1 or CV_64FC1!");

Mat objectPoints, imagePoints;
if (_opoints.depth() == CV_32F)
{
_opoints.getMat().convertTo(objectPoints, CV_64F);
}
else
{
objectPoints = _opoints.getMat();
}
if (_ipoints.depth() == CV_32F)
{
_ipoints.getMat().convertTo(imagePoints, CV_64F);
}
else
{
imagePoints = _ipoints.getMat();
}

for (size_t i = 0; i < vec_rvecs.size(); i++)
{
vector<Point2d> projectedPoints;
projectPoints(objectPoints, vec_rvecs[i], vec_tvecs[i], cameraMatrix, distCoeffs, projectedPoints);
double rmse = norm(projectedPoints, imagePoints, NORM_L2) / sqrt(2*projectedPoints.size());

Mat err = reprojectionError.getMat();
if (type == CV_32F)
{
err.at<float>(0,static_cast<int>(i)) = static_cast<float>(rmse);
}
else
{
err.at<double>(0,static_cast<int>(i)) = rmse;
}
}
}

return solutions;
}

}
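
A sketch of retrieving every candidate pose together with its RMS reprojection error (inputs as in the earlier solvePnP sketch; a std::vector<double> output keeps the error type CV_64F):

    std::vector<cv::Mat> rvecs, tvecs;
    std::vector<double> errs;
    int n = cv::solvePnPGeneric(obj, img, K, cv::noArray(), rvecs, tvecs,
                                false, cv::SOLVEPNP_IPPE, cv::noArray(),
                                cv::noArray(), errs); // n solutions, n errors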
File diff suppressed because it is too large
@ -188,7 +188,7 @@ enum NormTypes {
norm = \forkthree
{ \| \texttt{src1} \| _{L_2} ^{2} = \sum_I \texttt{src1}(I)^2} {if \(\texttt{normType} = \texttt{NORM_L2SQR}\)}
{ \| \texttt{src1} - \texttt{src2} \| _{L_2} ^{2} = \sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2 }{if \(\texttt{normType} = \texttt{NORM_L2SQR}\) }
{ \left(\frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}}\right)^2 }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2}\) }
{ \left(\frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}}\right)^2 }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2SQR}\) }
\f]
*/
NORM_L2SQR = 5,
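
The corrected branch means that with NORM_RELATIVE | NORM_L2SQR the result is the squared relative L2 error; a small check sketch:

    cv::Mat a = (cv::Mat_<double>(1, 3) << 1, 2, 3);
    cv::Mat b = (cv::Mat_<double>(1, 3) << 1, 2, 5);
    double r = cv::norm(a, b, cv::NORM_RELATIVE | cv::NORM_L2SQR); // 4/30 ~ 0.133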

@ -34,11 +34,11 @@ public final class CvType {

public static final int makeType(int depth, int channels) {
if (channels <= 0 || channels >= CV_CN_MAX) {
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Channels count should be 1.." + (CV_CN_MAX - 1));
}
if (depth < 0 || depth >= CV_DEPTH_MAX) {
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Data type depth should be 0.." + (CV_DEPTH_MAX - 1));
}
return (depth & (CV_DEPTH_MAX - 1)) + ((channels - 1) << CV_CN_SHIFT);
@ -103,7 +103,7 @@ public final class CvType {
case CV_64F:
return 8 * channels(type);
default:
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Unsupported CvType value: " + type);
}
}
@ -136,7 +136,7 @@ public final class CvType {
s = "CV_16F";
break;
default:
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Unsupported CvType value: " + type);
}

@ -11,7 +11,7 @@ public class Mat {
public Mat(long addr)
{
if (addr == 0)
throw new java.lang.UnsupportedOperationException("Native object address is NULL");
throw new UnsupportedOperationException("Native object address is NULL");
nativeObj = addr;
}

@ -1074,7 +1074,7 @@ public class Mat {
public int put(int row, int col, double... data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1086,7 +1086,7 @@ public class Mat {
public int put(int[] idx, double... data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1100,7 +1100,7 @@ public class Mat {
public int put(int row, int col, float[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1108,14 +1108,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32F) {
return nPutF(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(idx,data)
public int put(int[] idx, float[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1125,14 +1125,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32F) {
return nPutFIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(row,col,data)
public int put(int row, int col, int[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1140,14 +1140,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32S) {
return nPutI(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(idx,data)
public int put(int[] idx, int[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1157,14 +1157,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32S) {
return nPutIIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(row,col,data)
public int put(int row, int col, short[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1172,14 +1172,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) {
return nPutS(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(idx,data)
public int put(int[] idx, short[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1189,14 +1189,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) {
return nPutSIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(row,col,data)
public int put(int row, int col, byte[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1204,14 +1204,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) {
return nPutB(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(idx,data)
public int put(int[] idx, byte[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1221,14 +1221,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) {
return nPutBIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(row,col,data,offset,length)
public int put(int row, int col, byte[] data, int offset, int length) {
int t = type();
if (data == null || length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1236,14 +1236,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) {
return nPutBwOffset(nativeObj, row, col, length, offset, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::put(idx,data,offset,length)
public int put(int[] idx, byte[] data, int offset, int length) {
int t = type();
if (data == null || length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1253,14 +1253,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) {
return nPutBwIdxOffset(nativeObj, idx, length, offset, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(row,col,data)
public int get(int row, int col, byte[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1268,14 +1268,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) {
return nGetB(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(idx,data)
public int get(int[] idx, byte[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1285,14 +1285,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) {
return nGetBIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(row,col,data)
public int get(int row, int col, short[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1300,14 +1300,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) {
return nGetS(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(idx,data)
public int get(int[] idx, short[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1317,14 +1317,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) {
return nGetSIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(row,col,data)
public int get(int row, int col, int[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1332,14 +1332,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32S) {
return nGetI(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(idx,data)
public int get(int[] idx, int[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1349,14 +1349,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32S) {
return nGetIIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(row,col,data)
public int get(int row, int col, float[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1364,14 +1364,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32F) {
return nGetF(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(idx,data)
public int get(int[] idx, float[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1381,14 +1381,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_32F) {
return nGetFIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(row,col,data)
public int get(int row, int col, double[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1396,14 +1396,14 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_64F) {
return nGetD(nativeObj, row, col, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(idx,data)
public int get(int[] idx, double[] data) {
int t = type();
if (data == null || data.length % CvType.channels(t) != 0)
throw new java.lang.UnsupportedOperationException(
throw new UnsupportedOperationException(
"Provided data element number (" +
(data == null ? 0 : data.length) +
") should be multiple of the Mat channels count (" +
@ -1413,7 +1413,7 @@ public class Mat {
if (CvType.depth(t) == CvType.CV_64F) {
return nGetDIdx(nativeObj, idx, data.length, data);
}
throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t);
throw new UnsupportedOperationException("Mat data type is not compatible: " + t);
}

// javadoc:Mat::get(row,col)

@ -816,6 +816,7 @@ CV__DNN_INLINE_NS_BEGIN
* * `*.t7` | `*.net` (Torch, http://torch.ch/)
* * `*.weights` (Darknet, https://pjreddie.com/darknet/)
* * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
* * `*.onnx` (ONNX, https://onnx.ai/)
* @param[in] config Text file containing the network configuration. It could be a
* file with the following extensions:
* * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
@ -864,6 +865,23 @@ CV__DNN_INLINE_NS_BEGIN
*/
CV_EXPORTS_W Net readNetFromONNX(const String &onnxFile);

/** @brief Reads a network model from <a href="https://onnx.ai/">ONNX</a>
* in-memory buffer.
* @param buffer memory address of the first byte of the buffer.
* @param sizeBuffer size of the buffer.
* @returns Network object that is ready to do the forward pass; throws an exception
* in failure cases.
*/
CV_EXPORTS Net readNetFromONNX(const char* buffer, size_t sizeBuffer);

/** @brief Reads a network model from <a href="https://onnx.ai/">ONNX</a>
* in-memory buffer.
* @param buffer in-memory buffer that stores the ONNX model bytes.
* @returns Network object that is ready to do the forward pass; throws an exception
* in failure cases.
*/
CV_EXPORTS_W Net readNetFromONNX(const std::vector<uchar>& buffer);
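
A usage sketch of the new in-memory overloads (the model path is illustrative): read the file into a byte buffer, then parse from memory.

    std::ifstream f("model.onnx", std::ios::binary);
    std::vector<uchar> buf((std::istreambuf_iterator<char>(f)),
                           std::istreambuf_iterator<char>());
    cv::dnn::Net net = cv::dnn::readNetFromONNX(buf);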

/** @brief Creates blob from .pb file.
* @param path to the .pb file with input tensor.
* @returns Mat.

@ -29,6 +29,8 @@ class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
public:
    Mat weights_, bias_;
    UMat umat_weight, umat_bias;
    mutable int dims;

    BatchNormLayerImpl(const LayerParams& params)
    {
@ -142,6 +144,7 @@ public:
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        dims = inputs[0].size();
        if (!useGlobalStats && inputs[0][0] != 1)
            CV_Error(Error::StsNotImplemented, "Batch normalization in training mode with batch size > 1");
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
@ -150,9 +153,9 @@ public:

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV ||
        return (backendId == DNN_BACKEND_OPENCV) ||
               (backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
    }

#ifdef HAVE_OPENCL
@ -178,11 +181,12 @@ public:
        }

        UMat &inpBlob = inputs[0];
        CV_Assert(inpBlob.dims == 2 || inpBlob.dims == 4);
        int groups = inpBlob.size[0];
        int channels = inpBlob.size[1];
        int rows = inpBlob.dims > 2 ? inpBlob.size[2] : 1;
        int cols = inpBlob.dims > 2 ? inpBlob.size[3] : 1;
        int planeSize = 1;
        for (size_t i = 2; i < inpBlob.dims; i++) {
            planeSize *= inpBlob.size[i];
        }

        String opts = (use_half) ? " -DDtype=half" : " -DDtype=float";
        for (size_t ii = 0; ii < outputs.size(); ii++)
@ -196,7 +200,7 @@ public:
            }
            else
            {
                MatShape s = shape(groups * channels, rows * cols);
                MatShape s = shape(groups * channels, planeSize);
                UMat src = inputs[ii].reshape(1, s.size(), &s[0]);
                UMat dst = outputs[ii].reshape(1, s.size(), &s[0]);
                int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);
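
As context for the reshape above: this path flattens an N-dimensional blob into a 2-D matrix with batch*channels rows and one column per element of the remaining (spatial) dimensions. A minimal standalone sketch of the same flattening on a cv::Mat; the 5-D shape here is an illustrative assumption:

#include <opencv2/core.hpp>

int main()
{
    const int sz[] = {2, 3, 4, 5, 6};     // (N, C, D, H, W)
    cv::Mat blob(5, sz, CV_32F);

    int planeSize = 1;
    for (int i = 2; i < blob.dims; ++i)
        planeSize *= blob.size[i];        // D * H * W = 120

    const int sz2d[] = {sz[0] * sz[1], planeSize};
    cv::Mat flat = blob.reshape(1, 2, sz2d);  // no data copy for a continuous Mat
    CV_Assert(flat.rows == 6 && flat.cols == 120);
    return 0;
}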
@ -248,9 +252,10 @@ public:
        CV_Assert(inputs.size() == 1);

        Mat &inpBlob = inputs[0];
        CV_Assert(inpBlob.dims == 2 || inpBlob.dims == 4);
        int rows = inpBlob.dims > 2 ? inpBlob.size[2] : 1;
        int cols = inpBlob.dims > 2 ? inpBlob.size[3] : 1;
        int planeSize = 1;
        for (size_t i = 2; i < inpBlob.dims; i++) {
            planeSize *= inpBlob.size[i];
        }

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
@ -262,8 +267,8 @@ public:
            {
                float w = weights_.at<float>(n);
                float b = bias_.at<float>(n);
                Mat inpBlobPlane(rows, cols, CV_32F, inpBlob.ptr<float>(num, n));
                Mat outBlobPlane(rows, cols, CV_32F, outBlob.ptr<float>(num, n));
                Mat inpBlobPlane(1, planeSize, CV_32F, inpBlob.ptr<float>(num, n));
                Mat outBlobPlane(1, planeSize, CV_32F, outBlob.ptr<float>(num, n));
                inpBlobPlane.convertTo(outBlobPlane, CV_32F, w, b);
            }
        }

@ -57,6 +57,24 @@ public:
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model");
    }

    ONNXImporter(const char* buffer, size_t sizeBuffer)
    {
        struct _Buf : public std::streambuf
        {
            _Buf(const char* buffer, size_t sizeBuffer)
            {
                char* p = const_cast<char*>(buffer);
                setg(p, p, p + sizeBuffer);
            }
        };

        _Buf buf(buffer, sizeBuffer);
        std::istream input(&buf);

        if (!model_proto.ParseFromIstream(&input))
            CV_Error(Error::StsUnsupportedFormat, "Failed to parse onnx model from in-memory byte array.");
    }

    void populateNet(Net dstNet);
};
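
The constructor above avoids copying the model bytes by pointing a std::streambuf's get area at the caller's buffer and handing protobuf a std::istream over it. A standalone sketch of that technique, independent of the patch:

#include <iostream>
#include <streambuf>
#include <string>

// Point a streambuf's get area at caller-owned memory; no copy is made.
struct MemBuf : public std::streambuf
{
    MemBuf(const char* data, size_t size)
    {
        char* p = const_cast<char*>(data);  // std::streambuf wants non-const pointers
        setg(p, p, p + size);
    }
};

int main()
{
    const char raw[] = "hello world";
    MemBuf buf(raw, sizeof(raw) - 1);
    std::istream in(&buf);                  // any istream consumer can now read the buffer

    std::string word;
    in >> word;
    std::cout << word << std::endl;         // prints "hello"
    return 0;
}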

@ -768,37 +786,42 @@ void ONNXImporter::populateNet(Net dstNet)
            }
            replaceLayerParam(layerParams, "mode", "interpolation");
        }
        else if (layer_type == "LogSoftmax")
        {
            layerParams.type = "Softmax";
            layerParams.set("log_softmax", true);
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
         }
        }

    int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
    layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));
        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
        layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));

    std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
    for (int j = 0; j < node_proto.input_size(); j++) {
        layerId = layer_id.find(node_proto.input(j));
        if (layerId != layer_id.end()) {
            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
            // Collect input shapes.
            shapeIt = outShapes.find(node_proto.input(j));
            CV_Assert(shapeIt != outShapes.end());
            layerInpShapes.push_back(shapeIt->second);
        }
    }
        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
        for (int j = 0; j < node_proto.input_size(); j++) {
            layerId = layer_id.find(node_proto.input(j));
            if (layerId != layer_id.end()) {
                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                // Collect input shapes.
                shapeIt = outShapes.find(node_proto.input(j));
                CV_Assert(shapeIt != outShapes.end());
                layerInpShapes.push_back(shapeIt->second);
            }
        }

    // Compute shape of output blob for this layer.
    Ptr<Layer> layer = dstNet.getLayer(id);
    layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
    CV_Assert(!layerOutShapes.empty());
    outShapes[layerParams.name] = layerOutShapes[0];
}
}
        // Compute shape of output blob for this layer.
        Ptr<Layer> layer = dstNet.getLayer(id);
        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
        CV_Assert(!layerOutShapes.empty());
        outShapes[layerParams.name] = layerOutShapes[0];
    }
}
Net readNetFromONNX(const String& onnxFile)
{
@ -808,6 +831,19 @@ Net readNetFromONNX(const String& onnxFile)
    return net;
}

Net readNetFromONNX(const char* buffer, size_t sizeBuffer)
{
    ONNXImporter onnxImporter(buffer, sizeBuffer);
    Net net;
    onnxImporter.populateNet(net);
    return net;
}

Net readNetFromONNX(const std::vector<uchar>& buffer)
{
    return readNetFromONNX(reinterpret_cast<const char*>(buffer.data()), buffer.size());
}

Mat readTensorFromONNX(const String& path)
{
    opencv_onnx::TensorProto tensor_proto = opencv_onnx::TensorProto();

@ -1423,6 +1423,43 @@ void TFImporter::populateNet(Net dstNet)

            connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
        }
        else if (type == "StridedSlice")
        {
            CV_Assert(layer.input_size() == 4);
            Mat begins = getTensorContent(getConstBlob(layer, value_id, 1));
            Mat ends = getTensorContent(getConstBlob(layer, value_id, 2));
            Mat strides = getTensorContent(getConstBlob(layer, value_id, 3));
            CV_CheckTypeEQ(begins.type(), CV_32SC1, "");
            CV_CheckTypeEQ(ends.type(), CV_32SC1, "");
            CV_CheckTypeEQ(strides.type(), CV_32SC1, "");
            const int num = begins.total();
            CV_Assert_N(num == ends.total(), num == strides.total());

            int end_mask = getLayerAttr(layer, "end_mask").i();
            for (int i = 0; i < num; ++i)
            {
                if (end_mask & (1 << i))
                    ends.at<int>(i) = -1;
                if (strides.at<int>(i) != 1)
                    CV_Error(Error::StsNotImplemented,
                             format("StridedSlice with stride %d", strides.at<int>(i)));
            }
            if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
            {
                // Swap NHWC parameters' order to NCHW.
                std::swap(begins.at<int>(2), begins.at<int>(3));
                std::swap(begins.at<int>(1), begins.at<int>(2));
                std::swap(ends.at<int>(2), ends.at<int>(3));
                std::swap(ends.at<int>(1), ends.at<int>(2));
            }
            layerParams.set("begin", DictValue::arrayInt((int*)begins.data, begins.total()));
            layerParams.set("end", DictValue::arrayInt((int*)ends.data, ends.total()));

            int id = dstNet.addLayer(name, "Slice", layerParams);
            layer_id[name] = id;

            connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
        }
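
The end_mask handling above follows TensorFlow's StridedSlice semantics: if bit i of end_mask is set, the end index for dimension i is ignored and the slice runs to the end of that dimension, which OpenCV's Slice layer encodes as -1. A standalone sketch of that bit handling, with illustrative values not taken from the patch:

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> ends = {2, 5, 7, 9};
    const int end_mask = 0x5;                // bits 0 and 2 set: dims 0 and 2 run to the end
    for (size_t i = 0; i < ends.size(); ++i)
        if (end_mask & (1 << i))
            ends[i] = -1;                    // -1 means "up to the end" in OpenCV's Slice layer
    for (int e : ends)
        std::printf("%d ", e);               // prints: -1 5 -1 9
    return 0;
}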
        else if (type == "Mul")
        {
            bool haveConst = false;
@ -167,6 +167,13 @@ TEST_P(Test_ONNX_layers, BatchNormalization)
    testONNXModels("batch_norm");
}

TEST_P(Test_ONNX_layers, BatchNormalization3D)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
        throw SkipTestException("");
    testONNXModels("batch_norm_3d");
}

TEST_P(Test_ONNX_layers, Transpose)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
@ -238,6 +245,12 @@ TEST_P(Test_ONNX_layers, Reshape)
    testONNXModels("unsqueeze");
}

TEST_P(Test_ONNX_layers, Softmax)
{
    testONNXModels("softmax");
    testONNXModels("log_softmax", npy, 0, 0, false, false);
}

INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());

class Test_ONNX_nets : public Test_ONNX_layers {};

@ -188,6 +188,13 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
    runTensorFlowNet("mvn_batch_norm_1x1");
}

TEST_P(Test_TensorFlow_layers, batch_norm3D)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
        throw SkipTestException("");
    runTensorFlowNet("batch_norm3d");
}

TEST_P(Test_TensorFlow_layers, slim_batch_norm)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
@ -656,6 +663,7 @@ TEST_P(Test_TensorFlow_layers, slice)
        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("slice_4d");
    runTensorFlowNet("strided_slice");
}

TEST_P(Test_TensorFlow_layers, softmax)

@ -1,7 +1,5 @@
package org.opencv.imgproc;

import java.lang.Math;

//javadoc:Moments
public class Moments {
File diff suppressed because it is too large
@ -544,21 +544,41 @@ float cv::intersectConvexConvex( InputArray _p1, InputArray _p2, OutputArray _p1
        return 0.f;
    }

    if( pointPolygonTest(_InputArray(fp1, n), fp2[0], false) >= 0 )
    bool intersected = false;

    // check if all of fp2's vertices are inside/on the edge of fp1.
    int nVertices = 0;
    for (int i = 0; i < m; ++i)
        nVertices += pointPolygonTest(_InputArray(fp1, n), fp2[i], false) >= 0;

    // if all of fp2's vertices are inside/on the edge of fp1.
    if (nVertices == m)
    {
        intersected = true;
        result = fp2;
        nr = m;
    }
    else if( pointPolygonTest(_InputArray(fp2, m), fp1[0], false) >= 0 )
    else // otherwise check if fp1 is inside fp2.
    {
        result = fp1;
        nr = n;
        nVertices = 0;
        for (int i = 0; i < n; ++i)
            nVertices += pointPolygonTest(_InputArray(fp2, m), fp1[i], false) >= 0;

        // if all of fp1's vertices are inside/on the edge of fp2.
        if (nVertices == n)
        {
            intersected = true;
            result = fp1;
            nr = n;
        }
    }
    else

    if (!intersected)
    {
        _p12.release();
        return 0.f;
    }

    area = (float)contourArea(_InputArray(result, nr), false);
}
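
For reference, a minimal usage sketch of cv::intersectConvexConvex with two overlapping squares; the coordinates are illustrative, not taken from the patch or the tests below:

#include <cstdio>
#include <vector>
#include <opencv2/imgproc.hpp>

int main()
{
    // Two overlapping axis-aligned squares.
    std::vector<cv::Point> a = { {0, 0}, {10, 0}, {10, 10}, {0, 10} };
    std::vector<cv::Point> b = { {5, 5}, {15, 5}, {15, 15}, {5, 15} };

    std::vector<cv::Point> inter;
    float area = cv::intersectConvexConvex(a, b, inter);   // expected area: 25
    std::printf("area = %.1f, vertices = %zu\n", area, inter.size());
    return 0;
}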
@ -2687,9 +2687,9 @@ TEST(Imgproc_ColorLab_Full, bitExactness)
                << "Iteration: " << iter << endl
                << "Hash vs Correct hash: " << h << ", " << goodHash << endl
                << "Error in: (" << x << ", " << y << ")" << endl
                << "Reference value: " << gx[0] << " " << gx[1] << " " << gx[2] << endl
                << "Actual value: " << rx[0] << " " << rx[1] << " " << rx[2] << endl
                << "Src value: " << px[0] << " " << px[1] << " " << px[2] << endl
                << "Reference value: " << int(gx[0]) << " " << int(gx[1]) << " " << int(gx[2]) << endl
                << "Actual value: " << int(rx[0]) << " " << int(rx[1]) << " " << int(rx[2]) << endl
                << "Src value: " << int(px[0]) << " " << int(px[1]) << " " << int(px[2]) << endl
                << "Size: (" << probe.rows << ", " << probe.cols << ")" << endl;

            break;
@ -2780,9 +2780,9 @@ TEST(Imgproc_ColorLuv_Full, bitExactness)
                << "Iteration: " << iter << endl
                << "Hash vs Correct hash: " << h << ", " << goodHash << endl
                << "Error in: (" << x << ", " << y << ")" << endl
                << "Reference value: " << gx[0] << " " << gx[1] << " " << gx[2] << endl
                << "Actual value: " << rx[0] << " " << rx[1] << " " << rx[2] << endl
                << "Src value: " << px[0] << " " << px[1] << " " << px[2] << endl
                << "Reference value: " << int(gx[0]) << " " << int(gx[1]) << " " << int(gx[2]) << endl
                << "Actual value: " << int(rx[0]) << " " << int(rx[1]) << " " << int(rx[2]) << endl
                << "Src value: " << int(px[0]) << " " << int(px[1]) << " " << int(px[2]) << endl
                << "Size: (" << probe.rows << ", " << probe.cols << ")" << endl;

            break;
modules/imgproc/test/test_intersectconvexconvex.cpp (new file, 260 lines)
@ -0,0 +1,260 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "test_precomp.hpp"

namespace opencv_test { namespace {


TEST(Imgproc_IntersectConvexConvex, no_intersection)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(290, 126));
    convex1.push_back(cv::Point(284, 132));
    convex1.push_back(cv::Point(281, 133));
    convex1.push_back(cv::Point(256, 124));
    convex1.push_back(cv::Point(249, 116));
    convex1.push_back(cv::Point(234, 91));
    convex1.push_back(cv::Point(232, 86));
    convex1.push_back(cv::Point(232, 79));
    convex1.push_back(cv::Point(251, 69));
    convex1.push_back(cv::Point(257, 68));
    convex1.push_back(cv::Point(297, 85));
    convex1.push_back(cv::Point(299, 87));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(192, 236));
    convex2.push_back(cv::Point(190, 245));
    convex2.push_back(cv::Point(177, 260));
    convex2.push_back(cv::Point(154, 271));
    convex2.push_back(cv::Point(142, 270));
    convex2.push_back(cv::Point(135, 263));
    convex2.push_back(cv::Point(131, 254));
    convex2.push_back(cv::Point(132, 240));
    convex2.push_back(cv::Point(172, 213));
    convex2.push_back(cv::Point(176, 216));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    EXPECT_TRUE(intersection.empty());
    EXPECT_NEAR(area, 0, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, no_intersection_with_1_vertex_on_edge_1)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(0, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(0, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(0, 210));
    convex2.push_back(cv::Point(-30, 210));
    convex2.push_back(cv::Point(-37, 170));
    convex2.push_back(cv::Point(-7, 172));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    EXPECT_TRUE(intersection.empty());
    EXPECT_NEAR(area, 0, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, no_intersection_with_1_vertex_on_edge_2)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(0, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(0, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(740, 210));
    convex2.push_back(cv::Point(750, 100));
    convex2.push_back(cv::Point(790, 250));
    convex2.push_back(cv::Point(800, 500));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    EXPECT_TRUE(intersection.empty());
    EXPECT_NEAR(area, 0, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, intersection_with_1_vertex_on_edge)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(0, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(0, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(30, 210));
    convex2.push_back(cv::Point(0, 210));
    convex2.push_back(cv::Point(7, 172));
    convex2.push_back(cv::Point(37, 170));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    std::vector<cv::Point> expected_intersection;
    expected_intersection.push_back(cv::Point(0, 210));
    expected_intersection.push_back(cv::Point(7, 172));
    expected_intersection.push_back(cv::Point(37, 170));
    expected_intersection.push_back(cv::Point(30, 210));

    EXPECT_EQ(intersection, expected_intersection);
    EXPECT_NEAR(area, 1163, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, intersection_with_2_vertices_on_edge)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(0, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(0, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(30, 210));
    convex2.push_back(cv::Point(37, 170));
    convex2.push_back(cv::Point(0, 210));
    convex2.push_back(cv::Point(0, 300));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    std::vector<cv::Point> expected_intersection;
    expected_intersection.push_back(cv::Point(0, 300));
    expected_intersection.push_back(cv::Point(0, 210));
    expected_intersection.push_back(cv::Point(37, 170));
    expected_intersection.push_back(cv::Point(30, 210));

    EXPECT_EQ(intersection, expected_intersection);
    EXPECT_NEAR(area, 1950, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, intersection_1)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(0, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(0, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(20, 210));
    convex2.push_back(cv::Point(30, 210));
    convex2.push_back(cv::Point(37, 170));
    convex2.push_back(cv::Point(7, 172));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    std::vector<cv::Point> expected_intersection;
    expected_intersection.push_back(cv::Point(7, 172));
    expected_intersection.push_back(cv::Point(37, 170));
    expected_intersection.push_back(cv::Point(30, 210));
    expected_intersection.push_back(cv::Point(20, 210));

    EXPECT_EQ(intersection, expected_intersection);
    EXPECT_NEAR(area, 783, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, intersection_2)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(0, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(0, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(-2, 210));
    convex2.push_back(cv::Point(-5, 300));
    convex2.push_back(cv::Point(37, 150));
    convex2.push_back(cv::Point(7, 172));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    std::vector<cv::Point> expected_intersection;
    expected_intersection.push_back(cv::Point(0, 202));
    expected_intersection.push_back(cv::Point(7, 172));
    expected_intersection.push_back(cv::Point(37, 150));
    expected_intersection.push_back(cv::Point(0, 282));

    EXPECT_EQ(intersection, expected_intersection);
    EXPECT_NEAR(area, 1857.19836425781, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, intersection_3)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(15, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(15, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(0, 210));
    convex2.push_back(cv::Point(30, 210));
    convex2.push_back(cv::Point(37, 170));
    convex2.push_back(cv::Point(7, 172));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    std::vector<cv::Point> expected_intersection;
    expected_intersection.push_back(cv::Point(15, 171));
    expected_intersection.push_back(cv::Point(37, 170));
    expected_intersection.push_back(cv::Point(30, 210));
    expected_intersection.push_back(cv::Point(15, 210));

    EXPECT_EQ(intersection, expected_intersection);

    EXPECT_NEAR(area, 723.866760253906, std::numeric_limits<float>::epsilon());
}


TEST(Imgproc_IntersectConvexConvex, intersection_4)
{
    std::vector<cv::Point> convex1;
    convex1.push_back(cv::Point(15, 0));
    convex1.push_back(cv::Point(740, 0));
    convex1.push_back(cv::Point(740, 540));
    convex1.push_back(cv::Point(15, 540));

    std::vector<cv::Point> convex2;
    convex2.push_back(cv::Point(15, 0));
    convex2.push_back(cv::Point(740, 0));
    convex2.push_back(cv::Point(740, 540));
    convex2.push_back(cv::Point(15, 540));

    std::vector<cv::Point> intersection;
    float area = cv::intersectConvexConvex(convex1, convex2, intersection);

    std::vector<cv::Point> expected_intersection;
    expected_intersection.push_back(cv::Point(15, 0));
    expected_intersection.push_back(cv::Point(740, 0));
    expected_intersection.push_back(cv::Point(740, 540));
    expected_intersection.push_back(cv::Point(15, 540));

    EXPECT_EQ(intersection, expected_intersection);
    EXPECT_NEAR(area, 391500, std::numeric_limits<float>::epsilon());
}


} // namespace
} // opencv_test
@ -87,9 +87,9 @@ public class Utils {
     */
    public static void bitmapToMat(Bitmap bmp, Mat mat, boolean unPremultiplyAlpha) {
        if (bmp == null)
            throw new java.lang.IllegalArgumentException("bmp == null");
            throw new IllegalArgumentException("bmp == null");
        if (mat == null)
            throw new java.lang.IllegalArgumentException("mat == null");
            throw new IllegalArgumentException("mat == null");
        nBitmapToMat2(bmp, mat.nativeObj, unPremultiplyAlpha);
    }

@ -117,9 +117,9 @@ public class Utils {
     */
    public static void matToBitmap(Mat mat, Bitmap bmp, boolean premultiplyAlpha) {
        if (mat == null)
            throw new java.lang.IllegalArgumentException("mat == null");
            throw new IllegalArgumentException("mat == null");
        if (bmp == null)
            throw new java.lang.IllegalArgumentException("bmp == null");
            throw new IllegalArgumentException("bmp == null");
        nMatToBitmap2(mat.nativeObj, bmp, premultiplyAlpha);
    }

@ -159,11 +159,11 @@ public class Converters {

    public static void Mat_to_vector_Point(Mat m, List<Point> pts) {
        if (pts == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");
        int count = m.rows();
        int type = m.type();
        if (m.cols() != 1)
            throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m);
            throw new IllegalArgumentException("Input Mat should have one column\n" + m);

        pts.clear();
        if (type == CvType.CV_32SC2) {
@ -185,7 +185,7 @@ public class Converters {
                pts.add(new Point(buff[i * 2], buff[i * 2 + 1]));
            }
        } else {
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "Input Mat should be of CV_32SC2, CV_32FC2 or CV_64FC2 type\n" + m);
        }
    }
@ -204,11 +204,11 @@ public class Converters {

    public static void Mat_to_vector_Point3(Mat m, List<Point3> pts) {
        if (pts == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");
        int count = m.rows();
        int type = m.type();
        if (m.cols() != 1)
            throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m);
            throw new IllegalArgumentException("Input Mat should have one column\n" + m);

        pts.clear();
        if (type == CvType.CV_32SC3) {
@ -230,7 +230,7 @@ public class Converters {
                pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2]));
            }
        } else {
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "Input Mat should be of CV_32SC3, CV_32FC3 or CV_64FC3 type\n" + m);
        }
    }
@ -255,10 +255,10 @@ public class Converters {

    public static void Mat_to_vector_Mat(Mat m, List<Mat> mats) {
        if (mats == null)
            throw new java.lang.IllegalArgumentException("mats == null");
            throw new IllegalArgumentException("mats == null");
        int count = m.rows();
        if (CvType.CV_32SC2 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_32SC2 != m.type() || m.cols()!=1\n" + m);

        mats.clear();
@ -289,10 +289,10 @@ public class Converters {

    public static void Mat_to_vector_float(Mat m, List<Float> fs) {
        if (fs == null)
            throw new java.lang.IllegalArgumentException("fs == null");
            throw new IllegalArgumentException("fs == null");
        int count = m.rows();
        if (CvType.CV_32FC1 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_32FC1 != m.type() || m.cols()!=1\n" + m);

        fs.clear();
@ -322,10 +322,10 @@ public class Converters {

    public static void Mat_to_vector_uchar(Mat m, List<Byte> us) {
        if (us == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");
        int count = m.rows();
        if (CvType.CV_8UC1 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_8UC1 != m.type() || m.cols()!=1\n" + m);

        us.clear();
@ -372,10 +372,10 @@ public class Converters {

    public static void Mat_to_vector_int(Mat m, List<Integer> is) {
        if (is == null)
            throw new java.lang.IllegalArgumentException("is == null");
            throw new IllegalArgumentException("is == null");
        int count = m.rows();
        if (CvType.CV_32SC1 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_32SC1 != m.type() || m.cols()!=1\n" + m);

        is.clear();
@ -388,10 +388,10 @@ public class Converters {

    public static void Mat_to_vector_char(Mat m, List<Byte> bs) {
        if (bs == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");
        int count = m.rows();
        if (CvType.CV_8SC1 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_8SC1 != m.type() || m.cols()!=1\n" + m);

        bs.clear();
@ -424,10 +424,10 @@ public class Converters {

    public static void Mat_to_vector_Rect(Mat m, List<Rect> rs) {
        if (rs == null)
            throw new java.lang.IllegalArgumentException("rs == null");
            throw new IllegalArgumentException("rs == null");
        int count = m.rows();
        if (CvType.CV_32SC4 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_32SC4 != m.type() || m.rows()!=1\n" + m);

        rs.clear();
@ -460,10 +460,10 @@ public class Converters {

    public static void Mat_to_vector_Rect2d(Mat m, List<Rect2d> rs) {
        if (rs == null)
            throw new java.lang.IllegalArgumentException("rs == null");
            throw new IllegalArgumentException("rs == null");
        int count = m.rows();
        if (CvType.CV_64FC4 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_64FC4 != m.type() || m.rows()!=1\n" + m);

        rs.clear();
@ -499,10 +499,10 @@ public class Converters {

    public static void Mat_to_vector_KeyPoint(Mat m, List<KeyPoint> kps) {
        if (kps == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");
        int count = m.rows();
        if (CvType.CV_64FC(7) != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_64FC(7) != m.type() || m.cols()!=1\n" + m);

        kps.clear();
@ -530,10 +530,10 @@ public class Converters {

    public static void Mat_to_vector_vector_Point(Mat m, List<MatOfPoint> pts) {
        if (pts == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");

        if (m == null)
            throw new java.lang.IllegalArgumentException("Input Mat can't be null");
            throw new IllegalArgumentException("Input Mat can't be null");

        List<Mat> mats = new ArrayList<Mat>(m.rows());
        Mat_to_vector_Mat(m, mats);
@ -548,10 +548,10 @@ public class Converters {
    // vector_vector_Point2f
    public static void Mat_to_vector_vector_Point2f(Mat m, List<MatOfPoint2f> pts) {
        if (pts == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");

        if (m == null)
            throw new java.lang.IllegalArgumentException("Input Mat can't be null");
            throw new IllegalArgumentException("Input Mat can't be null");

        List<Mat> mats = new ArrayList<Mat>(m.rows());
        Mat_to_vector_Mat(m, mats);
@ -580,10 +580,10 @@ public class Converters {
    // vector_vector_Point3f
    public static void Mat_to_vector_vector_Point3f(Mat m, List<MatOfPoint3f> pts) {
        if (pts == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");

        if (m == null)
            throw new java.lang.IllegalArgumentException("Input Mat can't be null");
            throw new IllegalArgumentException("Input Mat can't be null");

        List<Mat> mats = new ArrayList<Mat>(m.rows());
        Mat_to_vector_Mat(m, mats);
@ -625,10 +625,10 @@ public class Converters {

    public static void Mat_to_vector_vector_KeyPoint(Mat m, List<MatOfKeyPoint> kps) {
        if (kps == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");

        if (m == null)
            throw new java.lang.IllegalArgumentException("Input Mat can't be null");
            throw new IllegalArgumentException("Input Mat can't be null");

        List<Mat> mats = new ArrayList<Mat>(m.rows());
        Mat_to_vector_Mat(m, mats);
@ -659,10 +659,10 @@ public class Converters {

    public static void Mat_to_vector_double(Mat m, List<Double> ds) {
        if (ds == null)
            throw new java.lang.IllegalArgumentException("ds == null");
            throw new IllegalArgumentException("ds == null");
        int count = m.rows();
        if (CvType.CV_64FC1 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_64FC1 != m.type() || m.cols()!=1\n" + m);

        ds.clear();
@ -695,10 +695,10 @@ public class Converters {

    public static void Mat_to_vector_DMatch(Mat m, List<DMatch> matches) {
        if (matches == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");
        int count = m.rows();
        if (CvType.CV_64FC4 != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_64FC4 != m.type() || m.cols()!=1\n" + m);

        matches.clear();
@ -725,10 +725,10 @@ public class Converters {

    public static void Mat_to_vector_vector_DMatch(Mat m, List<MatOfDMatch> lvdm) {
        if (lvdm == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");

        if (m == null)
            throw new java.lang.IllegalArgumentException("Input Mat can't be null");
            throw new IllegalArgumentException("Input Mat can't be null");

        List<Mat> mats = new ArrayList<Mat>(m.rows());
        Mat_to_vector_Mat(m, mats);
@ -757,10 +757,10 @@ public class Converters {

    public static void Mat_to_vector_vector_char(Mat m, List<List<Byte>> llb) {
        if (llb == null)
            throw new java.lang.IllegalArgumentException("Output List can't be null");
            throw new IllegalArgumentException("Output List can't be null");

        if (m == null)
            throw new java.lang.IllegalArgumentException("Input Mat can't be null");
            throw new IllegalArgumentException("Input Mat can't be null");

        List<Mat> mats = new ArrayList<Mat>(m.rows());
        Mat_to_vector_Mat(m, mats);
@ -796,10 +796,10 @@ public class Converters {

    public static void Mat_to_vector_RotatedRect(Mat m, List<RotatedRect> rs) {
        if (rs == null)
            throw new java.lang.IllegalArgumentException("rs == null");
            throw new IllegalArgumentException("rs == null");
        int count = m.rows();
        if (CvType.CV_32FC(5) != m.type() || m.cols() != 1)
            throw new java.lang.IllegalArgumentException(
            throw new IllegalArgumentException(
                    "CvType.CV_32FC5 != m.type() || m.rows()!=1\n" + m);

        rs.clear();

@ -31,7 +31,13 @@ def createFasterRCNNGraph(modelPath, configPath, outputPath):
    aspect_ratios = [float(ar) for ar in grid_anchor_generator['aspect_ratios']]
    width_stride = float(grid_anchor_generator['width_stride'][0])
    height_stride = float(grid_anchor_generator['height_stride'][0])
    features_stride = float(config['feature_extractor'][0]['first_stage_features_stride'][0])

    feature_extractor = config['feature_extractor'][0]
    if 'type' in feature_extractor and feature_extractor['type'][0] == 'faster_rcnn_nas':
        features_stride = 16.0
    else:
        features_stride = float(feature_extractor['first_stage_features_stride'][0])

    first_stage_nms_iou_threshold = float(config['first_stage_nms_iou_threshold'][0])
    first_stage_max_proposals = int(config['first_stage_max_proposals'][0])