Doxygen documentation: more fixes and cleanups

Maksim Shabunin 2014-11-21 11:28:14 +03:00
parent f9a83c28e5
commit 1523fdcc1c
41 changed files with 894 additions and 859 deletions

View File

@ -146,43 +146,59 @@ if(BUILD_DOCS AND HAVE_SPHINX)
endif()
# ========= Doxygen docs =========
macro(make_reference result modules_list black_list)
set(_res)
foreach(m ${${modules_list}})
list(FIND ${black_list} ${m} _pos)
if(${_pos} EQUAL -1)
set(_res "${_res} @ref ${m} | ${m} \n")
endif()
endforeach()
set(${result} ${_res})
endmacro()
if(BUILD_DOCS AND HAVE_DOXYGEN)
# documented modules list
set(candidates)
list(APPEND candidates ${BASE_MODULES} ${EXTRA_MODULES})
# blacklisted modules
ocv_list_filterout(candidates "^ts$")
# not documented modules list
list(APPEND blacklist "ts" "java" "python2" "python3" "world")
# gathering headers
set(all_headers) # files and dirs to process
set(all_images) # image search paths
set(reflist) # modules reference
foreach(m ${candidates})
set(reflist "${reflist} \n- @subpage ${m}")
set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
if(EXISTS ${header_dir})
set(all_headers ${all_headers} ${header_dir})
set(paths_include)
set(paths_doc)
set(paths_bib)
foreach(m ${BASE_MODULES} ${EXTRA_MODULES})
list(FIND blacklist ${m} _pos)
if(${_pos} EQUAL -1)
set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
if(EXISTS "${header_dir}")
list(APPEND paths_include "${header_dir}")
endif()
set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
if(EXISTS "${docs_dir}")
list(APPEND paths_doc "${docs_dir}")
file(GLOB bib_file "${docs_dir}/*.bib")
if(EXISTS "${bib_file}")
list(APPEND paths_bib "${bib_file}")
endif()
endif()
endif()
set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
if(EXISTS ${docs_dir})
set(all_images ${all_images} ${docs_dir})
set(all_headers ${all_headers} ${docs_dir})
endif()
endforeach()
# additional config
set(doxyfile "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
set(rootfile "${CMAKE_CURRENT_BINARY_DIR}/root.markdown")
set(all_headers ${all_headers} ${rootfile})
string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_INPUT_LIST "${all_headers}")
string(REGEX REPLACE ";" " \\\\\\n" CMAKE_DOXYGEN_IMAGE_PATH "${all_images}")
set(bibfile "${CMAKE_CURRENT_SOURCE_DIR}/opencv.bib")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${paths_include} ; ${paths_doc}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp ; ${paths_doc}")
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml")
set(CMAKE_DOXYGEN_OUTPUT_PATH "doxygen")
set(CMAKE_DOXYGEN_MODULES_REFERENCE "${reflist}")
set(CMAKE_DOXYGEN_EXAMPLE_PATH "${CMAKE_SOURCE_DIR}/samples/cpp")
set(CMAKE_EXTRA_BIB_FILES "${bibfile} ${paths_bib}")
# generate references
make_reference(CMAKE_DOXYGEN_MAIN_REFERENCE BASE_MODULES blacklist)
make_reference(CMAKE_DOXYGEN_EXTRA_REFERENCE EXTRA_MODULES blacklist)
# writing file
configure_file(Doxyfile.in ${doxyfile} @ONLY)

View File

@ -85,7 +85,7 @@ SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE = @CMAKE_DOXYGEN_LAYOUT@
CITE_BIB_FILES = @CMAKE_CURRENT_SOURCE_DIR@/opencv.bib
CITE_BIB_FILES = @CMAKE_EXTRA_BIB_FILES@
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
@ -222,6 +222,7 @@ INCLUDE_FILE_PATTERNS =
PREDEFINED = __cplusplus=1 \
HAVE_IPP_A=1 \
CVAPI(x)=x \
CV_DOXYGEN= \
CV_EXPORTS= \
CV_EXPORTS_W= \
CV_EXPORTS_W_SIMPLE= \
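For context, these PREDEFINED entries make Doxygen's preprocessor expand OpenCV's export/wrapper macros to nothing (or to the bare argument, for CVAPI), so generated signatures stay clean. A small illustration, not part of the commit; foo and cvBaz are hypothetical names:

```cpp
// Hypothetical declarations, showing the effect of the PREDEFINED entries.
#define CV_EXPORTS_W            // PREDEFINED: CV_EXPORTS_W=
#define CVAPI(x) x              // PREDEFINED: CVAPI(x)=x

CV_EXPORTS_W void foo(int bar); // Doxygen documents this as: void foo(int bar);
CVAPI(int) cvBaz(void);         // Doxygen documents this as: int cvBaz(void);
```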

View File

@ -3,9 +3,14 @@ OpenCV modules {#mainpage}
@subpage intro
Module name | Folder
------------- | -------------
@ref core | core
@ref imgproc | imgproc
### Main modules
<!-- @CMAKE_DOXYGEN_MODULES_REFERENCE@ -->
Module name | Folder
-------------- | -------------
@CMAKE_DOXYGEN_MAIN_REFERENCE@
### Extra modules
Module name | Folder
-------------- | -------------
@CMAKE_DOXYGEN_EXTRA_REFERENCE@

View File

@ -3,6 +3,12 @@
#include <camera_properties.h>
/** @defgroup androidcamera Android Camera Support
*/
//! @addtogroup androidcamera
//! @{
class CameraActivity
{
public:
@ -44,4 +50,6 @@ private:
int frameHeight;
};
//! @}
#endif

View File

@ -1,6 +1,9 @@
#ifndef CAMERA_PROPERTIES_H
#define CAMERA_PROPERTIES_H
//! @addtogroup androidcamera
//! @{
enum {
ANDROID_CAMERA_PROPERTY_FRAMEWIDTH = 0,
ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT = 1,
@ -67,4 +70,6 @@ enum {
ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX
};
//! @}
#endif // CAMERA_PROPERTIES_H

View File

@ -127,19 +127,19 @@ pattern (every view is described by several 3D-2D point correspondences).
@note
- A calibration sample for 3 cameras in horizontal position can be found at
opencv\_source\_code/samples/cpp/3calibration.cpp
opencv_source_code/samples/cpp/3calibration.cpp
- A calibration sample based on a sequence of images can be found at
opencv\_source\_code/samples/cpp/calibration.cpp
opencv_source_code/samples/cpp/calibration.cpp
- A calibration sample in order to do 3D reconstruction can be found at
opencv\_source\_code/samples/cpp/build3dmodel.cpp
opencv_source_code/samples/cpp/build3dmodel.cpp
- A calibration sample of an artificially generated camera and chessboard patterns can be
found at opencv\_source\_code/samples/cpp/calibration\_artificial.cpp
found at opencv_source_code/samples/cpp/calibration_artificial.cpp
- A calibration example on stereo calibration can be found at
opencv\_source\_code/samples/cpp/stereo\_calib.cpp
opencv_source_code/samples/cpp/stereo_calib.cpp
- A calibration example on stereo matching can be found at
opencv\_source\_code/samples/cpp/stereo\_match.cpp
opencv_source_code/samples/cpp/stereo_match.cpp
- (Python) A camera calibration sample can be found at
opencv\_source\_code/samples/python2/calibrate.py
opencv_source_code/samples/python2/calibrate.py
@{
@defgroup calib3d_fisheye Fisheye camera model
@ -257,9 +257,9 @@ CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobi
/** @brief Finds a perspective transformation between two planes.
@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV\_32FC2
@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector\<Point2f\> .
@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV\_32FC2 or
@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector\<Point2f\> .
@param method Method used to computed a homography matrix. The following methods are possible:
- **0** - a regular method using all the points
@ -315,7 +315,7 @@ cannot be estimated, an empty one will be returned.
@note
- A example on calculating a homography for image matching can be found at
opencv\_source\_code/samples/cpp/video\_homography.cpp
opencv_source_code/samples/cpp/video_homography.cpp
*/
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
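A minimal usage sketch for the declaration above (not part of the commit; the matched point sets are assumed to come from a feature matcher and contain at least four pairs):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

cv::Mat estimateH(const std::vector<cv::Point2f>& srcPts,
                  const std::vector<cv::Point2f>& dstPts)
{
    cv::Mat mask; // one row per pair; non-zero marks RANSAC inliers
    cv::Mat H = cv::findHomography(srcPts, dstPts, cv::RANSAC,
                                   3 /* ransacReprojThreshold, pixels */, mask);
    return H;     // empty if the homography could not be estimated
}
```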
@ -485,22 +485,22 @@ the vector is NULL/empty, the zero distortion coefficients are assumed.
@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
the model coordinate system to the camera coordinate system.
@param tvec Output translation vector.
@param useExtrinsicGuess Parameter used for SOLVEPNP\_ITERATIVE. If true (1), the function uses
@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param flags Method for solving a PnP problem:
- **SOLVEPNP\_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In
- **SOLVEPNP_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In
this case the function finds such a pose that minimizes reprojection error, that is the sum
of squared distances between the observed projections imagePoints and the projected (using
projectPoints ) objectPoints .
- **SOLVEPNP\_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
"Complete Solution Classification for the Perspective-Three-Point Problem". In this case the
function requires exactly four object and image points.
- **SOLVEPNP\_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
- **SOLVEPNP_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
- **SOLVEPNP\_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
- **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
"A Direct Least-Squares (DLS) Method for PnP".
- **SOLVEPNP\_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
- **SOLVEPNP_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
Estimation". In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
assuming that both have the same value. Then the cameraMatrix is updated with the estimated
@ -511,7 +511,7 @@ projections, as well as the camera matrix and the distortion coefficients.
@note
- An example of how to use solvePnP for planar augmented reality can be found at
opencv\_source\_code/samples/python2/plane\_ar.py
opencv_source_code/samples/python2/plane_ar.py
*/
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
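A minimal sketch of a solvePnP call (not part of the commit; cameraMatrix and distCoeffs are assumed to come from a prior calibrateCamera run):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

// objPts holds model coordinates, imgPts their observed projections
// (same count, at least four points for SOLVEPNP_ITERATIVE).
bool poseFromPoints(const std::vector<cv::Point3f>& objPts,
                    const std::vector<cv::Point2f>& imgPts,
                    const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                    cv::Mat& rvec, cv::Mat& tvec)
{
    // rvec can be expanded to a 3x3 rotation matrix with cv::Rodrigues.
    return cv::solvePnP(objPts, imgPts, cameraMatrix, distCoeffs,
                        rvec, tvec, false, cv::SOLVEPNP_ITERATIVE);
}
```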
@ -531,7 +531,7 @@ the vector is NULL/empty, the zero distortion coefficients are assumed.
@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
the model coordinate system to the camera coordinate system.
@param tvec Output translation vector.
@param useExtrinsicGuess Parameter used for SOLVEPNP\_ITERATIVE. If true (1), the function uses
@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param iterationsCount Number of iterations.
@ -550,7 +550,7 @@ makes the function resistant to outliers.
@note
- An example of how to use solvePNPRansac for object detection can be found at
opencv\_source\_code/samples/cpp/tutorial\_code/calib3d/real\_time\_pose\_estimation/
opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
*/
CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
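And the robust variant, a sketch assuming the 3.0-era signature with a confidence parameter (check the declaration above if the argument list has drifted):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

void robustPose(const std::vector<cv::Point3f>& objPts,
                const std::vector<cv::Point2f>& imgPts,
                const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
    cv::Mat rvec, tvec, inliers;
    cv::solvePnPRansac(objPts, imgPts, cameraMatrix, distCoeffs, rvec, tvec,
                       false,    // useExtrinsicGuess
                       100,      // iterationsCount
                       8.0f,     // reprojectionError threshold, pixels
                       0.99,     // confidence
                       inliers); // indices of correspondences kept as inliers
}
```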
@ -582,16 +582,16 @@ CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
@param image Source chessboard view. It must be an 8-bit grayscale or color image.
@param patternSize Number of inner corners per chessboard row and column
( patternSize = cvSize(points\_per\_row,points\_per\_colum) = cvSize(columns,rows) ).
( patternSize = cvSize(points_per_row,points_per_column) = cvSize(columns,rows) ).
@param corners Output array of detected corners.
@param flags Various operation flags that can be zero or a combination of the following values:
- **CV\_CALIB\_CB\_ADAPTIVE\_THRESH** Use adaptive thresholding to convert the image to black
- **CV_CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black
and white, rather than a fixed threshold level (computed from the average image brightness).
- **CV\_CALIB\_CB\_NORMALIZE\_IMAGE** Normalize the image gamma with equalizeHist before
- **CV_CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before
applying fixed or adaptive thresholding.
- **CV\_CALIB\_CB\_FILTER\_QUADS** Use additional criteria (like contour area, perimeter,
- **CV_CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,
square-like shape) to filter out false quads extracted at the contour retrieval stage.
- **CALIB\_CB\_FAST\_CHECK** Run a fast check on the image that looks for chessboard corners,
- **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners,
and shortcut the call if none is found. This can drastically speed up the call in the
degenerate condition when no chessboard is observed.
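A minimal detection sketch tying these flags together (not part of the commit; a 9x6 inner-corner board is assumed):

```cpp
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

bool detectBoard(const cv::Mat& gray, std::vector<cv::Point2f>& corners)
{
    cv::Size boardSize(9, 6); // inner corners, i.e. a 10x7-square board
    bool found = cv::findChessboardCorners(gray, boardSize, corners,
        cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
    if (found) // refine to sub-pixel accuracy before using for calibration
        cv::cornerSubPix(gray, corners, cv::Size(11, 11), cv::Size(-1, -1),
            cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT,
                             30, 0.1));
    return found;
}
```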
@ -637,7 +637,7 @@ CV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners,
@param image Destination image. It must be an 8-bit color image.
@param patternSize Number of inner corners per chessboard row and column
(patternSize = cv::Size(points\_per\_row,points\_per\_column)).
(patternSize = cv::Size(points_per_row,points_per_column)).
@param corners Array of detected corners, the output of findChessboardCorners.
@param patternWasFound Parameter indicating whether the complete board was found or not. The
return value of findChessboardCorners should be passed here.
@ -652,12 +652,12 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSiz
@param image grid view of input circles; it must be an 8-bit grayscale or color image.
@param patternSize number of circles per row and column
( patternSize = Size(points\_per\_row, points\_per\_colum) ).
( patternSize = Size(points_per_row, points_per_column) ).
@param centers output array of detected centers.
@param flags various operation flags that can be one of the following values:
- **CALIB\_CB\_SYMMETRIC\_GRID** uses symmetric pattern of circles.
- **CALIB\_CB\_ASYMMETRIC\_GRID** uses asymmetric pattern of circles.
- **CALIB\_CB\_CLUSTERING** uses a special algorithm for grid detection. It is more robust to
- **CALIB_CB_SYMMETRIC_GRID** uses symmetric pattern of circles.
- **CALIB_CB_ASYMMETRIC_GRID** uses asymmetric pattern of circles.
- **CALIB_CB_CLUSTERING** uses a special algorithm for grid detection. It is more robust to
perspective distortions but much more sensitive to background clutter.
@param blobDetector feature detector that finds blobs like dark circles on light background.
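A one-call sketch for the asymmetric layout (not part of the commit; 4x11 is the layout of the asymmetric pattern shipped with the OpenCV docs):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

bool detectCircles(const cv::Mat& img, std::vector<cv::Point2f>& centers)
{
    return cv::findCirclesGrid(img, cv::Size(4, 11), centers,
                               cv::CALIB_CB_ASYMMETRIC_GRID);
}
```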
@ -703,7 +703,7 @@ together.
@param imageSize Size of the image used only to initialize the intrinsic camera matrix.
@param cameraMatrix Output 3x3 floating-point camera matrix
\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
and/or CV\_CALIB\_FIX\_ASPECT\_RATIO are specified, some or all of fx, fy, cx, cy must be
and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
@param distCoeffs Output vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements.
@ -714,33 +714,33 @@ space (in which object points are specified) to the world coordinate space, that
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
@param tvecs Output vector of translation vectors estimated for each pattern view.
@param flags Different flags that may be zero or a combination of the following values:
- **CV\_CALIB\_USE\_INTRINSIC\_GUESS** cameraMatrix contains valid initial values of
- **CV_CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
Note that if intrinsic parameters are known, there is no need to use this function just to
estimate extrinsic parameters. Use solvePnP instead.
- **CV\_CALIB\_FIX\_PRINCIPAL\_POINT** The principal point is not changed during the global
- **CV_CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
optimization. It stays at the center or at a different location specified when
CV\_CALIB\_USE\_INTRINSIC\_GUESS is set too.
- **CV\_CALIB\_FIX\_ASPECT\_RATIO** The functions considers only fy as a free parameter. The
CV_CALIB_USE_INTRINSIC_GUESS is set too.
- **CV_CALIB_FIX_ASPECT_RATIO** The function considers only fy as a free parameter. The
ratio fx/fy stays the same as in the input cameraMatrix . When
CV\_CALIB\_USE\_INTRINSIC\_GUESS is not set, the actual input values of fx and fy are
CV_CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
ignored, only their ratio is computed and used further.
- **CV\_CALIB\_ZERO\_TANGENT\_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
- **CV_CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
to zeros and stay zero.
- **CV\_CALIB\_FIX\_K1,...,CV\_CALIB\_FIX\_K6** The corresponding radial distortion
coefficient is not changed during the optimization. If CV\_CALIB\_USE\_INTRINSIC\_GUESS is
- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** The corresponding radial distortion
coefficient is not changed during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is
set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CV\_CALIB\_RATIONAL\_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
- **CV_CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the rational model and return 8 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB\_THIN\_PRISM\_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the thin prism model and return 12 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB\_FIX\_S1\_S2\_S3\_S4** The thin prism distortion coefficients are not changed during
the optimization. If CV\_CALIB\_USE\_INTRINSIC\_GUESS is set, the coefficient from the
- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
@param criteria Termination criteria for the iterative optimization algorithm.
@ -750,7 +750,7 @@ points and their corresponding 2D projections in each view must be specified. Th
by using an object with a known geometry and easily detectable feature points. Such an object is
called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
a calibration rig (see findChessboardCorners ). Currently, initialization of intrinsic parameters
(when CV\_CALIB\_USE\_INTRINSIC\_GUESS is not set) is only implemented for planar calibration
(when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
be used as long as initial cameraMatrix is provided.
@ -758,7 +758,7 @@ The algorithm performs the following steps:
- Compute the initial intrinsic parameters (the option only available for planar calibration
patterns) or read them from the input parameters. The distortion coefficients are all set to
zeros initially unless some of CV\_CALIB\_FIX\_K? are specified.
zeros initially unless some of CV_CALIB_FIX_K? are specified.
- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
done using solvePnP .
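A minimal end-to-end sketch of the call (not part of the commit; objectPoints repeats the same planar model with Z = 0 for every view, imagePoints holds the corners found per view):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

double calibrate(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                 const std::vector<std::vector<cv::Point2f> >& imagePoints,
                 cv::Size imageSize)
{
    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs; // one pose per view
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     cameraMatrix, distCoeffs, rvecs, tvecs,
                                     cv::CALIB_ZERO_TANGENT_DIST);
    return rms; // RMS re-projection error in pixels
}
```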
@ -822,8 +822,8 @@ observed by the first camera.
observed by the second camera.
@param cameraMatrix1 Input/output first camera matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
any of CV\_CALIB\_USE\_INTRINSIC\_GUESS , CV\_CALIB\_FIX\_ASPECT\_RATIO ,
CV\_CALIB\_FIX\_INTRINSIC , or CV\_CALIB\_FIX\_FOCAL\_LENGTH are specified, some or all of the
any of CV_CALIB_USE_INTRINSIC_GUESS , CV_CALIB_FIX_ASPECT_RATIO ,
CV_CALIB_FIX_INTRINSIC , or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
matrix components must be initialized. See the flags description for details.
@param distCoeffs1 Input/output vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. The
@ -837,30 +837,30 @@ is similar to distCoeffs1 .
@param E Output essential matrix.
@param F Output fundamental matrix.
@param flags Different flags that may be zero or a combination of the following values:
- **CV\_CALIB\_FIX\_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
- **CV_CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
matrices are estimated.
- **CV\_CALIB\_USE\_INTRINSIC\_GUESS** Optimize some or all of the intrinsic parameters
- **CV_CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters
according to the specified flags. Initial values are provided by the user.
- **CV\_CALIB\_FIX\_PRINCIPAL\_POINT** Fix the principal points during the optimization.
- **CV\_CALIB\_FIX\_FOCAL\_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
- **CV\_CALIB\_FIX\_ASPECT\_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$
- **CV_CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.
- **CV_CALIB_FIX_FOCAL_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
- **CV_CALIB_FIX_ASPECT_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$
.
- **CV\_CALIB\_SAME\_FOCAL\_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
- **CV\_CALIB\_ZERO\_TANGENT\_DIST** Set tangential distortion coefficients for each camera to
- **CV_CALIB_SAME_FOCAL_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
- **CV_CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to
zeros and keep them fixed.
- **CV\_CALIB\_FIX\_K1,...,CV\_CALIB\_FIX\_K6** Do not change the corresponding radial
distortion coefficient during the optimization. If CV\_CALIB\_USE\_INTRINSIC\_GUESS is set,
- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** Do not change the corresponding radial
distortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set,
the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CV\_CALIB\_RATIONAL\_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
- **CV_CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
compatibility, this extra flag should be explicitly specified to make the calibration
function use the rational model and return 8 coefficients. If the flag is not set, the
function computes and returns only 5 distortion coefficients.
- **CALIB\_THIN\_PRISM\_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the thin prism model and return 12 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB\_FIX\_S1\_S2\_S3\_S4** The thin prism distortion coefficients are not changed during
the optimization. If CV\_CALIB\_USE\_INTRINSIC\_GUESS is set, the coefficient from the
- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
@param criteria Termination criteria for the iterative optimization algorithm.
@ -888,10 +888,10 @@ Besides the stereo-related information, the function can also perform a full cal
two cameras. However, due to the high dimensionality of the parameter space and noise in the input
data, the function can diverge from the correct solution. If the intrinsic parameters can be
estimated with high accuracy for each of the cameras individually (for example, using
calibrateCamera ), you are recommended to do so and then pass CV\_CALIB\_FIX\_INTRINSIC flag to the
calibrateCamera ), you are recommended to do so and then pass CV_CALIB_FIX_INTRINSIC flag to the
function along with the computed intrinsic parameters. Otherwise, if all the parameters are
estimated at once, it makes sense to restrict some parameters, for example, pass
CV\_CALIB\_SAME\_FOCAL\_LENGTH and CV\_CALIB\_ZERO\_TANGENT\_DIST flags, which is usually a
CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST flags, which is usually a
reasonable assumption.
Similarly to calibrateCamera , the function minimizes the total re-projection error for all the
@ -923,7 +923,7 @@ camera.
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
@param flags Operation flags that may be zero or CV\_CALIB\_ZERO\_DISPARITY . If the flag is set,
@param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
@ -936,7 +936,7 @@ pixels from the original images from the cameras are retained in the rectified i
image pixels are lost). Obviously, any intermediate value yields an intermediate result between
those two extreme cases.
@param newImageSize New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo\_calib.cpp sample in OpenCV samples directory). When (0,0)
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
@param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
@ -962,7 +962,7 @@ coordinates. The function distinguishes the following two cases:
\f[\texttt{P2} = \begin{bmatrix} f & 0 & cx_2 & T_x*f \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
CV\_CALIB\_ZERO\_DISPARITY is set.
CV_CALIB_ZERO_DISPARITY is set.
- **Vertical stereo**: the first and the second camera views are shifted relative to each other
mainly in vertical direction (and probably a bit in the horizontal direction too). The epipolar
@ -972,14 +972,14 @@ coordinates. The function distinguishes the following two cases:
\f[\texttt{P2} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_2 & T_y*f \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if CALIB\_ZERO\_DISPARITY is
where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if CALIB_ZERO_DISPARITY is
set.
As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
initialize the rectification map for each camera.
See below the screenshot from the stereo\_calib.cpp sample. Some red horizontal lines pass through
See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
the corresponding image regions. This means that the images are well rectified, which is what most
stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
their interiors are all valid pixels.
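Putting the pieces together, a sketch of the rectification pipeline described above (not part of the commit; M1/D1, M2/D2, R, T are assumed to come from stereoCalibrate):

```cpp
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>

void rectifyPair(const cv::Mat& M1, const cv::Mat& D1,
                 const cv::Mat& M2, const cv::Mat& D2,
                 const cv::Mat& R, const cv::Mat& T, cv::Size imageSize,
                 const cv::Mat& leftImg, const cv::Mat& rightImg,
                 cv::Mat& leftRect, cv::Mat& rightRect)
{
    cv::Mat R1, R2, P1, P2, Q;
    cv::stereoRectify(M1, D1, M2, D2, imageSize, R, T, R1, R2, P1, P2, Q,
                      cv::CALIB_ZERO_DISPARITY, 0 /* alpha: crop to valid */);
    cv::Mat map1x, map1y, map2x, map2y;
    cv::initUndistortRectifyMap(M1, D1, R1, P1, imageSize, CV_32FC1, map1x, map1y);
    cv::initUndistortRectifyMap(M2, D2, R2, P2, imageSize, CV_32FC1, map2x, map2y);
    cv::remap(leftImg,  leftRect,  map1x, map1y, cv::INTER_LINEAR);
    cv::remap(rightImg, rightRect, map2x, map2y, cv::INTER_LINEAR);
}
```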
@ -1057,7 +1057,7 @@ undistorted image. See roi1, roi2 description in stereoRectify .
@param centerPrincipalPoint Optional flag that indicates whether in the new camera matrix the
principal point should be at the image center or not. By default, the principal point is chosen to
best fit a subset of the source image (determined by alpha) to the corrected image.
@return new\_camera\_matrix Output new camera matrix.
@return new_camera_matrix Output new camera matrix.
The function computes and returns the optimal new camera matrix based on the free scaling parameter.
By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
@ -1111,10 +1111,10 @@ CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
floating-point (single or double precision).
@param points2 Array of the second image points of the same size and format as points1 .
@param method Method for computing a fundamental matrix.
- **CV\_FM\_7POINT** for a 7-point algorithm. \f$N = 7\f$
- **CV\_FM\_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
- **CV\_FM\_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
- **CV\_FM\_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
- **CV_FM_7POINT** for a 7-point algorithm. \f$N = 7\f$
- **CV_FM_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
- **CV_FM_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
- **CV_FM_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
@param param1 Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
@ -1269,7 +1269,7 @@ CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray point
/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV\_32FC2 or
@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
vector\<Point2f\> .
@param whichImage Index of the image (1 or 2) that contains the points .
@param F Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify .
@ -1364,16 +1364,16 @@ CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost
@param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
floating-point disparity image.
@param \_3dImage Output 3-channel floating-point image of the same size as disparity . Each
element of \_3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
@param _3dImage Output 3-channel floating-point image of the same size as disparity . Each
element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
map.
@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with stereoRectify.
@param handleMissingValues Indicates, whether the function should handle missing values (i.e.
points where the disparity was not computed). If handleMissingValues=true, then pixels with the
minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
to 3D points with a very large Z value (currently set to 10000).
@param ddepth The optional output array depth. If it is -1, the output image will have CV\_32F
depth. ddepth can also be set to CV\_16S, CV\_32S or CV\_32F.
@param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
The function transforms a single-channel disparity map to a 3-channel image representing a 3D
surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
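A minimal sketch of the call (not part of the commit; Q is assumed to come from stereoRectify):

```cpp
#include <opencv2/calib3d.hpp>

void toPointCloud(const cv::Mat& disparity, const cv::Mat& Q, cv::Mat& xyz)
{
    // handleMissingValues=true maps unknown disparities to a large Z (10000).
    cv::reprojectImageTo3D(disparity, xyz, Q, true, CV_32F);
    // xyz is CV_32FC3; xyz.at<cv::Vec3f>(y, x) is the point for pixel (x, y).
}
```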
@ -1523,19 +1523,19 @@ public:
one as follows:
- By default, the algorithm is single-pass, which means that you consider only 5 directions
instead of 8. Set mode=StereoSGBM::MODE\_HH in createStereoSGBM to run the full variant of the
instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the
algorithm but beware that it may consume a lot of memory.
- The algorithm matches blocks, not individual pixels. Though, setting blockSize=1 reduces the
blocks to single pixels.
- Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi
sub-pixel metric from @cite BT98 is used. Though, the color images are supported as well.
- Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for
example: pre-filtering (StereoBM::PREFILTER\_XSOBEL type) and post-filtering (uniqueness
example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness
check, quadratic interpolation and speckle filtering).
@note
- (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found
at opencv\_source\_code/samples/python2/stereo\_match.py
at opencv_source_code/samples/python2/stereo_match.py
*/
class CV_EXPORTS_W StereoSGBM : public StereoMatcher
{
@ -1573,9 +1573,9 @@ public:
@param P2 The second parameter controlling the disparity smoothness. The larger the values are,
the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
pixels. The algorithm requires P2 \> P1 . See stereo\_match.cpp sample where some reasonably good
P1 and P2 values are shown (like 8\*number\_of\_image\_channels\*SADWindowSize\*SADWindowSize and
32\*number\_of\_image\_channels\*SADWindowSize\*SADWindowSize , respectively).
pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
P1 and P2 values are shown (like 8\*number_of_image_channels\*SADWindowSize\*SADWindowSize and
32\*number_of_image_channels\*SADWindowSize\*SADWindowSize , respectively).
@param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
disparity check. Set it to a non-positive value to disable the check.
@param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first
@ -1590,7 +1590,7 @@ public:
@param speckleRange Maximum disparity variation within each connected component. If you do speckle
filtering, set the parameter to a positive value, it will be implicitly multiplied by 16.
Normally, 1 or 2 is good enough.
@param mode Set it to StereoSGBM::MODE\_HH to run the full-scale two-pass dynamic programming
@param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming
algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and
huge for HD-size pictures. By default, it is set to false .
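A minimal sketch using the P1/P2 rule of thumb quoted above (not part of the commit; single-channel rectified input and blockSize 5 are assumed):

```cpp
#include <opencv2/calib3d.hpp>

cv::Mat computeDisparity(const cv::Mat& leftRect, const cv::Mat& rightRect)
{
    int blockSize = 5, channels = 1;
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
        0,                                      // minDisparity
        64,                                     // numDisparities (multiple of 16)
        blockSize,
        8  * channels * blockSize * blockSize,  // P1
        32 * channels * blockSize * blockSize); // P2
    cv::Mat disp; // CV_16S, fixed-point disparities scaled by 16
    sgbm->compute(leftRect, rightRect, disp);
    return disp;
}
```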
@ -1687,7 +1687,7 @@ namespace fisheye
1-channel or 1x1 3-channel
@param P New camera matrix (3x3) or new projection matrix (3x4)
@param size Undistorted image size.
@param m1type Type of the first output map that can be CV\_32FC1 or CV\_16SC2 . See convertMaps()
@param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
for details.
@param map1 The first output map.
@param map2 The second output map.
@ -1712,10 +1712,10 @@ namespace fisheye
performed.
See below the results of undistortImage.
- a\) result of undistort of perspective camera model (all possible coefficients (k\_1, k\_2, k\_3,
k\_4, k\_5, k\_6) of distortion were optimized under calibration)
- b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k\_1, k\_2,
k\_3, k\_4) of fisheye distortion were optimized under calibration)
- a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
k_4, k_5, k_6) of distortion were optimized under calibration)
- b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
k_3, k_4) of fisheye distortion were optimized under calibration)
- c\) original image was captured with fisheye lens
Pictures a) and b) are almost the same. But if we consider points of the image located far from the center
@ -1749,10 +1749,10 @@ namespace fisheye
@param imagePoints vector of vectors of the projections of calibration pattern points.
imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
objectPoints[i].size() for each i.
@param image\_size Size of the image used only to initialize the intrinsic camera matrix.
@param image_size Size of the image used only to initialize the intrinsic camera matrix.
@param K Output 3x3 floating-point camera matrix
\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If
fisheye::CALIB\_USE\_INTRINSIC\_GUESS/ is specified, some or all of fx, fy, cx, cy must be
fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
@param D Output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
@ -1762,14 +1762,14 @@ namespace fisheye
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
@param tvecs Output vector of translation vectors estimated for each pattern view.
@param flags Different flags that may be zero or a combination of the following values:
- **fisheye::CALIB\_USE\_INTRINSIC\_GUESS** cameraMatrix contains valid initial values of
- **fisheye::CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- **fisheye::CALIB\_RECOMPUTE\_EXTRINSIC** Extrinsic will be recomputed after each iteration
- **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
of intrinsic optimization.
- **fisheye::CALIB\_CHECK\_COND** The functions will check validity of condition number.
- **fisheye::CALIB\_FIX\_SKEW** Skew coefficient (alpha) is set to zero and stay zero.
- **fisheye::CALIB\_FIX\_K1..4** Selected distortion coefficients are set to zeros and stay
- **fisheye::CALIB_CHECK_COND** The function will check the validity of the condition number.
- **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stays zero.
- **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zeros and stay
zero.
@param criteria Termination criteria for the iterative optimization algorithm.
*/
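A minimal sketch of the call with the flags discussed above (not part of the commit; inputs are gathered the same way as for calibrateCamera):

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

double calibrateFisheye(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                        const std::vector<std::vector<cv::Point2f> >& imagePoints,
                        cv::Size imageSize)
{
    cv::Mat K, D; // 3x3 camera matrix and (k1, k2, k3, k4)
    std::vector<cv::Mat> rvecs, tvecs;
    int flags = cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW;
    return cv::fisheye::calibrate(objectPoints, imagePoints, imageSize,
                                  K, D, rvecs, tvecs, flags);
}
```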
@ -1794,13 +1794,13 @@ namespace fisheye
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
@param flags Operation flags that may be zero or CV\_CALIB\_ZERO\_DISPARITY . If the flag is set,
@param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.
@param newImageSize New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo\_calib.cpp sample in OpenCV samples directory). When (0,0)
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
@param balance Sets the new focal length in range between the min focal length and the max focal
@ -1820,7 +1820,7 @@ namespace fisheye
observed by the second camera.
@param K1 Input/output first camera matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
any of fisheye::CALIB\_USE\_INTRINSIC\_GUESS , fisheye::CV\_CALIB\_FIX\_INTRINSIC are specified,
any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CV_CALIB_FIX_INTRINSIC are specified,
some or all of the matrix components must be initialized.
@param D1 Input/output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$ of 4 elements.
@param K2 Input/output second camera matrix. The parameter is similar to K1 .
@ -1830,16 +1830,16 @@ namespace fisheye
@param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
@param T Output translation vector between the coordinate systems of the cameras.
@param flags Different flags that may be zero or a combination of the following values:
- **fisheye::CV\_CALIB\_FIX\_INTRINSIC** Fix K1, K2? and D1, D2? so that only R, T matrices
- **fisheye::CV_CALIB_FIX_INTRINSIC** Fix K1, K2? and D1, D2? so that only R, T matrices
are estimated.
- **fisheye::CALIB\_USE\_INTRINSIC\_GUESS** K1, K2 contains valid initial values of
- **fisheye::CALIB_USE_INTRINSIC_GUESS** K1, K2 contain valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center (imageSize is used), and focal distances are computed in a least-squares fashion.
- **fisheye::CALIB\_RECOMPUTE\_EXTRINSIC** Extrinsic will be recomputed after each iteration
- **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
of intrinsic optimization.
- **fisheye::CALIB\_CHECK\_COND** The functions will check validity of condition number.
- **fisheye::CALIB\_FIX\_SKEW** Skew coefficient (alpha) is set to zero and stay zero.
- **fisheye::CALIB\_FIX\_K1..4** Selected distortion coefficients are set to zeros and stay
- **fisheye::CALIB_CHECK_COND** The function will check the validity of the condition number.
- **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stays zero.
- **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zeros and stay
zero.
@param criteria Termination criteria for the iterative optimization algorithm.
*/

View File

@ -326,11 +326,11 @@ CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCou
Its interface is also Mat-like but with additional memory type parameters.
- **PAGE\_LOCKED** sets a page locked memory type used commonly for fast and asynchronous
- **PAGE_LOCKED** sets a page locked memory type used commonly for fast and asynchronous
uploading/downloading data from/to GPU.
- **SHARED** specifies a zero copy memory allocation that enables mapping the host memory to GPU
address space, if supported.
- **WRITE\_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are
- **WRITE_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are
used to supply GPU with data when GPU only reads it. The advantage is a better CPU cache
utilization.
@ -581,7 +581,7 @@ class CV_EXPORTS TargetArchs
public:
/** @brief The following method checks whether the module was built with the support of the given feature:
@param feature\_set Features to be checked. See :ocvcuda::FeatureSet.
@param feature_set Features to be checked. See cuda::FeatureSet.
*/
static bool builtWith(FeatureSet feature_set);
@ -611,9 +611,9 @@ public:
/** @brief The constructors.
@param device\_id System index of the CUDA device starting with 0.
@param device_id System index of the CUDA device starting with 0.
Constructs the DeviceInfo object for the specified device. If device\_id parameter is missed, it
Constructs the DeviceInfo object for the specified device. If the device_id parameter is omitted, it
constructs an object for the current device.
*/
DeviceInfo(int device_id);
@ -793,7 +793,7 @@ public:
/** @brief Provides information on CUDA feature support.
@param feature\_set Features to be checked. See cuda::FeatureSet.
@param feature_set Features to be checked. See cuda::FeatureSet.
This function returns true if the device has the specified CUDA feature. Otherwise, it returns false
*/
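A small sketch combining both queries (not part of the commit):

```cpp
#include <opencv2/core/cuda.hpp>
#include <iostream>

int main()
{
    // Run-time check: what does each installed device support?
    int n = cv::cuda::getCudaEnabledDeviceCount();
    for (int i = 0; i < n; ++i)
    {
        cv::cuda::DeviceInfo info(i);
        std::cout << info.name() << " supports compute 3.0: "
                  << info.supports(cv::cuda::FEATURE_SET_COMPUTE_30) << "\n";
    }
    // Build-time check: was the library compiled for this feature set?
    std::cout << "built with compute 3.0: "
              << cv::cuda::TargetArchs::builtWith(cv::cuda::FEATURE_SET_COMPUTE_30)
              << std::endl;
    return 0;
}
```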

View File

@ -66,7 +66,7 @@ namespace cv
class Stream;
class Event;
/** @brief Class that enables getting cudaStream\_t from cuda::Stream
/** @brief Class that enables getting cudaStream_t from cuda::Stream
because it is the only public header that depends on the CUDA Runtime API. Including it
brings a dependency to your code.

View File

@ -83,11 +83,11 @@ as possible.
@note
- An example applying the HOG descriptor for people detection can be found at
opencv\_source\_code/samples/cpp/peopledetect.cpp
opencv_source_code/samples/cpp/peopledetect.cpp
- A CUDA example applying the HOG descriptor for people detection can be found at
opencv\_source\_code/samples/gpu/hog.cpp
opencv_source_code/samples/gpu/hog.cpp
- (Python) An example applying the HOG descriptor for people detection can be found at
opencv\_source\_code/samples/python2/peopledetect.py
opencv_source_code/samples/python2/peopledetect.py
*/
struct CV_EXPORTS HOGDescriptor
{
@ -97,14 +97,14 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Creates the HOG descriptor and detector.
@param win\_size Detection window size. Align to block size and block stride.
@param block\_size Block size in pixels. Align to cell size. Only (16,16) is supported for now.
@param block\_stride Block stride. It must be a multiple of cell size.
@param cell\_size Cell size. Only (8, 8) is supported for now.
@param win_size Detection window size. Align to block size and block stride.
@param block_size Block size in pixels. Align to cell size. Only (16,16) is supported for now.
@param block_stride Block stride. It must be a multiple of cell size.
@param cell_size Cell size. Only (8, 8) is supported for now.
@param nbins Number of bins. Only 9 bins per cell are supported for now.
@param win\_sigma Gaussian smoothing window parameter.
@param threshold\_L2hys L2-Hys normalization method shrinkage.
@param gamma\_correction Flag to specify whether the gamma correction preprocessing is required or
@param win_sigma Gaussian smoothing window parameter.
@param threshold_L2hys L2-Hys normalization method shrinkage.
@param gamma_correction Flag to specify whether the gamma correction preprocessing is required or
not.
@param nlevels Maximum number of detection window increases.
*/
@ -137,13 +137,13 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Performs object detection without a multi-scale window.
@param img Source image. CV\_8UC1 and CV\_8UC4 types are supported for now.
@param found\_locations Left-top corner points of detected objects boundaries.
@param hit\_threshold Threshold for the distance between features and SVM classifying plane.
@param img Source image. CV_8UC1 and CV_8UC4 types are supported for now.
@param found_locations Left-top corner points of detected objects boundaries.
@param hit_threshold Threshold for the distance between features and SVM classifying plane.
Usually it is 0 and should be specified in the detector coefficients (as the last free
coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
manually here.
@param win\_stride Window stride. It must be a multiple of block stride.
@param win_stride Window stride. It must be a multiple of block stride.
@param padding Mock parameter to keep the CPU interface compatibility. It must be (0,0).
*/
void detect(const GpuMat& img, std::vector<Point>& found_locations,
@ -153,13 +153,13 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Performs object detection with a multi-scale window.
@param img Source image. See cuda::HOGDescriptor::detect for type limitations.
@param found\_locations Detected objects boundaries.
@param hit\_threshold Threshold for the distance between features and SVM classifying plane. See
@param found_locations Detected objects boundaries.
@param hit_threshold Threshold for the distance between features and SVM classifying plane. See
cuda::HOGDescriptor::detect for details.
@param win\_stride Window stride. It must be a multiple of block stride.
@param win_stride Window stride. It must be a multiple of block stride.
@param padding Mock parameter to keep the CPU interface compatibility. It must be (0,0).
@param scale0 Coefficient of the detection window increase.
@param group\_threshold Coefficient to regulate the similarity threshold. When detected, some
@param group_threshold Coefficient to regulate the similarity threshold. When detected, some
objects can be covered by many rectangles. 0 means not to perform grouping. See groupRectangles .
*/
void detectMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
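A minimal detection sketch against the struct API shown here (not part of the commit; the header path and the getDefaultPeopleDetector helper mirror the CPU HOGDescriptor and are assumptions for this development snapshot):

```cpp
#include <opencv2/cudaobjdetect.hpp> // assumed header for this snapshot
#include <vector>

void detectPeople(const cv::cuda::GpuMat& img, std::vector<cv::Rect>& found)
{
    cv::cuda::HOGDescriptor hog; // default 64x128 detection window
    hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
    hog.detectMultiScale(img, found); // img must be CV_8UC1 or CV_8UC4
}
```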
@ -177,11 +177,11 @@ struct CV_EXPORTS HOGDescriptor
/** @brief Returns block descriptors computed for the whole image.
@param img Source image. See cuda::HOGDescriptor::detect for type limitations.
@param win\_stride Window stride. It must be a multiple of block stride.
@param win_stride Window stride. It must be a multiple of block stride.
@param descriptors 2D array of descriptors.
@param descr\_format Descriptor storage format:
- **DESCR\_FORMAT\_ROW\_BY\_ROW** - Row-major order.
- **DESCR\_FORMAT\_COL\_BY\_COL** - Column-major order.
@param descr_format Descriptor storage format:
- **DESCR_FORMAT_ROW_BY_ROW** - Row-major order.
- **DESCR_FORMAT_COL_BY_COL** - Column-major order.
The function is mainly used to learn the classifier.
*/
@ -236,9 +236,9 @@ protected:
@note
- A cascade classifier example can be found at
opencv\_source\_code/samples/gpu/cascadeclassifier.cpp
opencv_source_code/samples/gpu/cascadeclassifier.cpp
- A Nvidea API specific cascade classifier example can be found at
opencv\_source\_code/samples/gpu/cascadeclassifier\_nvidia\_api.cpp
opencv_source_code/samples/gpu/cascadeclassifier_nvidia_api.cpp
*/
class CV_EXPORTS CascadeClassifier_CUDA
{
@ -271,7 +271,7 @@ public:
int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size());
/** @brief Detects objects of different sizes in the input image.
@param image Matrix of type CV\_8U containing an image where objects should be detected.
@param image Matrix of type CV_8U containing an image where objects should be detected.
@param objectsBuf Buffer to store detected objects (rectangles). If it is empty, it is allocated
with the default size. If not empty, the function searches not more than N objects, where
N = sizeof(objectsBuf's data)/sizeof(cv::Rect).
@ -364,15 +364,15 @@ CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tve
@param object Single-row matrix of object points.
@param image Single-row matrix of image points.
@param camera\_mat 3x3 matrix of intrinsic camera parameters.
@param dist\_coef Distortion coefficients. See undistortPoints for details.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param rvec Output 3D rotation vector.
@param tvec Output 3D translation vector.
@param use\_extrinsic\_guess Flag to indicate that the function must use rvec and tvec as an
@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
initial transformation guess. It is not supported for now.
@param num\_iters Maximum number of RANSAC iterations.
@param max\_dist Euclidean distance threshold to detect whether point is inlier or not.
@param min\_inlier\_count Flag to indicate that the function must stop if greater or equal number
@param num_iters Maximum number of RANSAC iterations.
@param max_dist Euclidean distance threshold to detect whether point is inlier or not.
@param min_inlier_count Flag to indicate that the function must stop if greater or equal number
of inliers is achieved. It is not supported for now.
@param inliers Output vector of inlier indices.
*/

View File

@ -216,12 +216,12 @@ CV_EXPORTS void pow(InputArray src, double power, OutputArray dst, Stream& strea
@param src2 Second source matrix or scalar.
@param dst Destination matrix that has the same size and type as the input array(s).
@param cmpop Flag specifying the relation between the elements to be checked:
- **CMP\_EQ:** a(.) == b(.)
- **CMP\_GT:** a(.) \< b(.)
- **CMP\_GE:** a(.) \<= b(.)
- **CMP\_LT:** a(.) \< b(.)
- **CMP\_LE:** a(.) \<= b(.)
- **CMP\_NE:** a(.) != b(.)
- **CMP_EQ:** a(.) == b(.)
- **CMP_GT:** a(.) \> b(.)
- **CMP_GE:** a(.) \>= b(.)
- **CMP_LT:** a(.) \< b(.)
- **CMP_LE:** a(.) \<= b(.)
- **CMP_NE:** a(.) != b(.)
@param stream Stream for the asynchronous version.
@sa compare
@ -278,7 +278,7 @@ CV_EXPORTS void rshift(InputArray src, Scalar_<int> val, OutputArray dst, Stream
/** @brief Performs pixel by pixel left shift of an image by a constant value.
@param src Source matrix. Supports 1, 3 and 4 channels images with CV\_8U , CV\_16U or CV\_32S
@param src Source matrix. Supports 1, 3 and 4 channels images with CV_8U , CV_16U or CV_32S
depth.
@param val Constant values, one per channel.
@param dst Destination matrix with the same size and type as src .
@ -343,8 +343,8 @@ static inline void scaleAdd(InputArray src1, double alpha, InputArray src2, Outp
@param src Source array (single-channel).
@param dst Destination array with the same size and type as src .
@param thresh Threshold value.
@param maxval Maximum value to use with THRESH\_BINARY and THRESH\_BINARY\_INV threshold types.
@param type Threshold type. For details, see threshold . The THRESH\_OTSU and THRESH\_TRIANGLE
@param maxval Maximum value to use with THRESH_BINARY and THRESH_BINARY_INV threshold types.
@param type Threshold type. For details, see threshold . The THRESH_OTSU and THRESH_TRIANGLE
threshold types are not supported.
@param stream Stream for the asynchronous version.
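A minimal sketch (not part of the commit), mirroring the CPU cv::threshold call:

```cpp
#include <opencv2/cudaarithm.hpp>

void gpuThreshold(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst)
{
    cv::cuda::threshold(src, dst, 128 /* thresh */, 255 /* maxval */,
                        cv::THRESH_BINARY);
}
```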
@ -354,8 +354,8 @@ CV_EXPORTS double threshold(InputArray src, OutputArray dst, double thresh, doub
/** @brief Computes magnitudes of complex matrix elements.
@param xy Source complex matrix in the interleaved format ( CV\_32FC2 ).
@param magnitude Destination matrix of float magnitudes ( CV\_32FC1 ).
@param xy Source complex matrix in the interleaved format ( CV_32FC2 ).
@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
@sa magnitude
@ -364,8 +364,8 @@ CV_EXPORTS void magnitude(InputArray xy, OutputArray magnitude, Stream& stream =
/** @brief Computes squared magnitudes of complex matrix elements.
@param xy Source complex matrix in the interleaved format ( CV\_32FC2 ).
@param magnitude Destination matrix of float magnitude squares ( CV\_32FC1 ).
@param xy Source complex matrix in the interleaved format ( CV_32FC2 ).
@param magnitude Destination matrix of float magnitude squares ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void magnitudeSqr(InputArray xy, OutputArray magnitude, Stream& stream = Stream::Null());
@ -373,9 +373,9 @@ CV_EXPORTS void magnitudeSqr(InputArray xy, OutputArray magnitude, Stream& strea
/** @overload
computes magnitude of each (x(i), y(i)) vector
supports only floating-point source
@param x Source matrix containing real components ( CV\_32FC1 ).
@param y Source matrix containing imaginary components ( CV\_32FC1 ).
@param magnitude Destination matrix of float magnitudes ( CV\_32FC1 ).
@param x Source matrix containing real components ( CV_32FC1 ).
@param y Source matrix containing imaginary components ( CV_32FC1 ).
@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void magnitude(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
@ -383,18 +383,18 @@ CV_EXPORTS void magnitude(InputArray x, InputArray y, OutputArray magnitude, Str
/** @overload
computes squared magnitude of each (x(i), y(i)) vector
supports only floating-point source
@param x Source matrix containing real components ( CV\_32FC1 ).
@param y Source matrix containing imaginary components ( CV\_32FC1 ).
@param magnitude Destination matrix of float magnitude squares ( CV\_32FC1 ).
@param x Source matrix containing real components ( CV_32FC1 ).
@param y Source matrix containing imaginary components ( CV_32FC1 ).
@param magnitude Destination matrix of float magnitude squares ( CV_32FC1 ).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void magnitudeSqr(InputArray x, InputArray y, OutputArray magnitude, Stream& stream = Stream::Null());
/** @brief Computes polar angles of complex matrix elements.
@param x Source matrix containing real components ( CV\_32FC1 ).
@param y Source matrix containing imaginary components ( CV\_32FC1 ).
@param angle Destination matrix of angles ( CV\_32FC1 ).
@param x Source matrix containing real components ( CV_32FC1 ).
@param y Source matrix containing imaginary components ( CV_32FC1 ).
@param angle Destination matrix of angles ( CV_32FC1 ).
@param angleInDegrees Flag for angles that must be evaluated in degrees.
@param stream Stream for the asynchronous version.
@ -404,10 +404,10 @@ CV_EXPORTS void phase(InputArray x, InputArray y, OutputArray angle, bool angleI
/** @brief Converts Cartesian coordinates into polar.
@param x Source matrix containing real components ( CV\_32FC1 ).
@param y Source matrix containing imaginary components ( CV\_32FC1 ).
@param magnitude Destination matrix of float magnitudes ( CV\_32FC1 ).
@param angle Destination matrix of angles ( CV\_32FC1 ).
@param x Source matrix containing real components ( CV_32FC1 ).
@param y Source matrix containing imaginary components ( CV_32FC1 ).
@param magnitude Destination matrix of float magnitudes ( CV_32FC1 ).
@param angle Destination matrix of angles ( CV_32FC1 ).
@param angleInDegrees Flag for angles that must be evaluated in degrees.
@param stream Stream for the asynchronous version.
@ -417,10 +417,10 @@ CV_EXPORTS void cartToPolar(InputArray x, InputArray y, OutputArray magnitude, O
/** @brief Converts polar coordinates into Cartesian.
@param magnitude Source matrix containing magnitudes ( CV\_32FC1 ).
@param angle Source matrix containing angles ( CV\_32FC1 ).
@param x Destination matrix of real components ( CV\_32FC1 ).
@param y Destination matrix of imaginary components ( CV\_32FC1 ).
@param magnitude Source matrix containing magnitudes ( CV_32FC1 ).
@param angle Source matrix containing angles ( CV_32FC1 ).
@param x Destination matrix of real components ( CV_32FC1 ).
@param y Destination matrix of imaginary components ( CV_32FC1 ).
@param angleInDegrees Flag that indicates angles in degrees.
@param stream Stream for the asynchronous version.
*/
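A hedged round-trip sketch for cartToPolar/polarToCart, relying only on the parameters documented above (the trailing stream argument keeps its default):

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::Mat x(1, 8, CV_32FC1), y(1, 8, CV_32FC1);
        cv::randu(x, -1.f, 1.f);
        cv::randu(y, -1.f, 1.f);

        cv::cuda::GpuMat d_x(x), d_y(y), d_mag, d_angle, d_x2, d_y2;
        cv::cuda::cartToPolar(d_x, d_y, d_mag, d_angle, true);   // angles in degrees
        cv::cuda::polarToCart(d_mag, d_angle, d_x2, d_y2, true); // invert the transform

        cv::Mat x2;
        d_x2.download(x2); // x2 matches x up to floating-point error
        return 0;
    }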
@ -468,8 +468,8 @@ CV_EXPORTS void transpose(InputArray src1, OutputArray dst, Stream& stream = Str
/** @brief Flips a 2D matrix around vertical, horizontal, or both axes.
@param src Source matrix. Supports 1, 3 and 4 channels images with CV\_8U, CV\_16U, CV\_32S or
CV\_32F depth.
@param src Source matrix. Supports 1, 3 and 4 channels images with CV_8U, CV_16U, CV_32S or
CV_32F depth.
@param dst Destination matrix.
@param flipCode Flip mode for the source:
- 0 Flips around x-axis.
@ -489,7 +489,7 @@ public:
/** @brief Transforms the source matrix into the destination matrix using the given look-up table:
dst(I) = lut(src(I)) .
@param src Source matrix. CV\_8UC1 and CV\_8UC3 matrices are supported for now.
@param src Source matrix. CV_8UC1 and CV_8UC3 matrices are supported for now.
@param dst Destination matrix.
@param stream Stream for the asynchronous version.
*/
@ -498,13 +498,13 @@ public:
/** @brief Creates implementation for cuda::LookUpTable .
@param lut Look-up table of 256 elements. It is a continuous CV\_8U matrix.
@param lut Look-up table of 256 elements. It is a continuous CV_8U matrix.
*/
CV_EXPORTS Ptr<LookUpTable> createLookUpTable(InputArray lut);
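For example (a sketch, assuming the transform method documented above is the entry point of cuda::LookUpTable), inverting an 8-bit image through a 256-entry table:

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::Mat lut(1, 256, CV_8UC1);
        for (int i = 0; i < 256; ++i)
            lut.at<uchar>(i) = static_cast<uchar>(255 - i); // intensity inversion

        cv::Ptr<cv::cuda::LookUpTable> tbl = cv::cuda::createLookUpTable(lut);

        cv::cuda::GpuMat d_src(cv::Mat(64, 64, CV_8UC1, cv::Scalar(10))), d_dst;
        tbl->transform(d_src, d_dst); // every pixel becomes 245
        return 0;
    }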
/** @brief Forms a border around an image.
@param src Source image. CV\_8UC1 , CV\_8UC4 , CV\_32SC1 , and CV\_32FC1 types are supported.
@param src Source image. CV_8UC1 , CV_8UC4 , CV_32SC1 , and CV_32FC1 types are supported.
@param dst Destination image with the same type as src. The size is
Size(src.cols+left+right, src.rows+top+bottom) .
@param top
@ -512,8 +512,8 @@ Size(src.cols+left+right, src.rows+top+bottom) .
@param left
@param right Number of pixels in each direction from the source image rectangle to extrapolate.
For example: top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs to be built.
@param borderType Border type. See borderInterpolate for details. BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param borderType Border type. See borderInterpolate for details. BORDER_REFLECT101 ,
BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
@param value Border value.
@param stream Stream for the asynchronous version.
*/
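A minimal sketch of copyMakeBorder with a 1-pixel constant border (illustrative only):

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::cuda::GpuMat d_src(cv::Mat(32, 32, CV_8UC1, cv::Scalar(128))), d_dst;

        // top = bottom = left = right = 1 -> 34 x 34 output, zero-filled border
        cv::cuda::copyMakeBorder(d_src, d_dst, 1, 1, 1, 1, cv::BORDER_CONSTANT, cv::Scalar(0));
        CV_Assert(d_dst.size() == cv::Size(34, 34));
        return 0;
    }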
@ -528,8 +528,8 @@ CV_EXPORTS void copyMakeBorder(InputArray src, OutputArray dst, int top, int bot
/** @brief Returns the norm of a matrix (or difference of two matrices).
@param src1 Source matrix. Any matrices except 64F are supported.
@param normType Norm type. NORM\_L1 , NORM\_L2 , and NORM\_INF are supported for now.
@param mask optional operation mask; it must have the same size as src1 and CV\_8UC1 type.
@param normType Norm type. NORM_L1 , NORM_L2 , and NORM_INF are supported for now.
@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa norm
@ -555,7 +555,7 @@ static inline double norm(InputArray src, int normType, GpuMat& buf)
@param src1 Source matrix. Any matrices except 64F are supported.
@param src2 Second source matrix (if any) with the same size and type as src1.
@param normType Norm type. NORM\_L1 , NORM\_L2 , and NORM\_INF are supported for now.
@param normType Norm type. NORM_L1 , NORM_L2 , and NORM_INF are supported for now.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa norm
@ -572,8 +572,8 @@ static inline double norm(InputArray src1, InputArray src2, int normType=NORM_L2
/** @brief Returns the sum of matrix elements.
@param src Source image of any depth except for CV\_64F .
@param mask optional operation mask; it must have the same size as src1 and CV\_8UC1 type.
@param src Source image of any depth except for CV_64F .
@param mask optional operation mask; it must have the same size as src and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa sum
@ -597,8 +597,8 @@ static inline Scalar sum(InputArray src, GpuMat& buf)
/** @brief Returns the sum of absolute values for matrix elements.
@param src Source image of any depth except for CV\_64F .
@param mask optional operation mask; it must have the same size as src1 and CV\_8UC1 type.
@param src Source image of any depth except for CV_64F .
@param mask optional operation mask; it must have the same size as src and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
*/
CV_EXPORTS Scalar absSum(InputArray src, InputArray mask, GpuMat& buf);
@ -620,8 +620,8 @@ static inline Scalar absSum(InputArray src, GpuMat& buf)
/** @brief Returns the squared sum of matrix elements.
@param src Source image of any depth except for CV\_64F .
@param mask optional operation mask; it must have the same size as src1 and CV\_8UC1 type.
@param src Source image of any depth except for CV_64F .
@param mask optional operation mask; it must have the same size as src and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
*/
CV_EXPORTS Scalar sqrSum(InputArray src, InputArray mask, GpuMat& buf);
@ -649,7 +649,7 @@ static inline Scalar sqrSum(InputArray src, GpuMat& buf)
@param mask Optional mask to select a sub-matrix.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
The function does not work with CV\_64F images on GPUs with the compute capability \< 1.3.
The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
@sa minMaxLoc
*/
@ -675,7 +675,7 @@ static inline void minMax(InputArray src, double* minVal, double* maxVal=0, Inpu
automatically.
@param locbuf Optional locations buffer to avoid extra memory allocations. It is resized
automatically.
The function does not work with CV\_64F images on GPU with the compute capability \< 1.3.
The function does not work with CV_64F images on GPU with the compute capability \< 1.3.
@sa minMaxLoc
*/
@ -696,7 +696,7 @@ static inline void minMaxLoc(InputArray src, double* minVal, double* maxVal=0, P
@param src Single-channel source image.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
The function does not work with CV\_64F images on GPUs with the compute capability \< 1.3.
The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
@sa countNonZero
*/
@ -717,20 +717,20 @@ static inline int countNonZero(const GpuMat& src)
@param dim Dimension index along which the matrix is reduced. 0 means that the matrix is reduced
to a single row. 1 means that the matrix is reduced to a single column.
@param reduceOp Reduction operation that could be one of the following:
- **CV\_REDUCE\_SUM** The output is the sum of all rows/columns of the matrix.
- **CV\_REDUCE\_AVG** The output is the mean vector of all rows/columns of the matrix.
- **CV\_REDUCE\_MAX** The output is the maximum (column/row-wise) of all rows/columns of the
- **CV_REDUCE_SUM** The output is the sum of all rows/columns of the matrix.
- **CV_REDUCE_AVG** The output is the mean vector of all rows/columns of the matrix.
- **CV_REDUCE_MAX** The output is the maximum (column/row-wise) of all rows/columns of the
matrix.
- **CV\_REDUCE\_MIN** The output is the minimum (column/row-wise) of all rows/columns of the
- **CV_REDUCE_MIN** The output is the minimum (column/row-wise) of all rows/columns of the
matrix.
@param dtype When it is negative, the destination vector will have the same type as the source
matrix. Otherwise, its type will be CV\_MAKE\_TYPE(CV\_MAT\_DEPTH(dtype), mtx.channels()) .
matrix. Otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels()) .
@param stream Stream for the asynchronous version.
The function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of
1D vectors and performing the specified operation on the vectors until a single row/column is
obtained. For example, the function can be used to compute horizontal and vertical projections of a
raster image. In case of CV\_REDUCE\_SUM and CV\_REDUCE\_AVG , the output may have a larger element
raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG , the output may have a larger element
bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction
modes.
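For instance, a horizontal projection of an 8-bit image (a sketch; cv::REDUCE_SUM is the C++ counterpart of the CV_REDUCE_SUM flag named above):

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::cuda::GpuMat d_img(cv::Mat(4, 6, CV_8UC1, cv::Scalar(1))), d_rowSum;

        // dim = 1 reduces each row to a single column; CV_32S avoids 8-bit overflow
        cv::cuda::reduce(d_img, d_rowSum, 1, cv::REDUCE_SUM, CV_32S);
        // d_rowSum is 4 x 1 with every element equal to 6
        return 0;
    }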
@ -740,7 +740,7 @@ CV_EXPORTS void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, i
/** @brief Computes a mean value and a standard deviation of matrix elements.
@param mtx Source matrix. CV\_8UC1 matrices are supported for now.
@param mtx Source matrix. CV_8UC1 matrices are supported for now.
@param mean Mean value.
@param stddev Standard deviation value.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@ -759,8 +759,8 @@ static inline void meanStdDev(InputArray src, Scalar& mean, Scalar& stddev)
/** @brief Computes a standard deviation of integral images.
@param src Source image. Only the CV\_32SC1 type is supported.
@param sqr Squared source image. Only the CV\_32FC1 type is supported.
@param src Source image. Only the CV_32SC1 type is supported.
@param sqr Squared source image. Only the CV_32FC1 type is supported.
@param dst Destination image with the same type and size as src .
@param rect Rectangular window.
@param stream Stream for the asynchronous version.
@ -775,12 +775,12 @@ CV_EXPORTS void rectStdDev(InputArray src, InputArray sqr, OutputArray dst, Rect
normalization.
@param beta Upper range boundary in case of the range normalization; it is not used for the norm
normalization.
@param norm_type Normalization type ( NORM\_MINMAX , NORM\_L2 , NORM\_L1 or NORM\_INF ).
@param norm_type Normalization type ( NORM_MINMAX , NORM_L2 , NORM_L1 or NORM_INF ).
@param dtype When negative, the output array has the same type as src; otherwise, it has the same
number of channels as src and the depth =CV\_MAT\_DEPTH(dtype).
number of channels as src and the depth =CV_MAT_DEPTH(dtype).
@param mask Optional operation mask.
@param norm\_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param cvt\_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param norm_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param cvt_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa normalize
*/
@ -799,8 +799,8 @@ static inline void normalize(InputArray src, OutputArray dst, double alpha = 1,
/** @brief Computes an integral image.
@param src Source image. Only CV\_8UC1 images are supported for now.
@param sum Integral image containing 32-bit unsigned integer values packed into CV\_32SC1 .
@param src Source image. Only CV_8UC1 images are supported for now.
@param sum Integral image containing 32-bit unsigned integer values packed into CV_32SC1 .
@param buffer Optional buffer to avoid extra memory allocations. It is resized automatically.
@param stream Stream for the asynchronous version.
@ -822,9 +822,9 @@ static inline void integral(InputArray src, OutputArray sum, Stream& stream = St
/** @brief Computes a squared integral image.
@param src Source image. Only CV\_8UC1 images are supported for now.
@param src Source image. Only CV_8UC1 images are supported for now.
@param sqsum Squared integral image containing 64-bit unsigned integer values packed into
CV\_64FC1 .
CV_64FC1 .
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param stream Stream for the asynchronous version.
*/
@ -845,8 +845,8 @@ static inline void sqrIntegral(InputArray src, OutputArray sqsum, Stream& stream
/** @brief Performs generalized matrix multiplication.
@param src1 First multiplied input matrix that should have CV\_32FC1 , CV\_64FC1 , CV\_32FC2 , or
CV\_64FC2 type.
@param src1 First multiplied input matrix that should have CV_32FC1 , CV_64FC1 , CV_32FC2 , or
CV_64FC2 type.
@param src2 Second multiplied input matrix of the same type as src1 .
@param alpha Weight of the matrix product.
@param src3 Third optional delta matrix added to the matrix product. It should have the same type
@ -854,17 +854,17 @@ as src1 and src2 .
@param beta Weight of src3 .
@param dst Destination matrix. It has the proper size and the same type as input matrices.
@param flags Operation flags:
- **GEMM\_1\_T** transpose src1
- **GEMM\_2\_T** transpose src2
- **GEMM\_3\_T** transpose src3
- **GEMM_1_T** transpose src1
- **GEMM_2_T** transpose src2
- **GEMM_3_T** transpose src3
@param stream Stream for the asynchronous version.
The function performs generalized matrix multiplication similar to the gemm functions in BLAS level
3. For example, gemm(src1, src2, alpha, src3, beta, dst, GEMM\_1\_T + GEMM\_3\_T) corresponds to
3. For example, gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T) corresponds to
\f[\texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T\f]
@note Transposition operation doesn't support CV\_64FC2 input type.
@note Transposition operation doesn't support CV_64FC2 input type.
@sa gemm
*/
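A sketch of the transposed-product case from the paragraph above (gemm needs an OpenCV build with CUBLAS support):

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::Mat a = cv::Mat::eye(3, 3, CV_32FC1) * 2.f;
        cv::Mat b = cv::Mat::ones(3, 3, CV_32FC1);

        cv::cuda::GpuMat d_a(a), d_b(b), d_c;
        // dst = 1.0 * a^T * b; no delta matrix, so beta is 0
        cv::cuda::gemm(d_a, d_b, 1.0, cv::noArray(), 0.0, d_c, cv::GEMM_1_T);
        return 0;
    }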
@ -881,7 +881,7 @@ CV_EXPORTS void gemm(InputArray src1, InputArray src2, double alpha,
multiplication.
@param stream Stream for the asynchronous version.
Only full (not packed) CV\_32FC2 complex spectrums in the interleaved format are supported for now.
Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are supported for now.
@sa mulSpectrums
*/
@ -898,7 +898,7 @@ CV_EXPORTS void mulSpectrums(InputArray src1, InputArray src2, OutputArray dst,
multiplication.
@param stream Stream for the asynchronous version.
Only full (not packed) CV\_32FC2 complex spectrums in the interleaved format are supported for now.
Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are supported for now.
@sa mulSpectrums
*/
@ -908,14 +908,14 @@ CV_EXPORTS void mulAndScaleSpectrums(InputArray src1, InputArray src2, OutputArr
@param src Source matrix (real or complex).
@param dst Destination matrix (real or complex).
@param dft\_size Size of a discrete Fourier transform.
@param dft_size Size of a discrete Fourier transform.
@param flags Optional flags:
- **DFT\_ROWS** transforms each individual row of the source matrix.
- **DFT\_SCALE** scales the result: divide it by the number of elements in the transform
(obtained from dft\_size ).
- **DFT\_INVERSE** inverts DFT. Use for complex-complex cases (real-complex and complex-real
- **DFT_ROWS** transforms each individual row of the source matrix.
- **DFT_SCALE** scales the result: divide it by the number of elements in the transform
(obtained from dft_size ).
- **DFT_INVERSE** inverts DFT. Use for complex-complex cases (real-complex and complex-real
cases are always forward and inverse, respectively).
- **DFT\_REAL\_OUTPUT** specifies the output as real. The source matrix is the result of
- **DFT_REAL_OUTPUT** specifies the output as real. The source matrix is the result of
real-complex transform, so the destination matrix must be real.
@param stream Stream for the asynchronous version.
@ -926,14 +926,14 @@ function chooses an operation mode depending on the flags, size, and channel cou
matrix:
- If the source matrix is complex and the output is not specified as real, the destination
matrix is complex and has the dft\_size size and CV\_32FC2 type. The destination matrix
matrix is complex and has the dft_size size and CV_32FC2 type. The destination matrix
contains a full result of the DFT (forward or inverse).
- If the source matrix is complex and the output is specified as real, the function assumes that
its input is the result of the forward transform (see the next item). The destination matrix
has the dft\_size size and CV\_32FC1 type. It contains the result of the inverse DFT.
- If the source matrix is real (its type is CV\_32FC1 ), forward DFT is performed. The result of
the DFT is packed into complex ( CV\_32FC2 ) matrix. So, the width of the destination matrix
is dft\_size.width / 2 + 1 . But if the source is a single column, the height is reduced
has the dft_size size and CV_32FC1 type. It contains the result of the inverse DFT.
- If the source matrix is real (its type is CV_32FC1 ), forward DFT is performed. The result of
the DFT is packed into complex ( CV_32FC2 ) matrix. So, the width of the destination matrix
is dft_size.width / 2 + 1 . But if the source is a single column, the height is reduced
instead of the width.
@sa dft
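The real-to-complex case described above, as a minimal sketch:

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::Mat img(64, 64, CV_32FC1);
        cv::randu(img, 0.f, 1.f);

        cv::cuda::GpuMat d_img(img), d_spec;
        // CV_32FC1 input -> packed CV_32FC2 spectrum of size 64 x (64/2 + 1)
        cv::cuda::dft(d_img, d_spec, d_img.size());
        return 0;
    }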
@ -947,7 +947,7 @@ class CV_EXPORTS Convolution : public Algorithm
public:
/** @brief Computes a convolution (or cross-correlation) of two images.
@param image Source image. Only CV\_32FC1 images are supported for now.
@param image Source image. Only CV_32FC1 images are supported for now.
@param templ Template image. The size is not greater than the image size. The type is the same as
image .
@param result Result image. If image is *W x H* and templ is *w x h*, then result must be *W-w+1 x
@ -960,8 +960,8 @@ public:
/** @brief Creates implementation for cuda::Convolution .
@param user\_block\_size Block size. If you leave default value Size(0,0) then automatic
estimation of block size will be used (which is optimized for speed). By varying user\_block\_size
@param user_block_size Block size. If you leave default value Size(0,0) then automatic
estimation of block size will be used (which is optimized for speed). By varying user_block_size
you can reduce memory requirements at the cost of speed.
*/
CV_EXPORTS Ptr<Convolution> createConvolution(Size user_block_size = Size());
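Putting the two pieces together (a sketch, assuming the convolve method documented above):

    #include <opencv2/core.hpp>
    #include <opencv2/cudaarithm.hpp>

    int main()
    {
        cv::Mat image(128, 128, CV_32FC1), templ(16, 16, CV_32FC1);
        cv::randu(image, 0.f, 1.f);
        cv::randu(templ, 0.f, 1.f);

        cv::Ptr<cv::cuda::Convolution> conv = cv::cuda::createConvolution(); // automatic block size
        cv::cuda::GpuMat d_image(image), d_templ(templ), d_result;
        conv->convolve(d_image, d_templ, d_result, true); // true -> cross-correlation
        // d_result is (128-16+1) x (128-16+1), CV_32FC1
        return 0;
    }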


@ -75,7 +75,7 @@ class implements algorithm described in @cite MOG2001.
@note
- An example on Gaussian mixture based background/foreground segmentation can be found at
opencv\_source\_code/samples/gpu/bgfg\_segm.cpp
opencv_source_code/samples/gpu/bgfg_segm.cpp
*/
class CV_EXPORTS BackgroundSubtractorMOG : public cv::BackgroundSubtractor
{
@ -216,7 +216,7 @@ class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground\_regions Output array (CPU memory).
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};


@ -170,7 +170,7 @@ The implementation uses H264 video codec.
@note
- An example on how to use the videoWriter class can be found at
opencv\_source\_code/samples/gpu/video\_writer.cpp
opencv_source_code/samples/gpu/video_writer.cpp
*/
class CV_EXPORTS VideoWriter
{
@ -195,8 +195,8 @@ public:
@param fileName Name of the output video file. Only AVI file format is supported.
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
The constructors initialize video writer. FFMPEG is used to write videos. User can implement own
@ -208,8 +208,8 @@ CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frame
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param params Encoder parameters. See cudacodec::EncoderParams .
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
@ -219,8 +219,8 @@ CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frame
want to work with raw video stream.
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
@ -230,8 +230,8 @@ want to work with raw video stream.
@param frameSize Size of the input video frames.
@param fps Framerate of the created video stream.
@param params Encoder parameters. See cudacodec::EncoderParams .
@param format Surface format of input frames ( SF\_UYVY , SF\_YUY2 , SF\_YV12 , SF\_NV12 ,
SF\_IYUV , SF\_BGR or SF\_GRAY). BGR or gray frames will be converted to YV12 format before
@param format Surface format of input frames ( SF_UYVY , SF_YUY2 , SF_YV12 , SF_NV12 ,
SF_IYUV , SF_BGR or SF_GRAY). BGR or gray frames will be converted to YV12 format before
encoding, frames with other formats will be used as is.
*/
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
@ -282,7 +282,7 @@ struct FormatInfo
@note
- An example on how to use the videoReader class can be found at
opencv\_source\_code/samples/gpu/video\_reader.cpp
opencv_source_code/samples/gpu/video_reader.cpp
*/
class CV_EXPORTS VideoReader
{


@ -68,7 +68,7 @@ For each descriptor in the first set, this matcher finds the closest descriptor
by trying each one. This descriptor matcher supports masking permissible matches between descriptor
sets.
The class BFMatcher\_CUDA has an interface similar to the class DescriptorMatcher. It has two groups
The class BFMatcher_CUDA has an interface similar to the class DescriptorMatcher. It has two groups
of match methods: for matching descriptors of one image with another image or with an image set.
Also, all functions have an alternative to save results either to the GPU memory or to the CPU
memory.
@ -250,7 +250,7 @@ public:
@param nonmaxSuppression If it is true, non-maximum suppression is applied to detected corners
(keypoints).
@param keypointsRatio Inner buffer size for keypoints store is determined as (keypointsRatio \*
image\_width \* image\_height).
image_width \* image_height).
*/
explicit FAST_CUDA(int threshold, bool nonmaxSuppression = true, double keypointsRatio = 0.05);
@ -261,8 +261,8 @@ public:
@param mask Optional input mask that marks the regions where we should detect features.
@param keypoints The output vector of keypoints. Can be stored both in CPU and GPU memory. For GPU
memory:
- keypoints.ptr\<Vec2s\>(LOCATION\_ROW)[i] will contain location of i'th point
- keypoints.ptr\<float\>(RESPONSE\_ROW)[i] will contain response of i'th point (if non-maximum
- keypoints.ptr\<Vec2s\>(LOCATION_ROW)[i] will contain location of i'th point
- keypoints.ptr\<float\>(RESPONSE_ROW)[i] will contain response of i'th point (if non-maximum
suppression is applied)
*/
void operator ()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints);
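A usage sketch for FAST_CUDA; downloadKeypoints is assumed to behave as in the pre-3.0 FAST_GPU API:

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/cudafeatures2d.hpp>

    int main()
    {
        cv::Mat img(480, 640, CV_8UC1);
        cv::randu(img, 0, 255);

        cv::cuda::FAST_CUDA fast(20 /* threshold */, true /* non-max suppression */);
        cv::cuda::GpuMat d_img(img), d_keypoints;
        fast(d_img, cv::cuda::GpuMat(), d_keypoints); // empty mask -> whole image

        std::vector<cv::KeyPoint> keypoints;
        fast.downloadKeypoints(d_keypoints, keypoints);
        return 0;
    }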
@ -363,12 +363,12 @@ public:
@param mask Optional input mask that marks the regions where we should detect features.
@param keypoints The input/output vector of keypoints. Can be stored both in CPU and GPU memory.
For GPU memory:
- keypoints.ptr\<float\>(X\_ROW)[i] contains x coordinate of the i'th feature.
- keypoints.ptr\<float\>(Y\_ROW)[i] contains y coordinate of the i'th feature.
- keypoints.ptr\<float\>(RESPONSE\_ROW)[i] contains the response of the i'th feature.
- keypoints.ptr\<float\>(ANGLE\_ROW)[i] contains orientation of the i'th feature.
- keypoints.ptr\<float\>(OCTAVE\_ROW)[i] contains the octave of the i'th feature.
- keypoints.ptr\<float\>(SIZE\_ROW)[i] contains the size of the i'th feature.
- keypoints.ptr\<float\>(X_ROW)[i] contains x coordinate of the i'th feature.
- keypoints.ptr\<float\>(Y_ROW)[i] contains y coordinate of the i'th feature.
- keypoints.ptr\<float\>(RESPONSE_ROW)[i] contains the response of the i'th feature.
- keypoints.ptr\<float\>(ANGLE_ROW)[i] contains orientation of the i'th feature.
- keypoints.ptr\<float\>(OCTAVE_ROW)[i] contains the octave of the i'th feature.
- keypoints.ptr\<float\>(SIZE_ROW)[i] contains the size of the i'th feature.
@param descriptors Computed descriptors. if blurForDescriptor is true, image will be blurred
before descriptors calculation.
*/


@ -60,7 +60,7 @@ filtering operations on 2D images.
@note
- An example containing all basic morphology operators like erode and dilate can be found at
opencv\_source\_code/samples/gpu/morphology.cpp
opencv_source_code/samples/gpu/morphology.cpp
@}
*/
@ -89,7 +89,7 @@ public:
/** @brief Creates a normalized 2D box filter.
@param srcType Input image type. Only CV\_8UC1 and CV\_8UC4 are supported for now.
@param srcType Input image type. Only CV_8UC1 and CV_8UC4 are supported for now.
@param dstType Output image type. Only the same type as src is supported for now.
@param ksize Kernel size.
@param anchor Anchor point. The default value Point(-1, -1) means that the anchor is at the kernel
@ -107,7 +107,7 @@ CV_EXPORTS Ptr<Filter> createBoxFilter(int srcType, int dstType, Size ksize, Poi
/** @brief Creates a non-separable linear 2D filter.
@param srcType Input image type. Supports CV\_8U , CV\_16U and CV\_32F one and four channel image.
@param srcType Input image type. Supports CV_8U , CV_16U and CV_32F one and four channel image.
@param dstType Output image type. Only the same type as src is supported for now.
@param kernel 2D array of filter coefficients.
@param anchor Anchor point. The default value Point(-1, -1) means that the anchor is at the kernel
@ -125,7 +125,7 @@ CV_EXPORTS Ptr<Filter> createLinearFilter(int srcType, int dstType, InputArray k
/** @brief Creates a Laplacian operator.
@param srcType Input image type. Supports CV\_8U , CV\_16U and CV\_32F one and four channel image.
@param srcType Input image type. Supports CV_8U , CV_16U and CV_32F one and four channel image.
@param dstType Output image type. Only the same type as src is supported for now.
@param ksize Aperture size used to compute the second-derivative filters (see getDerivKernels). It
must be positive and odd. Only ksize = 1 and ksize = 3 are supported.
@ -243,14 +243,14 @@ CV_EXPORTS Ptr<Filter> createGaussianFilter(int srcType, int dstType, Size ksize
/** @brief Creates a 2D morphological filter.
@param op Type of morphological operation. The following types are possible:
- **MORPH\_ERODE** erode
- **MORPH\_DILATE** dilate
- **MORPH\_OPEN** opening
- **MORPH\_CLOSE** closing
- **MORPH\_GRADIENT** morphological gradient
- **MORPH\_TOPHAT** "top hat"
- **MORPH\_BLACKHAT** "black hat"
@param srcType Input/output image type. Only CV\_8UC1 and CV\_8UC4 are supported.
- **MORPH_ERODE** erode
- **MORPH_DILATE** dilate
- **MORPH_OPEN** opening
- **MORPH_CLOSE** closing
- **MORPH_GRADIENT** morphological gradient
- **MORPH_TOPHAT** "top hat"
- **MORPH_BLACKHAT** "black hat"
@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
@param kernel 2D 8-bit structuring element for the morphological operation.
@param anchor Anchor position within the structuring element. Negative values mean that the anchor
is at the center.
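For example, a 3x3 erosion built from this factory (a sketch; getStructuringElement comes from imgproc):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/cudafilters.hpp>

    int main()
    {
        cv::cuda::GpuMat d_src(cv::Mat(64, 64, CV_8UC1, cv::Scalar(255))), d_dst;

        cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
        cv::Ptr<cv::cuda::Filter> erode =
            cv::cuda::createMorphologyFilter(cv::MORPH_ERODE, CV_8UC1, kernel);
        erode->apply(d_src, d_dst); // same size and type as the source
        return 0;
    }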
@ -265,7 +265,7 @@ CV_EXPORTS Ptr<Filter> createMorphologyFilter(int op, int srcType, InputArray ke
/** @brief Creates the maximum filter.
@param srcType Input/output image type. Only CV\_8UC1 and CV\_8UC4 are supported.
@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
@ -277,7 +277,7 @@ CV_EXPORTS Ptr<Filter> createBoxMaxFilter(int srcType, Size ksize,
/** @brief Creates the minimum filter.
@param srcType Input/output image type. Only CV\_8UC1 and CV\_8UC4 are supported.
@param srcType Input/output image type. Only CV_8UC1 and CV_8UC4 are supported.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
@ -292,8 +292,8 @@ CV_EXPORTS Ptr<Filter> createBoxMinFilter(int srcType, Size ksize,
/** @brief Creates a horizontal 1D box filter.
@param srcType Input image type. Only CV\_8UC1 type is supported for now.
@param dstType Output image type. Only CV\_32FC1 type is supported for now.
@param srcType Input image type. Only CV_8UC1 type is supported for now.
@param dstType Output image type. Only CV_32FC1 type is supported for now.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .
@ -303,8 +303,8 @@ CV_EXPORTS Ptr<Filter> createRowSumFilter(int srcType, int dstType, int ksize, i
/** @brief Creates a vertical 1D box filter.
@param srcType Input image type. Only CV\_8UC1 type is supported for now.
@param dstType Output image type. Only CV\_32FC1 type is supported for now.
@param srcType Input image type. Only CV_8UC1 type is supported for now.
@param dstType Output image type. Only CV_32FC1 type is supported for now.
@param ksize Kernel size.
@param anchor Anchor point. The default value (-1) means that the anchor is at the kernel center.
@param borderMode Pixel extrapolation method. For details, see borderInterpolate .


@ -75,7 +75,7 @@ namespace cv { namespace cuda {
/** @brief Converts an image from one color space to another.
@param src Source image with CV\_8U , CV\_16U , or CV\_32F depth and 1, 3, or 4 channels.
@param src Source image with CV_8U , CV_16U , or CV_32F depth and 1, 3, or 4 channels.
@param dst Destination image.
@param code Color space conversion code. For details, see cvtColor .
@param dcn Number of channels in the destination image. If the parameter is 0, the number of the
@ -121,15 +121,15 @@ The function can do the following transformations:
- Demosaicing using bilinear interpolation
> - COLOR\_BayerBG2GRAY , COLOR\_BayerGB2GRAY , COLOR\_BayerRG2GRAY , COLOR\_BayerGR2GRAY
> - COLOR\_BayerBG2BGR , COLOR\_BayerGB2BGR , COLOR\_BayerRG2BGR , COLOR\_BayerGR2BGR
> - COLOR_BayerBG2GRAY , COLOR_BayerGB2GRAY , COLOR_BayerRG2GRAY , COLOR_BayerGR2GRAY
> - COLOR_BayerBG2BGR , COLOR_BayerGB2BGR , COLOR_BayerRG2BGR , COLOR_BayerGR2BGR
- Demosaicing using Malvar-He-Cutler algorithm (@cite MHT2011)
> - COLOR\_BayerBG2GRAY\_MHT , COLOR\_BayerGB2GRAY\_MHT , COLOR\_BayerRG2GRAY\_MHT ,
> COLOR\_BayerGR2GRAY\_MHT
> - COLOR\_BayerBG2BGR\_MHT , COLOR\_BayerGB2BGR\_MHT , COLOR\_BayerRG2BGR\_MHT ,
> COLOR\_BayerGR2BGR\_MHT
> - COLOR_BayerBG2GRAY_MHT , COLOR_BayerGB2GRAY_MHT , COLOR_BayerRG2GRAY_MHT ,
> COLOR_BayerGR2GRAY_MHT
> - COLOR_BayerBG2BGR_MHT , COLOR_BayerGB2BGR_MHT , COLOR_BayerRG2BGR_MHT ,
> COLOR_BayerGR2BGR_MHT
@sa cvtColor
*/
@ -137,7 +137,7 @@ CV_EXPORTS void demosaicing(InputArray src, OutputArray dst, int code, int dcn =
/** @brief Exchanges the color channels of an image in-place.
@param image Source image. Supports only CV\_8UC4 type.
@param image Source image. Supports only CV_8UC4 type.
@param dstOrder Integer array describing how channel values are permuted. The n-th entry of the
array contains the number of the channel that is stored in the n-th channel of the output image.
E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR channel order.
@ -161,28 +161,28 @@ enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA
/** @brief Composites two images using alpha opacity values contained in each image.
@param img1 First image. Supports CV\_8UC4 , CV\_16UC4 , CV\_32SC4 and CV\_32FC4 types.
@param img1 First image. Supports CV_8UC4 , CV_16UC4 , CV_32SC4 and CV_32FC4 types.
@param img2 Second image. Must have the same size and the same type as img1 .
@param dst Destination image.
@param alpha\_op Flag specifying the alpha-blending operation:
- **ALPHA\_OVER**
- **ALPHA\_IN**
- **ALPHA\_OUT**
- **ALPHA\_ATOP**
- **ALPHA\_XOR**
- **ALPHA\_PLUS**
- **ALPHA\_OVER\_PREMUL**
- **ALPHA\_IN\_PREMUL**
- **ALPHA\_OUT\_PREMUL**
- **ALPHA\_ATOP\_PREMUL**
- **ALPHA\_XOR\_PREMUL**
- **ALPHA\_PLUS\_PREMUL**
- **ALPHA\_PREMUL**
@param alpha_op Flag specifying the alpha-blending operation:
- **ALPHA_OVER**
- **ALPHA_IN**
- **ALPHA_OUT**
- **ALPHA_ATOP**
- **ALPHA_XOR**
- **ALPHA_PLUS**
- **ALPHA_OVER_PREMUL**
- **ALPHA_IN_PREMUL**
- **ALPHA_OUT_PREMUL**
- **ALPHA_ATOP_PREMUL**
- **ALPHA_XOR_PREMUL**
- **ALPHA_PLUS_PREMUL**
- **ALPHA_PREMUL**
@param stream Stream for the asynchronous version.
@note
- An example demonstrating the use of alphaComp can be found at
opencv\_source\_code/samples/gpu/alpha\_comp.cpp
opencv_source_code/samples/gpu/alpha_comp.cpp
*/
CV_EXPORTS void alphaComp(InputArray img1, InputArray img2, OutputArray dst, int alpha_op, Stream& stream = Stream::Null());
@ -195,15 +195,15 @@ CV_EXPORTS void alphaComp(InputArray img1, InputArray img2, OutputArray dst, int
/** @brief Calculates histogram for one channel 8-bit image.
@param src Source image with CV\_8UC1 type.
@param hist Destination histogram with one row, 256 columns, and the CV\_32SC1 type.
@param src Source image with CV_8UC1 type.
@param hist Destination histogram with one row, 256 columns, and the CV_32SC1 type.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void calcHist(InputArray src, OutputArray hist, Stream& stream = Stream::Null());
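A minimal sketch of calcHist on a random 8-bit image:

    #include <opencv2/core.hpp>
    #include <opencv2/cudaimgproc.hpp>

    int main()
    {
        cv::Mat img(256, 256, CV_8UC1);
        cv::randu(img, 0, 255);

        cv::cuda::GpuMat d_img(img), d_hist;
        cv::cuda::calcHist(d_img, d_hist); // 1 x 256, CV_32SC1

        cv::Mat hist;
        d_hist.download(hist); // bin counts on the host
        return 0;
    }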
/** @brief Equalizes the histogram of a grayscale image.
@param src Source image with CV\_8UC1 type.
@param src Source image with CV_8UC1 type.
@param dst Destination image.
@param buf Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
@param stream Stream for the asynchronous version.
@ -227,7 +227,7 @@ public:
using cv::CLAHE::apply;
/** @brief Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
@param src Source image with CV\_8UC1 type.
@param src Source image with CV_8UC1 type.
@param dst Destination image.
@param stream Stream for the asynchronous version.
*/
@ -244,7 +244,7 @@ CV_EXPORTS Ptr<cuda::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSi
/** @brief Computes levels with even distribution.
@param levels Destination array. levels has 1 row, nLevels columns, and the CV\_32SC1 type.
@param levels Destination array. levels has 1 row, nLevels columns, and the CV_32SC1 type.
@param nLevels Number of computed levels. nLevels must be at least 2.
@param lowerLevel Lower boundary value of the lowest level.
@param upperLevel Upper boundary value of the greatest level.
@ -253,9 +253,9 @@ CV_EXPORTS void evenLevels(OutputArray levels, int nLevels, int lowerLevel, int
/** @brief Calculates a histogram with evenly distributed bins.
@param src Source image. CV\_8U, CV\_16U, or CV\_16S depth and 1 or 4 channels are supported. For
@param src Source image. CV_8U, CV_16U, or CV_16S depth and 1 or 4 channels are supported. For
a four-channel image, all channels are processed separately.
@param hist Destination histogram with one row, histSize columns, and the CV\_32S type.
@param hist Destination histogram with one row, histSize columns, and the CV_32S type.
@param histSize Size of the histogram.
@param lowerLevel Lower boundary of lowest-level bin.
@param upperLevel Upper boundary of highest-level bin.
@ -283,9 +283,9 @@ static inline void histEven(InputArray src, GpuMat hist[4], int histSize[4], int
/** @brief Calculates a histogram with bins determined by the levels array.
@param src Source image. CV\_8U , CV\_16U , or CV\_16S depth and 1 or 4 channels are supported.
@param src Source image. CV_8U , CV_16U , or CV_16S depth and 1 or 4 channels are supported.
For a four-channel image, all channels are processed separately.
@param hist Destination histogram with one row, (levels.cols-1) columns, and the CV\_32SC1 type.
@param hist Destination histogram with one row, (levels.cols-1) columns, and the CV_32SC1 type.
@param levels Number of levels in the histogram.
@param buf Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
@param stream Stream for the asynchronous version.
@ -325,8 +325,8 @@ public:
*/
virtual void detect(InputArray image, OutputArray edges) = 0;
/** @overload
@param dx First derivative of image in the vertical direction. Support only CV\_32S type.
@param dy First derivative of image in the horizontal direction. Support only CV\_32S type.
@param dx First derivative of image in the vertical direction. Support only CV_32S type.
@param dy First derivative of image in the horizontal direction. Support only CV_32S type.
@param edges Output edge map. It has the same size and type as image .
*/
virtual void detect(InputArray dx, InputArray dy, OutputArray edges) = 0;
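A sketch of the single-image detect overload, using the createCannyEdgeDetector factory documented below:

    #include <opencv2/core.hpp>
    #include <opencv2/cudaimgproc.hpp>

    int main()
    {
        cv::Mat img(480, 640, CV_8UC1);
        cv::randu(img, 0, 255);

        cv::Ptr<cv::cuda::CannyEdgeDetector> canny =
            cv::cuda::createCannyEdgeDetector(50.0, 150.0); // hysteresis thresholds

        cv::cuda::GpuMat d_img(img), d_edges;
        canny->detect(d_img, d_edges); // edge map, same size as the input
        return 0;
    }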
@ -346,9 +346,9 @@ public:
/** @brief Creates implementation for cuda::CannyEdgeDetector .
@param low\_thresh First threshold for the hysteresis procedure.
@param high\_thresh Second threshold for the hysteresis procedure.
@param apperture\_size Aperture size for the Sobel operator.
@param low_thresh First threshold for the hysteresis procedure.
@param high_thresh Second threshold for the hysteresis procedure.
@param apperture_size Aperture size for the Sobel operator.
@param L2gradient Flag indicating whether a more accurate \f$L_2\f$ norm
\f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to compute the image gradient magnitude (
L2gradient=true ), or a faster default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough ( L2gradient=false
@ -383,9 +383,9 @@ public:
/** @brief Downloads results from cuda::HoughLinesDetector::detect to host memory.
@param d\_lines Result of cuda::HoughLinesDetector::detect .
@param h\_lines Output host array.
@param h\_votes Optional output array for line's votes.
@param d_lines Result of cuda::HoughLinesDetector::detect .
@param h_lines Output host array.
@param h_votes Optional output array for line's votes.
*/
virtual void downloadResults(InputArray d_lines, OutputArray h_lines, OutputArray h_votes = noArray()) = 0;
@ -547,7 +547,7 @@ public:
@param src Source image.
@param dst Destination image containing cornerness values. It will have the same size as src and
CV\_32FC1 type.
CV_32FC1 type.
@param stream Stream for the asynchronous version.
*/
virtual void compute(InputArray src, OutputArray dst, Stream& stream = Stream::Null()) = 0;
@ -555,11 +555,11 @@ public:
/** @brief Creates implementation for Harris cornerness criteria.
@param srcType Input source type. Only CV\_8UC1 and CV\_32FC1 are supported for now.
@param srcType Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.
@param blockSize Neighborhood size.
@param ksize Aperture parameter for the Sobel operator.
@param k Harris detector free parameter.
@param borderType Pixel extrapolation method. Only BORDER\_REFLECT101 and BORDER\_REPLICATE are
@param borderType Pixel extrapolation method. Only BORDER_REFLECT101 and BORDER_REPLICATE are
supported for now.
@sa cornerHarris
@ -569,10 +569,10 @@ CV_EXPORTS Ptr<CornernessCriteria> createHarrisCorner(int srcType, int blockSize
/** @brief Creates implementation for the minimum eigen value of a 2x2 derivative covariation matrix (the
cornerness criteria).
@param srcType Input source type. Only CV\_8UC1 and CV\_32FC1 are supported for now.
@param srcType Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.
@param blockSize Neighborhood size.
@param ksize Aperture parameter for the Sobel operator.
@param borderType Pixel extrapolation method. Only BORDER\_REFLECT101 and BORDER\_REPLICATE are
@param borderType Pixel extrapolation method. Only BORDER_REFLECT101 and BORDER_REPLICATE are
supported for now.
@sa cornerMinEigenVal
@ -589,17 +589,17 @@ public:
/** @brief Determines strong corners on an image.
@param image Input 8-bit or floating-point 32-bit, single-channel image.
@param corners Output vector of detected corners (1-row matrix with CV\_32FC2 type with corners
@param corners Output vector of detected corners (1-row matrix with CV_32FC2 type with corners
positions).
@param mask Optional region of interest. If the image is not empty (it needs to have the type
CV\_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
*/
virtual void detect(InputArray image, OutputArray corners, InputArray mask = noArray()) = 0;
};
/** @brief Creates implementation for cuda::CornersDetector .
@param srcType Input source type. Only CV\_8UC1 and CV\_32FC1 are supported for now.
@param srcType Input source type. Only CV_8UC1 and CV_32FC1 are supported for now.
@param maxCorners Maximum number of corners to return. If there are more corners than are found,
the strongest of them is returned.
@param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
@ -624,7 +624,7 @@ CV_EXPORTS Ptr<CornersDetector> createGoodFeaturesToTrackDetector(int srcType, i
/** @brief Performs mean-shift filtering for each point of the source image.
@param src Source image. Only CV\_8UC4 images are supported for now.
@param src Source image. Only CV_8UC4 images are supported for now.
@param dst Destination image containing the color of mapped points. It has the same size and type
as src .
@param sp Spatial window radius.
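A sketch of meanShiftFiltering with the CV_8UC4 input it requires (the remaining parameters keep their defaults):

    #include <opencv2/core.hpp>
    #include <opencv2/cudaimgproc.hpp>

    int main()
    {
        cv::Mat bgra(240, 320, CV_8UC4, cv::Scalar(64, 128, 192, 255));
        cv::cuda::GpuMat d_src(bgra), d_dst;

        // spatial window radius 10, color window radius 20
        cv::cuda::meanShiftFiltering(d_src, d_dst, 10, 20);
        return 0;
    }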
@ -642,11 +642,11 @@ CV_EXPORTS void meanShiftFiltering(InputArray src, OutputArray dst, int sp, int
/** @brief Performs a mean-shift procedure and stores information about processed points (their colors and
positions) in two images.
@param src Source image. Only CV\_8UC4 images are supported for now.
@param src Source image. Only CV_8UC4 images are supported for now.
@param dstr Destination image containing the color of mapped points. The size and type are the same
as src .
@param dstsp Destination image containing the position of mapped points. The size is the same as
src size. The type is CV\_16SC2 .
src size. The type is CV_16SC2 .
@param sp Spatial window radius.
@param sr Color window radius.
@param criteria Termination criteria. See TermCriteria.
@ -660,7 +660,7 @@ CV_EXPORTS void meanShiftProc(InputArray src, OutputArray dstr, OutputArray dsts
/** @brief Performs a mean-shift segmentation of the source image and eliminates small segments.
@param src Source image. Only CV\_8UC4 images are supported for now.
@param src Source image. Only CV_8UC4 images are supported for now.
@param dst Segmented image with the same size and type as src (host memory).
@param sp Spatial window radius.
@param sr Color window radius.
@ -681,7 +681,7 @@ public:
@param image Source image.
@param templ Template image with the size and type the same as image .
@param result Map containing comparison results ( CV\_32FC1 ). If image is *W x H* and templ is *w
@param result Map containing comparison results ( CV_32FC1 ). If image is *W x H* and templ is *w
x h*, then result must be *W-w+1 x H-h+1*.
@param stream Stream for the asynchronous version.
*/
@ -690,27 +690,27 @@ public:
/** @brief Creates implementation for cuda::TemplateMatching .
@param srcType Input source type. CV\_32F and CV\_8U depth images (1..4 channels) are supported
@param srcType Input source type. CV_32F and CV_8U depth images (1..4 channels) are supported
for now.
@param method Specifies the way to compare the template with the image.
@param user\_block\_size You can use field user\_block\_size to set specific block size. If you
@param user_block_size You can use field user_block_size to set specific block size. If you
leave its default value Size(0,0) then automatic estimation of block size will be used (which is
optimized for speed). By varying user\_block\_size you can reduce memory requirements at the cost
optimized for speed). By varying user_block_size you can reduce memory requirements at the cost
of speed.
The following methods are supported for the CV\_8U depth images for now:
The following methods are supported for the CV_8U depth images for now:
- CV\_TM\_SQDIFF
- CV\_TM\_SQDIFF\_NORMED
- CV\_TM\_CCORR
- CV\_TM\_CCORR\_NORMED
- CV\_TM\_CCOEFF
- CV\_TM\_CCOEFF\_NORMED
- CV_TM_SQDIFF
- CV_TM_SQDIFF_NORMED
- CV_TM_CCORR
- CV_TM_CCORR_NORMED
- CV_TM_CCOEFF
- CV_TM_CCOEFF_NORMED
The following methods are supported for the CV\_32F images for now:
The following methods are supported for the CV_32F images for now:
- CV\_TM\_SQDIFF
- CV\_TM\_CCORR
- CV_TM_SQDIFF
- CV_TM_CCORR
@sa matchTemplate
*/
@ -720,14 +720,14 @@ CV_EXPORTS Ptr<TemplateMatching> createTemplateMatching(int srcType, int method,
/** @brief Performs bilateral filtering of passed image
@param src Source image. Supports only (channles != 2 && depth() != CV\_8S && depth() != CV\_32S
&& depth() != CV\_64F).
@param src Source image. Supports only (channels != 2 && depth() != CV_8S && depth() != CV_32S
&& depth() != CV_64F).
@param dst Destination image.
@param kernel\_size Kernel window size.
@param sigma\_color Filter sigma in the color space.
@param sigma\_spatial Filter sigma in the coordinate space.
@param borderMode Border type. See borderInterpolate for details. BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param kernel_size Kernel window size.
@param sigma_color Filter sigma in the color space.
@param sigma_spatial Filter sigma in the coordinate space.
@param borderMode Border type. See borderInterpolate for details. BORDER_REFLECT101 ,
BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
@param stream Stream for the asynchronous version.
@sa bilateralFilter
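A minimal sketch honoring the depth/channel constraints listed above:

    #include <opencv2/core.hpp>
    #include <opencv2/cudaimgproc.hpp>

    int main()
    {
        cv::Mat img(240, 320, CV_8UC3);
        cv::randu(img, 0, 255);

        cv::cuda::GpuMat d_src(img), d_dst;
        // 7x7 window, color sigma 25, spatial sigma 5
        cv::cuda::bilateralFilter(d_src, d_dst, 7, 25.f, 5.f);
        return 0;
    }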
@ -739,11 +739,11 @@ CV_EXPORTS void bilateralFilter(InputArray src, OutputArray dst, int kernel_size
/** @brief Performs linear blending of two images.
@param img1 First image. Supports only CV\_8U and CV\_32F depth.
@param img1 First image. Supports only CV_8U and CV_32F depth.
@param img2 Second image. Must have the same size and the same type as img1 .
@param weights1 Weights for first image. Must have tha same size as img1 . Supports only CV\_32F
@param weights1 Weights for first image. Must have the same size as img1 . Supports only CV_32F
type.
@param weights2 Weights for second image. Must have tha same size as img2 . Supports only CV\_32F
@param weights2 Weights for second image. Must have the same size as img2 . Supports only CV_32F
type.
@param result Destination image.
@param stream Stream for the asynchronous version.


@ -110,7 +110,7 @@ iterative Lucas-Kanade method with pyramids.
@note
- An example of the Lucas Kanade optical flow algorithm can be found at
opencv\_source\_code/samples/gpu/pyrlk\_optical\_flow.cpp
opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
*/
class CV_EXPORTS PyrLKOpticalFlow
{
@ -122,13 +122,13 @@ public:
@param prevImg First 8-bit input image (supports both grayscale and color images).
@param nextImg Second input image of the same size and the same type as prevImg .
@param prevPts Vector of 2D points for which the flow needs to be found. It must be one row matrix
with CV\_32FC2 type.
with CV_32FC2 type.
@param nextPts Output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image. When useInitialFlow
is true, the vector must have the same size as in the input.
@param status Output status vector (CV\_8UC1 type). Each element of the vector is set to 1 if the
@param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
flow for the corresponding features has been found. Otherwise, it is set to 0.
@param err Output vector (CV\_32FC1 type) that contains the difference between patches around the
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not
needed.
@ -145,7 +145,7 @@ public:
floating-point, single-channel
@param v Vertical component of the optical flow of the same size as input images, 32-bit
floating-point, single-channel
@param err Output vector (CV\_32FC1 type) that contains the difference between patches around the
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not
needed.
*/
@ -374,7 +374,7 @@ private:
@param bv Backward vertical displacement.
@param pos New frame position.
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV\_32FC1 type and contain 6
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.


@ -106,7 +106,7 @@ The class implements algorithm described in @cite Felzenszwalb2006 . It can comp
\f[width\_step \cdot height \cdot ndisp \cdot (1 + 0.25 + 0.0625 + \dotsm + \frac{1}{4^{levels}})\f]
width\_step is the number of bytes in a line including padding.
width_step is the number of bytes in a line including padding.
StereoBeliefPropagation uses a truncated linear model for the data cost and discontinuity terms:
@ -116,8 +116,8 @@ StereoBeliefPropagation uses a truncated linear model for the data cost and disc
For more details, see @cite Felzenszwalb2006.
By default, StereoBeliefPropagation uses floating-point arithmetics and the CV\_32FC1 type for
messages. But it can also use fixed-point arithmetics and the CV\_16SC1 message type for better
By default, StereoBeliefPropagation uses floating-point arithmetic and the CV_32FC1 type for
messages. But it can also use fixed-point arithmetic and the CV_16SC1 message type for better
performance. To avoid an overflow in this case, the parameters must satisfy the following
requirement:
@ -135,9 +135,9 @@ public:
/** @brief Enables the stereo correspondence operator that finds the disparity for the specified data cost.
@param data User-specified data cost, a matrix of msg\_type type and
@param data User-specified data cost, a matrix of msg_type type and
Size(\<image columns\>\*ndisp, \<image rows\>) size.
@param disparity Output disparity map. If disparity is empty, the output type is CV\_16SC1 .
@param disparity Output disparity map. If disparity is empty, the output type is CV_16SC1 .
Otherwise, the type is retained.
@param stream Stream for the asynchronous version.
*/
@ -182,7 +182,7 @@ public:
@param ndisp Number of disparities.
@param iters Number of BP iterations on each level.
@param levels Number of levels.
@param msg\_type Type for messages. CV\_16SC1 and CV\_32FC1 types are supported.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
createStereoBeliefPropagation(int ndisp = 64, int iters = 5, int levels = 5, int msg_type = CV_32F);
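A sketch wiring the factory to the inherited compute interface (assumes the StereoMatcher-style compute(left, right, disparity) entry point):

    #include <opencv2/core.hpp>
    #include <opencv2/cudastereo.hpp>

    int main()
    {
        cv::Mat left(240, 320, CV_8UC1), right(240, 320, CV_8UC1);
        cv::randu(left, 0, 255);
        cv::randu(right, 0, 255);

        cv::Ptr<cv::cuda::StereoBeliefPropagation> bp =
            cv::cuda::createStereoBeliefPropagation(64, 5, 5, CV_32F);

        cv::cuda::GpuMat d_left(left), d_right(right), d_disp;
        bp->compute(d_left, d_right, d_disp);
        return 0;
    }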
@ -195,7 +195,7 @@ CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>
The class implements algorithm described in @cite Yang2010. StereoConstantSpaceBP supports both local
minimum and global minimum data cost initialization algorithms. For more details, see the paper
mentioned above. By default, a local algorithm is used. To enable a global algorithm, set
use\_local\_init\_data\_cost to false .
use_local_init_data_cost to false .
StereoConstantSpaceBP uses a truncated linear model for the data cost and discontinuity terms:
@ -205,8 +205,8 @@ StereoConstantSpaceBP uses a truncated linear model for the data cost and discon
For more details, see @cite Yang2010.
By default, StereoConstantSpaceBP uses floating-point arithmetics and the CV\_32FC1 type for
messages. But it can also use fixed-point arithmetics and the CV\_16SC1 message type for better
By default, StereoConstantSpaceBP uses floating-point arithmetic and the CV_32FC1 type for
messages. But it can also use fixed-point arithmetic and the CV_16SC1 message type for better
performance. To avoid an overflow in this case, the parameters must satisfy the following
requirement:
@ -234,8 +234,8 @@ public:
@param ndisp Number of disparities.
@param iters Number of BP iterations on each level.
@param levels Number of levels.
@param nr\_plane Number of disparity levels on the first level.
@param msg\_type Type for messages. CV\_16SC1 and CV\_32FC1 types are supported.
@param nr_plane Number of disparity levels on the first level.
@param msg_type Type for messages. CV_16SC1 and CV_32FC1 types are supported.
*/
CV_EXPORTS Ptr<cuda::StereoConstantSpaceBP>
createStereoConstantSpaceBP(int ndisp = 128, int iters = 8, int levels = 4, int nr_plane = 4, int msg_type = CV_32F);
@ -252,8 +252,8 @@ class CV_EXPORTS DisparityBilateralFilter : public cv::Algorithm
public:
/** @brief Refines a disparity map using joint bilateral filtering.
@param disparity Input disparity map. CV\_8UC1 and CV\_16SC1 types are supported.
@param image Input image. CV\_8UC1 and CV\_8UC3 types are supported.
@param disparity Input disparity map. CV_8UC1 and CV_16SC1 types are supported.
@param image Input image. CV_8UC1 and CV_8UC3 types are supported.
@param dst Destination disparity map. It has the same size and type as disparity .
@param stream Stream for the asynchronous version.
*/
@ -295,12 +295,12 @@ CV_EXPORTS Ptr<cuda::DisparityBilateralFilter>
/** @brief Reprojects a disparity image to 3D space.
@param disp Input disparity image. CV\_8U and CV\_16S types are supported.
@param disp Input disparity image. CV_8U and CV_16S types are supported.
@param xyzw Output 3- or 4-channel floating-point image of the same size as disp . Each element of
xyzw(x,y) contains 3D coordinates (x,y,z) or (x,y,z,1) of the point (x,y) , computed from the
disparity map.
@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained via stereoRectify .
@param dst\_cn The number of channels for output image. Can be 3 or 4.
@param dst_cn The number of channels for output image. Can be 3 or 4.
@param stream Stream for the asynchronous version.
@sa reprojectImageTo3D
@ -309,8 +309,8 @@ CV_EXPORTS void reprojectImageTo3D(InputArray disp, OutputArray xyzw, InputArray
/** @brief Colors a disparity image.
@param src\_disp Source disparity image. CV\_8UC1 and CV\_16SC1 types are supported.
@param dst\_disp Output disparity image. It has the same size as src\_disp . The type is CV\_8UC4
@param src_disp Source disparity image. CV_8UC1 and CV_16SC1 types are supported.
@param dst_disp Output disparity image. It has the same size as src_disp . The type is CV_8UC4
in BGRA format (alpha = 255).
@param ndisp Number of disparities.
@param stream Stream for the asynchronous version.

View File

@ -66,12 +66,12 @@ namespace cv { namespace cuda {
@param src Source image.
@param dst Destination image with the same size as xmap and the same type as src .
@param xmap X values. Only CV\_32FC1 type is supported.
@param ymap Y values. Only CV\_32FC1 type is supported.
@param interpolation Interpolation method (see resize ). INTER\_NEAREST , INTER\_LINEAR and
INTER\_CUBIC are supported for now.
@param borderMode Pixel extrapolation method (see borderInterpolate ). BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param xmap X values. Only CV_32FC1 type is supported.
@param ymap Y values. Only CV_32FC1 type is supported.
@param interpolation Interpolation method (see resize ). INTER_NEAREST , INTER_LINEAR and
INTER_CUBIC are supported for now.
@param borderMode Pixel extrapolation method (see borderInterpolate ). BORDER_REFLECT101 ,
BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
@param borderValue Value used in case of a constant border. By default, it is 0.
@param stream Stream for the asynchronous version.
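A minimal sketch of a GPU remap (placeholder identifiers; `xmap`/`ymap` are CV_32FC1 cv::Mat maps of the destination size):
@code{.cpp}
cv::cuda::GpuMat d_src(src), d_xmap(xmap), d_ymap(ymap), d_dst;
cv::cuda::remap(d_src, d_dst, d_xmap, d_ymap,
                cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar());
@endcode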
@ -99,7 +99,7 @@ Either dsize or both fx and fy must be non-zero.
\f[\texttt{(double)dsize.width/src.cols}\f]
@param fy Scale factor along the vertical axis. If it is zero, it is computed as:
\f[\texttt{(double)dsize.height/src.rows}\f]
@param interpolation Interpolation method. INTER\_NEAREST , INTER\_LINEAR and INTER\_CUBIC are
@param interpolation Interpolation method. INTER_NEAREST , INTER_LINEAR and INTER_CUBIC are
supported for now.
@param stream Stream for the asynchronous version.
@ -109,14 +109,14 @@ CV_EXPORTS void resize(InputArray src, OutputArray dst, Size dsize, double fx=0,
/** @brief Applies an affine transformation to an image.
@param src Source image. CV\_8U , CV\_16U , CV\_32S , or CV\_32F depth and 1, 3, or 4 channels are
@param src Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
supported.
@param dst Destination image with the same type as src . The size is dsize .
@param M *2x3* transformation matrix.
@param dsize Size of the destination image.
@param flags Combination of interpolation methods (see resize) and the optional flag
WARP\_INVERSE\_MAP specifying that M is an inverse transformation ( dst=\>src ). Only
INTER\_NEAREST , INTER\_LINEAR , and INTER\_CUBIC interpolation methods are supported.
WARP_INVERSE_MAP specifying that M is an inverse transformation ( dst=\>src ). Only
INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are supported.
@param borderMode
@param borderValue
@param stream Stream for the asynchronous version.
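A minimal sketch: rotating an image 30 degrees about its center, building the matrix on the CPU and warping on the GPU (placeholder identifiers):
@code{.cpp}
cv::Mat M = cv::getRotationMatrix2D(
    cv::Point2f(src.cols / 2.f, src.rows / 2.f), 30.0, 1.0);

cv::cuda::GpuMat d_src(src), d_dst;
cv::cuda::warpAffine(d_src, d_dst, M, src.size(), cv::INTER_LINEAR);
@endcode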
@ -131,8 +131,8 @@ CV_EXPORTS void warpAffine(InputArray src, OutputArray dst, InputArray M, Size d
@param M *2x3* transformation matrix.
@param inverse Flag specifying that M is an inverse transformation ( dst=\>src ).
@param dsize Size of the destination image.
@param xmap X values with CV\_32FC1 type.
@param ymap Y values with CV\_32FC1 type.
@param xmap X values with CV_32FC1 type.
@param ymap Y values with CV_32FC1 type.
@param stream Stream for the asynchronous version.
@sa cuda::warpAffine , cuda::remap
@ -141,14 +141,14 @@ CV_EXPORTS void buildWarpAffineMaps(InputArray M, bool inverse, Size dsize, Outp
/** @brief Applies a perspective transformation to an image.
@param src Source image. CV\_8U , CV\_16U , CV\_32S , or CV\_32F depth and 1, 3, or 4 channels are
@param src Source image. CV_8U , CV_16U , CV_32S , or CV_32F depth and 1, 3, or 4 channels are
supported.
@param dst Destination image with the same type as src . The size is dsize .
@param M *3x3* transformation matrix.
@param dsize Size of the destination image.
@param flags Combination of interpolation methods (see resize ) and the optional flag
WARP\_INVERSE\_MAP specifying that M is the inverse transformation ( dst =\> src ). Only
INTER\_NEAREST , INTER\_LINEAR , and INTER\_CUBIC interpolation methods are supported.
WARP_INVERSE_MAP specifying that M is the inverse transformation ( dst =\> src ). Only
INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC interpolation methods are supported.
@param borderMode
@param borderValue
@param stream Stream for the asynchronous version.
@ -163,8 +163,8 @@ CV_EXPORTS void warpPerspective(InputArray src, OutputArray dst, InputArray M, S
@param M *3x3* transformation matrix.
@param inverse Flag specifying that M is an inverse transformation ( dst=\>src ).
@param dsize Size of the destination image.
@param xmap X values with CV\_32FC1 type.
@param ymap Y values with CV\_32FC1 type.
@param xmap X values with CV_32FC1 type.
@param ymap Y values with CV_32FC1 type.
@param stream Stream for the asynchronous version.
@sa cuda::warpPerspective , cuda::remap
@ -188,14 +188,14 @@ CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, InputArray K
/** @brief Rotates an image around the origin (0,0) and then shifts it.
@param src Source image. Supports 1, 3 or 4 channels images with CV\_8U , CV\_16U or CV\_32F
@param src Source image. Supports 1-, 3- or 4-channel images with CV_8U , CV_16U or CV_32F
depth.
@param dst Destination image with the same type as src . The size is dsize .
@param dsize Size of the destination image.
@param angle Angle of rotation in degrees.
@param xShift Shift along the horizontal axis.
@param yShift Shift along the vertical axis.
@param interpolation Interpolation method. Only INTER\_NEAREST , INTER\_LINEAR , and INTER\_CUBIC
@param interpolation Interpolation method. Only INTER_NEAREST , INTER_LINEAR , and INTER_CUBIC
are supported.
@param stream Stream for the asynchronous version.

View File

@ -59,11 +59,11 @@ implement vector descriptor matchers inherit the DescriptorMatcher interface.
@note
- An example explaining keypoint matching can be found at
opencv\_source\_code/samples/cpp/descriptor\_extractor\_matcher.cpp
opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
- An example on descriptor matching evaluation can be found at
opencv\_source\_code/samples/cpp/detector\_descriptor\_matcher\_evaluation.cpp
opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
- An example on one to many image matching can be found at
opencv\_source\_code/samples/cpp/matching\_to\_many\_images.cpp
opencv_source_code/samples/cpp/matching_to_many_images.cpp
@defgroup features2d_draw Drawing Function of Keypoints and Matches
@defgroup features2d_category Object Categorization
@ -72,9 +72,9 @@ This section describes approaches based on local 2D features and used to categor
@note
- A complete Bag-Of-Words sample can be found at
opencv\_source\_code/samples/cpp/bagofwords\_classification.cpp
opencv_source_code/samples/cpp/bagofwords_classification.cpp
- (Python) An example using the features2D framework to perform object categorization can be
found at opencv\_source\_code/samples/python2/find\_obj.py
found at opencv_source_code/samples/python2/find_obj.py
@}
*/
@ -265,22 +265,22 @@ public:
will mean that to cover a certain scale range you will need more pyramid levels and so the speed
will suffer.
@param nlevels The number of pyramid levels. The smallest level will have linear size equal to
input\_image\_linear\_size/pow(scaleFactor, nlevels).
input_image_linear_size/pow(scaleFactor, nlevels).
@param edgeThreshold This is the size of the border where the features are not detected. It should
roughly match the patchSize parameter.
@param firstLevel It should be 0 in the current implementation.
@param WTA\_K The number of points that produce each element of the oriented BRIEF descriptor. The
@param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The
default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,
so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3
random points (of course, those point coordinates are random, but they are generated from the
pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel
rectangle), find the point of maximum brightness and output the index of the winner (0, 1 or 2). Such
output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,
denoted as NORM\_HAMMING2 (2 bits per bin). When WTA\_K=4, we take 4 random points to compute each
denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each
bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
@param scoreType The default HARRIS\_SCORE means that Harris algorithm is used to rank features
@param scoreType The default HARRIS_SCORE means that the Harris algorithm is used to rank features
(the score is written to KeyPoint::score and is used to retain best nfeatures features);
FAST\_SCORE is alternative value of the parameter that produces slightly less stable keypoints,
FAST_SCORE is an alternative value of the parameter that produces slightly less stable keypoints,
but it is a little faster to compute.
@param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller
pyramid layers the perceived image area covered by a feature will be larger.
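A minimal sketch of typical usage, assuming the same factory-style create() that this header uses for KAZE/AKAZE below (`image` is a placeholder 8-bit cv::Mat):
@code{.cpp}
cv::Ptr<cv::ORB> orb = cv::ORB::create();  // defaults for the parameters above

std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
orb->detectAndCompute(image, cv::noArray(), keypoints, descriptors);
@endcode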
@ -325,7 +325,7 @@ The class encapsulates all the parameters of the MSER extraction algorithm (see
@note
- (Python) A complete example showing the use of the MSER detector can be found at
opencv\_source\_code/samples/python2/mser.py
opencv_source_code/samples/python2/mser.py
*/
class CV_EXPORTS_W MSER : public Feature2D
{
@ -366,13 +366,13 @@ circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the three neighborhoods as defined in the paper:
FastFeatureDetector::TYPE\_9\_16, FastFeatureDetector::TYPE\_7\_12,
FastFeatureDetector::TYPE\_5\_8
FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
FastFeatureDetector::TYPE_5_8
Detects corners using the FAST algorithm by @cite Rosten06.
@note In Python API, types are given as cv2.FAST\_FEATURE\_DETECTOR\_TYPE\_5\_8,
cv2.FAST\_FEATURE\_DETECTOR\_TYPE\_7\_12 and cv2.FAST\_FEATURE\_DETECTOR\_TYPE\_9\_16. For corner
@note In Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
detection, use the cv2.FAST.detect() method.
*/
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
@ -529,8 +529,8 @@ public:
@param threshold Detector response threshold to accept point
@param nOctaves Maximum octave evolution of the image
@param nOctaveLayers Default number of sublevels per scale level
@param diffusivity Diffusivity type. DIFF\_PM\_G1, DIFF\_PM\_G2, DIFF\_WEICKERT or
DIFF\_CHARBONNIER
@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
DIFF_CHARBONNIER
*/
CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
float threshold = 0.001f,
@ -577,15 +577,15 @@ public:
/** @brief The AKAZE constructor
@param descriptor\_type Type of the extracted descriptor: DESCRIPTOR\_KAZE,
DESCRIPTOR\_KAZE\_UPRIGHT, DESCRIPTOR\_MLDB or DESCRIPTOR\_MLDB\_UPRIGHT.
@param descriptor\_size Size of the descriptor in bits. 0 -\> Full size
@param descriptor\_channels Number of channels in the descriptor (1, 2, 3)
@param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
@param descriptor_size Size of the descriptor in bits. 0 -\> Full size
@param descriptor_channels Number of channels in the descriptor (1, 2, 3)
@param threshold Detector response threshold to accept point
@param nOctaves Maximum octave evolution of the image
@param nOctaveLayers Default number of sublevels per scale level
@param diffusivity Diffusivity type. DIFF\_PM\_G1, DIFF\_PM\_G2, DIFF\_WEICKERT or
DIFF\_CHARBONNIER
@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
DIFF_CHARBONNIER
*/
CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
int descriptor_size = 0, int descriptor_channels = 3,
@ -947,9 +947,9 @@ class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
public:
/** @brief Brute-force matcher constructor.
@param normType One of NORM\_L1, NORM\_L2, NORM\_HAMMING, NORM\_HAMMING2. L1 and L2 norms are
preferable choices for SIFT and SURF descriptors, NORM\_HAMMING should be used with ORB, BRISK and
BRIEF, NORM\_HAMMING2 should be used with ORB when WTA\_K==3 or 4 (see ORB::ORB constructor
@param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
description).
@param crossCheck If it is false, this will be the default BFMatcher behaviour when it finds the k
nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
@ -977,7 +977,7 @@ protected:
/** @brief Flann-based descriptor matcher.
This matcher trains flann::Index\_ on a train descriptor collection and calls its nearest search
This matcher trains flann::Index_ on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher. FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this.
@ -1053,9 +1053,9 @@ output image. See possible flags bit values below.
DrawMatchesFlags. See details above in drawMatches .
@note
For Python API, flags are modified as cv2.DRAW\_MATCHES\_FLAGS\_DEFAULT,
cv2.DRAW\_MATCHES\_FLAGS\_DRAW\_RICH\_KEYPOINTS, cv2.DRAW\_MATCHES\_FLAGS\_DRAW\_OVER\_OUTIMG,
cv2.DRAW\_MATCHES\_FLAGS\_NOT\_DRAW\_SINGLE\_POINTS
For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
*/
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
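A minimal sketch tying the pieces together (placeholder identifiers; ORB descriptors are binary, hence NORM_HAMMING):
@code{.cpp}
cv::BFMatcher matcher(cv::NORM_HAMMING);
std::vector<cv::DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);

cv::Mat vis;
cv::drawKeypoints(image, keypoints, vis, cv::Scalar::all(-1),
                  cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
@endcode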

View File

@ -114,7 +114,7 @@ public:
/** @brief Constructs a nearest neighbor search index for a given dataset.
@param features Matrix containing the features (points) to index. The size of the matrix is
num\_features x feature\_dimensionality and the data type of the elements in the matrix must
num_features x feature_dimensionality and the data type of the elements in the matrix must
coincide with the type of the index.
@param params Structure containing the index parameters. The type of index that will be
constructed depends on the type of this parameter. See the description.
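A minimal sketch (assumes `features` is a CV_32F matrix of row vectors and `query` a row of the same dimensionality):
@code{.cpp}
cv::flann::Index index(features, cv::flann::KDTreeIndexParams(4));

cv::Mat indices, dists;
index.knnSearch(query, indices, dists, 5, cv::flann::SearchParams(32));
@endcode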

View File

@ -202,10 +202,10 @@ typedef void (*ButtonCallback)(int state, void* userdata);
@param winname Name of the window in the window caption that may be used as a window identifier.
@param flags Flags of the window. The supported flags are:
> - **WINDOW\_NORMAL** If this is set, the user can resize the window (no constraint).
> - **WINDOW\_AUTOSIZE** If this is set, the window size is automatically adjusted to fit the
> - **WINDOW_NORMAL** If this is set, the user can resize the window (no constraint).
> - **WINDOW_AUTOSIZE** If this is set, the window size is automatically adjusted to fit the
> displayed image (see imshow ), and you cannot change the window size manually.
> - **WINDOW\_OPENGL** If this is set, the window will be created with OpenGL support.
> - **WINDOW_OPENGL** If this is set, the window will be created with OpenGL support.
The function namedWindow creates a window that can be used as a placeholder for images and
trackbars. Created windows are referred to by their names.
@ -219,14 +219,14 @@ resources and windows of the application are closed automatically by the operati
@note
Qt backend supports additional flags:
- **CV\_WINDOW\_NORMAL or CV\_WINDOW\_AUTOSIZE:** CV\_WINDOW\_NORMAL enables you to resize the
window, whereas CV\_WINDOW\_AUTOSIZE adjusts automatically the window size to fit the
- **CV_WINDOW_NORMAL or CV_WINDOW_AUTOSIZE:** CV_WINDOW_NORMAL enables you to resize the
window, whereas CV_WINDOW_AUTOSIZE adjusts automatically the window size to fit the
displayed image (see imshow ), and you cannot change the window size manually.
- **CV\_WINDOW\_FREERATIO or CV\_WINDOW\_KEEPRATIO:** CV\_WINDOW\_FREERATIO adjusts the image
with no respect to its ratio, whereas CV\_WINDOW\_KEEPRATIO keeps the image ratio.
- **CV\_GUI\_NORMAL or CV\_GUI\_EXPANDED:** CV\_GUI\_NORMAL is the old way to draw the window
without statusbar and toolbar, whereas CV\_GUI\_EXPANDED is a new enhanced GUI.
By default, flags == CV\_WINDOW\_AUTOSIZE | CV\_WINDOW\_KEEPRATIO | CV\_GUI\_EXPANDED
- **CV_WINDOW_FREERATIO or CV_WINDOW_KEEPRATIO:** CV_WINDOW_FREERATIO adjusts the image
with no respect to its ratio, whereas CV_WINDOW_KEEPRATIO keeps the image ratio.
- **CV_GUI_NORMAL or CV_GUI_EXPANDED:** CV_GUI_NORMAL is the old way to draw the window
without statusbar and toolbar, whereas CV_GUI_EXPANDED is a new enhanced GUI.
By default, flags == CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_EXPANDED
*/
CV_EXPORTS_W void namedWindow(const String& winname, int flags = WINDOW_AUTOSIZE);
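A minimal sketch (`image` is a placeholder cv::Mat):
@code{.cpp}
cv::namedWindow("preview", cv::WINDOW_NORMAL);  // user-resizable window
cv::imshow("preview", image);
cv::waitKey(0);  // wait for a key press
@endcode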
@ -275,7 +275,7 @@ CV_EXPORTS_W int waitKey(int delay = 0);
@param mat Image to be shown.
The function imshow displays an image in the specified window. If the window was created with the
CV\_WINDOW\_AUTOSIZE flag, the image is shown with its original size. Otherwise, the image is scaled
CV_WINDOW_AUTOSIZE flag, the image is shown with its original size. Otherwise, the image is scaled
to fit the window. The function may scale the image, depending on its depth:
- If the image is 8-bit unsigned, it is displayed as is.
@ -309,7 +309,7 @@ CV_EXPORTS_W void imshow(const String& winname, InputArray mat);
@note
- The specified window size is for the image area. Toolbars are not counted.
- Only windows created without CV\_WINDOW\_AUTOSIZE flag can be resized.
- Only windows created without CV_WINDOW_AUTOSIZE flag can be resized.
*/
CV_EXPORTS_W void resizeWindow(const String& winname, int width, int height);
@ -325,19 +325,19 @@ CV_EXPORTS_W void moveWindow(const String& winname, int x, int y);
@param winname Name of the window.
@param prop_id Window property to edit. The following operation flags are available:
- **CV\_WND\_PROP\_FULLSCREEN** Change if the window is fullscreen ( CV\_WINDOW\_NORMAL or
CV\_WINDOW\_FULLSCREEN ).
- **CV\_WND\_PROP\_AUTOSIZE** Change if the window is resizable (CV\_WINDOW\_NORMAL or
CV\_WINDOW\_AUTOSIZE ).
- **CV\_WND\_PROP\_ASPECTRATIO** Change if the aspect ratio of the image is preserved (
CV\_WINDOW\_FREERATIO or CV\_WINDOW\_KEEPRATIO ).
- **CV_WND_PROP_FULLSCREEN** Change if the window is fullscreen ( CV_WINDOW_NORMAL or
CV_WINDOW_FULLSCREEN ).
- **CV_WND_PROP_AUTOSIZE** Change if the window is resizable (CV_WINDOW_NORMAL or
CV_WINDOW_AUTOSIZE ).
- **CV_WND_PROP_ASPECTRATIO** Change if the aspect ratio of the image is preserved (
CV_WINDOW_FREERATIO or CV_WINDOW_KEEPRATIO ).
@param prop_value New value of the window property. The following operation flags are available:
- **CV\_WINDOW\_NORMAL** Change the window to normal size or make the window resizable.
- **CV\_WINDOW\_AUTOSIZE** Constrain the size by the displayed image. The window is not
- **CV_WINDOW_NORMAL** Change the window to normal size or make the window resizable.
- **CV_WINDOW_AUTOSIZE** Constrain the size by the displayed image. The window is not
resizable.
- **CV\_WINDOW\_FULLSCREEN** Change the window to fullscreen.
- **CV\_WINDOW\_FREERATIO** Make the window resizable without any ratio constraints.
- **CV\_WINDOW\_KEEPRATIO** Make the window resizable, but preserve the proportions of the
- **CV_WINDOW_FULLSCREEN** Change the window to fullscreen.
- **CV_WINDOW_FREERATIO** Make the window resizable without any ratio constraints.
- **CV_WINDOW_KEEPRATIO** Make the window resizable, but preserve the proportions of the
displayed image.
The function setWindowProperty enables changing properties of a window.
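A minimal sketch of toggling an existing window to fullscreen and back (using the C++ enum counterparts of the CV_* constants above):
@code{.cpp}
cv::setWindowProperty("preview", cv::WND_PROP_FULLSCREEN, cv::WINDOW_FULLSCREEN);
cv::setWindowProperty("preview", cv::WND_PROP_FULLSCREEN, cv::WINDOW_NORMAL);
@endcode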
@ -352,12 +352,12 @@ CV_EXPORTS_W void setWindowTitle(const String& winname, const String& title);
@param winname Name of the window.
@param prop_id Window property to retrieve. The following operation flags are available:
- **CV\_WND\_PROP\_FULLSCREEN** Change if the window is fullscreen ( CV\_WINDOW\_NORMAL or
CV\_WINDOW\_FULLSCREEN ).
- **CV\_WND\_PROP\_AUTOSIZE** Change if the window is resizable (CV\_WINDOW\_NORMAL or
CV\_WINDOW\_AUTOSIZE ).
- **CV\_WND\_PROP\_ASPECTRATIO** Change if the aspect ratio of the image is preserved
(CV\_WINDOW\_FREERATIO or CV\_WINDOW\_KEEPRATIO ).
- **CV_WND_PROP_FULLSCREEN** Change if the window is fullscreen ( CV_WINDOW_NORMAL or
CV_WINDOW_FULLSCREEN ).
- **CV_WND_PROP_AUTOSIZE** Change if the window is resizable (CV_WINDOW_NORMAL or
CV_WINDOW_AUTOSIZE ).
- **CV_WND_PROP_ASPECTRATIO** Change if the aspect ratio of the image is preserved
(CV_WINDOW_FREERATIO or CV_WINDOW_KEEPRATIO ).
See setWindowProperty to know the meaning of the returned values.
@ -375,8 +375,8 @@ use the callback.
*/
CV_EXPORTS void setMouseCallback(const String& winname, MouseCallback onMouse, void* userdata = 0);
/** @brief Gets the mouse-wheel motion delta, when handling mouse-wheel events EVENT\_MOUSEWHEEL and
EVENT\_MOUSEHWHEEL.
/** @brief Gets the mouse-wheel motion delta, when handling mouse-wheel events EVENT_MOUSEWHEEL and
EVENT_MOUSEHWHEEL.
@param flags The mouse callback flags parameter.
@ -385,11 +385,11 @@ a one notch rotation of the wheel or the threshold for action to be taken and on
occur for each delta. Some high-precision mice with higher-resolution freely-rotating wheels may
generate smaller values.
For EVENT\_MOUSEWHEEL positive and negative values mean forward and backward scrolling,
respectively. For EVENT\_MOUSEHWHEEL, where available, positive and negative values mean right and
For EVENT_MOUSEWHEEL positive and negative values mean forward and backward scrolling,
respectively. For EVENT_MOUSEHWHEEL, where available, positive and negative values mean right and
left scrolling, respectively.
With the C API, the macro CV\_GET\_WHEEL\_DELTA(flags) can be used alternatively.
With the C API, the macro CV_GET_WHEEL_DELTA(flags) can be used alternatively.
@note
@ -426,7 +426,7 @@ Clicking the label of each trackbar enables editing the trackbar values manually
@note
- An example of using the trackbar functionality can be found at
opencv\_source\_code/samples/cpp/connected\_components.cpp
opencv_source_code/samples/cpp/connected_components.cpp
*/
CV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname,
int* value, int count,
@ -549,20 +549,20 @@ struct QtFont
*Times*). If the font is not found, a default one is used.
@param pointSize Size of the font. If not specified, equal zero or negative, the point size of the
font is set to a system-dependent default value. Generally, this is 12 points.
@param color Color of the font in BGRA where A = 255 is fully transparent. Use the macro CV \_ RGB
@param color Color of the font in BGRA where A = 255 is fully transparent. Use the macro CV_RGB
for simplicity.
@param weight Font weight. The following operation flags are available:
- **CV\_FONT\_LIGHT** Weight of 25
- **CV\_FONT\_NORMAL** Weight of 50
- **CV\_FONT\_DEMIBOLD** Weight of 63
- **CV\_FONT\_BOLD** Weight of 75
- **CV\_FONT\_BLACK** Weight of 87
- **CV_FONT_LIGHT** Weight of 25
- **CV_FONT_NORMAL** Weight of 50
- **CV_FONT_DEMIBOLD** Weight of 63
- **CV_FONT_BOLD** Weight of 75
- **CV_FONT_BLACK** Weight of 87
You can also specify a positive integer for better control.
@param style Font style. The following operation flags are available:
- **CV\_STYLE\_NORMAL** Normal font
- **CV\_STYLE\_ITALIC** Italic font
- **CV\_STYLE\_OBLIQUE** Oblique font
- **CV_STYLE_NORMAL** Normal font
- **CV_STYLE_ITALIC** Italic font
- **CV_STYLE_OBLIQUE** Oblique font
@param spacing Spacing between characters. It can be negative or positive.
The function fontQt creates a CvFont object. This CvFont is not compatible with putText .
@ -613,7 +613,7 @@ zero, the text never disappears.
The function displayStatusBar displays useful information/tips on top of the window for a certain
amount of time *delayms* . This information is displayed on the window statusbar (the window must be
created with the CV\_GUI\_EXPANDED flags).
created with the CV_GUI_EXPANDED flags).
*/
CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms = 0);
@ -622,7 +622,7 @@ CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int
@param windowName Name of the window.
The function saveWindowParameters saves the size, location, flags, trackbar values, zoom and panning
location of the window window\_name .
location of the window window_name .
*/
CV_EXPORTS void saveWindowParameters(const String& windowName);
@ -631,7 +631,7 @@ CV_EXPORTS void saveWindowParameters(const String& windowName);
@param windowName Name of the window.
The function loadWindowParameters loads the size, location, flags, trackbar values, zoom and panning
location of the window window\_name .
location of the window window_name .
*/
CV_EXPORTS void loadWindowParameters(const String& windowName);
@ -648,9 +648,9 @@ This function should be prototyped as void Foo(int state,\*void); . *state* is t
of the button. It could be -1 for a push button, 0 or 1 for a check/radio box button.
@param userdata Pointer passed to the callback function.
@param type Optional type of the button.
- **CV\_PUSH\_BUTTON** Push button
- **CV\_CHECKBOX** Checkbox button
- **CV\_RADIOBOX** Radiobox button. The radiobox on the same buttonbar (same line) are
- **CV_PUSH_BUTTON** Push button
- **CV_CHECKBOX** Checkbox button
- **CV_RADIOBOX** Radiobox button. Radiobox buttons on the same buttonbar (same line) are
exclusive, that is, only one can be selected at a time.
@param initial_button_state Default state of the button. Used for checkbox and radiobox buttons. Its
value could be 0 or 1. *(Optional)*
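A minimal sketch (Qt backend only; CV_CHECKBOX is the constant named above):
@code{.cpp}
static void onToggle(int state, void* /*userdata*/)
{
    // state is 0 or 1 for a checkbox
}

// ... after the Qt GUI is available:
cv::createButton("enable filter", onToggle, NULL, CV_CHECKBOX, 1);
@endcode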

View File

@ -92,10 +92,10 @@ enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0,
@param filename Name of file to be loaded.
@param flags Flags specifying the color type of a loaded image:
- CV\_LOAD\_IMAGE\_ANYDEPTH - If set, return 16-bit/32-bit image when the input has the
- CV_LOAD_IMAGE_ANYDEPTH - If set, return a 16-bit/32-bit image when the input has the
corresponding depth, otherwise convert it to 8-bit.
- CV\_LOAD\_IMAGE\_COLOR - If set, always convert image to the color one
- CV\_LOAD\_IMAGE\_GRAYSCALE - If set, always convert image to the grayscale one
- CV_LOAD_IMAGE_COLOR - If set, always convert the image to a color one
- CV_LOAD_IMAGE_GRAYSCALE - If set, always convert the image to a grayscale one
- **\>0** Return a 3-channel color image.
@note In the current implementation the alpha channel, if any, is stripped from the output image.
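A minimal sketch of the load flags in use, together with the imwrite parameters described further below (the cv::IMREAD_*/cv::IMWRITE_* enums are the C++ counterparts of the CV_* constants in this section):
@code{.cpp}
cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);

std::vector<int> params;
params.push_back(cv::IMWRITE_JPEG_QUALITY);  // paramId_1
params.push_back(95);                        // paramValue_1
cv::imwrite("output.jpg", gray, params);
@endcode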
@ -128,7 +128,7 @@ returns an empty matrix ( Mat::data==NULL ). Currently, the following file forma
- On Linux\*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for
codecs supplied with an OS image. Install the relevant packages (do not forget the development
files, for example, "libjpeg-dev", in Debian\* and Ubuntu\*) to get the codec support or turn
on the OPENCV\_BUILD\_3RDPARTY\_LIBS flag in CMake.
on the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake.
@note In the case of color images, the decoded images will have the channels stored in B G R order.
*/
@ -139,20 +139,20 @@ CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
@param filename Name of the file.
@param img Image to be saved.
@param params Format-specific save parameters encoded as pairs
paramId\_1, paramValue\_1, paramId\_2, paramValue\_2, ... . The following parameters are currently
paramId_1, paramValue_1, paramId_2, paramValue_2, ... . The following parameters are currently
supported:
- For JPEG, it can be a quality ( CV\_IMWRITE\_JPEG\_QUALITY ) from 0 to 100 (the higher is
- For JPEG, it can be a quality ( CV_IMWRITE_JPEG_QUALITY ) from 0 to 100 (the higher,
the better). Default value is 95.
- For WEBP, it can be a quality ( CV\_IMWRITE\_WEBP\_QUALITY ) from 1 to 100 (the higher is
- For WEBP, it can be a quality ( CV_IMWRITE_WEBP_QUALITY ) from 1 to 100 (the higher,
the better). By default (without any parameter) and for quality above 100, lossless
compression is used.
- For PNG, it can be the compression level ( CV\_IMWRITE\_PNG\_COMPRESSION ) from 0 to 9. A
- For PNG, it can be the compression level ( CV_IMWRITE_PNG_COMPRESSION ) from 0 to 9. A
higher value means a smaller size and longer compression time. Default value is 3.
- For PPM, PGM, or PBM, it can be a binary format flag ( CV\_IMWRITE\_PXM\_BINARY ), 0 or 1.
- For PPM, PGM, or PBM, it can be a binary format flag ( CV_IMWRITE_PXM_BINARY ), 0 or 1.
Default value is 1.
The function imwrite saves the image to the specified file. The image format is chosen based on the
filename extension (see imread for the list of extensions). Only 8-bit (or 16-bit unsigned (CV\_16U)
filename extension (see imread for the list of extensions). Only 8-bit (or 16-bit unsigned (CV_16U)
in case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel (with 'BGR' channel order) images
can be saved using this function. If the format, depth or channel order is different, use
Mat::convertTo , and cvtColor to convert it before saving. Or, use the universal FileStorage I/O
@ -239,7 +239,7 @@ CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);
The function compresses the image and stores it in the memory buffer that is resized to fit the
result. See imwrite for the list of supported formats and flags description.
@note cvEncodeImage returns single-row matrix of type CV\_8UC1 that contains encoded image as array
@note cvEncodeImage returns a single-row matrix of type CV_8UC1 that contains the encoded image as an array
of bytes.
*/
CV_EXPORTS_W bool imencode( const String& ext, InputArray img,

View File

@ -1361,7 +1361,7 @@ call
is equivalent to
\f[\texttt{Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, borderType)} .\f]
\f[\texttt{Sobel(src, dst, ddepth, dx, dy, CV\_SCHARR, scale, delta, borderType)} .\f]
@param src input image.
@param dst output image of the same size and the same number of channels as src.
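A minimal sketch of the equivalent direct call (placeholder identifiers):
@code{.cpp}
cv::Mat dx;
cv::Scharr(src, dx, CV_16S, 1, 0);  // first-order derivative along x
@endcode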

View File

@ -92,7 +92,7 @@ SVM implementation in OpenCV is based on @cite LibSVM.
Prediction with SVM
-------------------
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW\_OUTPUT to get
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get
the raw response from SVM (in the case of regression, 1-class or 2-class classification problem).
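A minimal sketch (`svm` is a placeholder for an already trained cv::ml::SVM):
@code{.cpp}
cv::Mat rawResponses;
svm->predict(samples, rawResponses, cv::ml::StatModel::RAW_OUTPUT);
@endcode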
@defgroup ml_decsiontrees Decision Trees
@ -126,8 +126,8 @@ index is stored in the observed node. The following variables are possible:
could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For
example, if the color is green or red, go to the left, else to the right.
So, in each node, a pair of entities (variable\_index , `decision_rule (threshold/subset)` ) is
used. This pair is called a *split* (split on the variable variable\_index ). Once a leaf node is
So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is
used. This pair is called a *split* (split on the variable variable_index ). Once a leaf node is
reached, the value assigned to this node is used as the output of the prediction procedure.
Sometimes, certain features of the input vector are missing (for example, in the darkness it is
@ -232,15 +232,15 @@ the ensemble is increased, a larger number of the training samples are classifie
increasing confidence, thereby those samples receive smaller weights on the subsequent iterations.
Examples with a very low relative weight have a small impact on the weak classifier training. Thus,
such examples may be excluded during the weak classifier training without having much effect on the
induced classifier. This process is controlled with the weight\_trim\_rate parameter. Only examples
with the summary fraction weight\_trim\_rate of the total weight mass are used in the weak
induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples
with the summary fraction weight_trim_rate of the total weight mass are used in the weak
classifier training. Note that the weights for **all** training examples are recomputed at each
training iteration. Examples deleted at a particular iteration may be used again for learning some
of the weak classifiers further @cite FHT98.
Prediction with Boost
---------------------
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW\_OUTPUT to get
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get
the raw sum from Boost classifier.
@defgroup ml_randomtrees Random Trees
@ -260,7 +260,7 @@ randomly select the same number of vectors as in the original set ( =N ). The ve
with replacement. That is, some vectors will occur more than once and some will be absent. At each
node of each trained tree, not all the variables are used to find the best split, but a random
subset of them. With each node a new subset is generated. However, its size is fixed for all the
nodes and all the trees. It is a training parameter set to \f$\sqrt{number\_of\_variables}\f$ by
nodes and all the trees. It is a training parameter set to \f$\sqrt{number_of_variables}\f$ by
default. None of the built trees are pruned.
In random trees there is no need for any accuracy estimation procedures, such as cross-validation or
@ -280,7 +280,7 @@ about N/3 . The classification error is estimated by using this oob-data as foll
to all the vectors in the original data. In case of regression, the oob-error is computed as the
squared error over the oob vectors divided by the total number of vectors.
For the random trees usage example, please, see letter\_recog.cpp sample in OpenCV distribution.
For the random trees usage example, please, see letter_recog.cpp sample in OpenCV distribution.
**References:**
@ -373,15 +373,15 @@ computed as:
Different activation functions may be used. ML implements three standard functions:
- Identity function ( ANN\_MLP::IDENTITY ): \f$f(x)=x\f$
- Identity function ( ANN_MLP::IDENTITY ): \f$f(x)=x\f$
- Symmetrical sigmoid ( ANN\_MLP::SIGMOID\_SYM ): \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x}\f$
- Symmetrical sigmoid ( ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$,
which is the default choice for MLP. The standard sigmoid with \f$\beta =1, \alpha =1\f$ is shown
below:
![image](pics/sigmoid_bipolar.png)
- Gaussian function ( ANN\_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not completely
- Gaussian function ( ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not completely
supported at the moment.
In ML, all the neurons have the same activation functions, with the same free parameters (
@ -443,18 +443,18 @@ determined by LogisticRegression::Params.alpha. It determines how faster we appr
It is a positive real number. Optimization algorithms like Batch Gradient Descent and Mini-Batch
Gradient Descent are supported in LogisticRegression. It is important that we mention the number of
iterations these optimization algorithms have to run. The number of iterations is specified by
LogisticRegression::Params.num\_iters. The number of iterations can be thought as number of steps
LogisticRegression::Params.num_iters. The number of iterations can be thought of as the number of steps
taken, and the learning rate specifies whether each is a long step or a short step. These two parameters define
how fast we arrive at a possible solution. In order to compensate for overfitting, regularization is
performed, which can be enabled by setting LogisticRegression::Params.regularized to a positive
integer (greater than zero). One can specify what kind of regularization has to be performed by
setting LogisticRegression::Params.norm to LogisticRegression::REG\_L1 or
LogisticRegression::REG\_L2 values. LogisticRegression provides a choice of 2 training methods with
setting LogisticRegression::Params.norm to LogisticRegression::REG_L1 or
LogisticRegression::REG_L2 values. LogisticRegression provides a choice of 2 training methods with
Batch Gradient Descent or the Mini-Batch Gradient Descent. To specify this, set
LogisticRegression::Params.train\_method to either LogisticRegression::BATCH or
LogisticRegression::MINI\_BATCH. If LogisticRegression::Params is set to
LogisticRegression::MINI\_BATCH, the size of the mini batch has to be to a postive integer using
LogisticRegression::Params.mini\_batch\_size.
LogisticRegression::Params.train_method to either LogisticRegression::BATCH or
LogisticRegression::MINI_BATCH. If LogisticRegression::Params is set to
LogisticRegression::MINI_BATCH, the size of the mini batch has to be set to a positive integer using
LogisticRegression::Params.mini_batch_size.
A sample set of training parameters for the Logistic Regression classifier can be initialized as
follows:
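The sample itself is not part of this hunk; as a hedged sketch, the fields named above could be filled in like this (values are illustrative only, and default-constructibility of Params is assumed):
@code{.cpp}
cv::ml::LogisticRegression::Params params;
params.alpha = 0.5;             // learning rate
params.num_iters = 10000;       // number of optimization steps
params.norm = cv::ml::LogisticRegression::REG_L2;
params.regularized = 1;         // enable regularization
params.train_method = cv::ml::LogisticRegression::MINI_BATCH;
params.mini_batch_size = 10;
@endcode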
@ -713,22 +713,22 @@ public:
char missch='?');
/** @brief Creates training data from in-memory arrays.
@param samples matrix of samples. It should have CV\_32F type.
@param layout it's either ROW\_SAMPLE, which means that each training sample is a row of samples,
or COL\_SAMPLE, which means that each training sample occupies a column of samples.
@param samples matrix of samples. It should have CV_32F type.
@param layout it's either ROW_SAMPLE, which means that each training sample is a row of samples,
or COL_SAMPLE, which means that each training sample occupies a column of samples.
@param responses matrix of responses. If the responses are scalar, they should be stored as a
single row or as a single column. The matrix should have type CV\_32F or CV\_32S (in the former
single row or as a single column. The matrix should have type CV_32F or CV_32S (in the former
case the responses are considered as ordered by default; in the latter case - as categorical)
@param varIdx vector specifying which variables to use for training. It can be an integer vector
(CV\_32S) containing 0-based variable indices or byte vector (CV\_8U) containing a mask of active
(CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of active
variables.
@param sampleIdx vector specifying which samples to use for training. It can be an integer vector
(CV\_32S) containing 0-based sample indices or byte vector (CV\_8U) containing a mask of training
(CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask of training
samples.
@param sampleWeights optional vector with weights for each sample. It should have CV\_32F type.
@param varType optional vector of type CV\_8U and size \<number\_of\_variables\_in\_samples\> +
\<number\_of\_variables\_in\_responses\>, containing types of each input and output variable. The
ordered variables are denoted by value VAR\_ORDERED, and categorical - by VAR\_CATEGORICAL.
@param sampleWeights optional vector with weights for each sample. It should have CV_32F type.
@param varType optional vector of type CV_8U and size \<number_of_variables_in_samples\> +
\<number_of_variables_in_responses\>, containing types of each input and output variable. The
ordered variables are denoted by value VAR_ORDERED, and categorical - by VAR_CATEGORICAL.
*/
static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
@ -770,7 +770,7 @@ public:
@param trainData training data that can be loaded from file using TrainData::loadFromCSV or
created with TrainData::create.
@param flags optional flags, depending on the model. Some of the models can be updated with the
new training samples, not completely overwritten (such as NormalBayesClassifier or ANN\_MLP).
new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
There are 2 instance methods and 2 static (class) template methods. The first two train the already
created model (the very first method must be overridden in the derived classes). And the latter two
@ -779,7 +779,7 @@ public:
virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );
/** @overload
@param samples training samples
@param layout ROW\_SAMPLE (training samples are the matrix rows) or COL\_SAMPLE (training samples
@param layout ROW_SAMPLE (training samples are the matrix rows) or COL_SAMPLE (training samples
are the matrix columns)
@param responses vector of responses associated with the training samples.
*/
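A minimal sketch of both entry points (`model` stands for any cv::ml::StatModel; `samples`/`responses` are placeholder CV_32F matrices):
@code{.cpp}
cv::Ptr<cv::ml::TrainData> tdata =
    cv::ml::TrainData::create(samples, cv::ml::ROW_SAMPLE, responses);
model->train(tdata);                                    // via TrainData
model->train(samples, cv::ml::ROW_SAMPLE, responses);   // via raw arrays
@endcode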
@ -805,7 +805,7 @@ public:
@param samples The input samples, floating-point matrix
@param results The optional output matrix of results.
@param flags The optional flags, model-dependent. Some models, such as Boost, SVM recognize
StatModel::RAW\_OUTPUT flag, which makes the method return the raw results (the sum), not the
StatModel::RAW_OUTPUT flag, which makes the method return the raw results (the sum), not the
class label.
*/
virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
@ -905,11 +905,11 @@ public:
@note
- (Python) An example of digit recognition using KNearest can be found at
opencv\_source/samples/python2/digits.py
opencv_source/samples/python2/digits.py
- (Python) An example of grid search digit recognition using KNearest can be found at
opencv\_source/samples/python2/digits\_adjust.py
opencv_source/samples/python2/digits_adjust.py
- (Python) An example of video digit recognition using KNearest can be found at
opencv\_source/samples/python2/digits\_video.py
opencv_source/samples/python2/digits_video.py
*/
class CV_EXPORTS_W KNearest : public StatModel
{
@ -930,14 +930,14 @@ public:
/** @brief Finds the neighbors and predicts responses for input vectors.
@param samples Input samples stored by rows. It is a single-precision floating-point matrix of
\<number\_of\_samples\> \* k size.
\<number_of_samples\> \* k size.
@param k Number of nearest neighbors used. Should be greater than 1.
@param results Vector with results of prediction (regression or classification) for each input
sample. It is a single-precision floating-point vector with \<number\_of\_samples\> elements.
sample. It is a single-precision floating-point vector with \<number_of_samples\> elements.
@param neighborResponses Optional output values for corresponding neighbors. It is a
single-precision floating-point matrix of \<number\_of\_samples\> \* k size.
single-precision floating-point matrix of \<number_of_samples\> \* k size.
@param dist Optional output distances from the input vectors to the corresponding neighbors. It is
a single-precision floating-point matrix of \<number\_of\_samples\> \* k size.
a single-precision floating-point matrix of \<number_of_samples\> \* k size.
For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. In
case of regression, the predicted result is a mean value of the particular vector's neighbor
@ -986,11 +986,11 @@ public:
@note
- (Python) An example of digit recognition using SVM can be found at
opencv\_source/samples/python2/digits.py
opencv_source/samples/python2/digits.py
- (Python) An example of grid search digit recognition using SVM can be found at
opencv\_source/samples/python2/digits\_adjust.py
opencv_source/samples/python2/digits_adjust.py
- (Python) An example of video digit recognition using SVM can be found at
opencv\_source/samples/python2/digits\_video.py
opencv_source/samples/python2/digits_video.py
*/
class CV_EXPORTS_W SVM : public StatModel
{
@ -1006,18 +1006,18 @@ public:
/** @brief The constructors
@param svm_type Type of a SVM formulation. Possible values are:
- **SVM::C\_SVC** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows
- **SVM::C_SVC** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows
imperfect separation of classes with penalty multiplier C for outliers.
- **SVM::NU\_SVC** \f$\nu\f$-Support Vector Classification. n-class classification with possible
- **SVM::NU_SVC** \f$\nu\f$-Support Vector Classification. n-class classification with possible
imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother
the decision boundary) is used instead of C.
- **SVM::ONE\_CLASS** Distribution Estimation (One-class SVM). All the training data are from
- **SVM::ONE_CLASS** Distribution Estimation (One-class SVM). All the training data are from
the same class, SVM builds a boundary that separates the class from the rest of the feature
space.
- **SVM::EPS\_SVR** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors
- **SVM::EPS_SVR** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors
from the training set and the fitting hyper-plane must be less than p. For outliers the
penalty multiplier C is used.
- **SVM::NU\_SVR** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p.
- **SVM::NU_SVR** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p.
See @cite LibSVM for details.
@param kernel_type Type of a SVM kernel. Possible values are:
- **SVM::LINEAR** Linear kernel. No mapping is done, linear discrimination (or regression) is
@ -1033,10 +1033,10 @@ public:
@param degree Parameter degree of a kernel function (POLY).
@param gamma Parameter \f$\gamma\f$ of a kernel function (POLY / RBF / SIGMOID / CHI2).
@param coef0 Parameter coef0 of a kernel function (POLY / SIGMOID).
@param Cvalue Parameter C of a SVM optimization problem (C\_SVC / EPS\_SVR / NU\_SVR).
@param nu Parameter \f$\nu\f$ of a SVM optimization problem (NU\_SVC / ONE\_CLASS / NU\_SVR).
@param p Parameter \f$\epsilon\f$ of a SVM optimization problem (EPS\_SVR).
@param classWeights Optional weights in the C\_SVC problem , assigned to particular classes. They
@param Cvalue Parameter C of a SVM optimization problem (C_SVC / EPS_SVR / NU_SVR).
@param nu Parameter \f$\nu\f$ of a SVM optimization problem (NU_SVC / ONE_CLASS / NU_SVR).
@param p Parameter \f$\epsilon\f$ of a SVM optimization problem (EPS_SVR).
@param classWeights Optional weights in the C_SVC problem , assigned to particular classes. They
are multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these
weights affect the misclassification penalty for different classes. The larger weight, the larger
penalty on misclassification of data from the corresponding class.
@ -1053,8 +1053,8 @@ public:
termCrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, FLT_EPSILON );
}
@endcode
A comparison of different kernels on the following 2D test case with four classes. Four C\_SVC SVMs
have been trained (one against rest) with auto\_train. Evaluation on three different kernels (CHI2,
A comparison of different kernels on the following 2D test case with four classes. Four C_SVC SVMs
have been trained (one against the rest) with auto_train. Evaluation on three different kernels (CHI2,
INTER, RBF). The color depicts the class with max score. Bright means max-score \> 0, dark means
max-score \< 0.
@ -1115,16 +1115,16 @@ public:
If there is no need to optimize a parameter, the corresponding grid step should be set to any value
less than or equal to 1. For example, to avoid optimization in gamma, set gammaGrid.step = 0,
gammaGrid.minVal, gamma\_grid.maxVal as arbitrary numbers. In this case, the value params.gamma is
gammaGrid.minVal, gammaGrid.maxVal as arbitrary numbers. In this case, the value params.gamma is
taken for gamma.
And, finally, if the optimization in a parameter is required but the corresponding grid is unknown,
you may call the function SVM::getDefaultGrid. To generate a grid, for example, for gamma, call
SVM::getDefaultGrid(SVM::GAMMA).
This function works for the classification (params.svmType=SVM::C\_SVC or
params.svmType=SVM::NU\_SVC) as well as for the regression (params.svmType=SVM::EPS\_SVR or
params.svmType=SVM::NU\_SVR). If params.svmType=SVM::ONE\_CLASS, no optimization is made and the
This function works for the classification (params.svmType=SVM::C_SVC or
params.svmType=SVM::NU_SVC) as well as for the regression (params.svmType=SVM::EPS_SVR or
params.svmType=SVM::NU_SVR). If params.svmType=SVM::ONE_CLASS, no optimization is made and the
usual SVM with parameters specified in params is executed.
*/
virtual bool trainAuto( const Ptr<TrainData>& data, int kFold = 10,
@ -1172,7 +1172,7 @@ public:
/** @brief Generates a grid for SVM parameters.
@param param\_id SVM parameters IDs that must be one of the following:
@param param_id SVM parameters IDs that must be one of the following:
- **SVM::C**
- **SVM::GAMMA**
- **SVM::P**
@ -1230,27 +1230,27 @@ public:
/** @brief The constructor
@param nclusters The number of mixture components in the Gaussian mixture model. Default value of
the parameter is EM::DEFAULT\_NCLUSTERS=5. Some of EM implementation could determine the optimal
the parameter is EM::DEFAULT_NCLUSTERS=5. Some EM implementations could determine the optimal
number of mixtures within a specified value range, but that is not the case in ML yet.
@param covMatType Constraint on covariance matrices which defines type of matrices. Possible
values are:
- **EM::COV\_MAT\_SPHERICAL** A scaled identity matrix \f$\mu_k * I\f$. There is the only
- **EM::COV_MAT_SPHERICAL** A scaled identity matrix \f$\mu_k * I\f$. Only the single
parameter \f$\mu_k\f$ has to be estimated for each matrix. The option may be used in special cases,
when the constraint is relevant, or as a first step in the optimization (for example in case
when the data is preprocessed with PCA). The results of such preliminary estimation may be
passed again to the optimization procedure, this time with
covMatType=EM::COV\_MAT\_DIAGONAL.
- **EM::COV\_MAT\_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of
covMatType=EM::COV_MAT_DIAGONAL.
- **EM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of
free parameters is d for each matrix. This is the most commonly used option, yielding good
estimation results.
- **EM::COV\_MAT\_GENERIC** A symmetric positively defined matrix. The number of free
- **EM::COV_MAT_GENERIC** A symmetric positive-definite matrix. The number of free
parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless
there is a pretty accurate initial estimation of the parameters and/or a huge number of
training samples.
@param termCrit The termination criteria of the EM algorithm. The EM algorithm can be terminated
by the number of iterations termCrit.maxCount (number of M-steps) or when relative change of
likelihood logarithm is less than termCrit.epsilon. Default maximum number of iterations is
EM::DEFAULT\_MAX\_ITERS=100.
EM::DEFAULT_MAX_ITERS=100.
*/
explicit Params(int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
@ -1286,7 +1286,7 @@ public:
@param sample A sample for classification. It should be a one-channel matrix of \f$1 \times dims\f$ or
\f$dims \times 1\f$ size.
@param probs Optional output matrix that contains posterior probabilities of each component given
the sample. It has \f$1 \times nclusters\f$ size and CV\_64FC1 type.
the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type.
The method returns a two-element double vector. The zeroth element is the likelihood logarithm value for the
sample. The first element is the index of the most probable mixture component for the given sample.
@ -1298,15 +1298,15 @@ public:
/** @brief Static methods that estimate the Gaussian mixture parameters from a sample set
@param samples Samples from which the Gaussian mixture model will be estimated. It should be a
one-channel matrix, each row of which is a sample. If the matrix does not have CV\_64F type it
one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it
will be converted to the inner matrix of such type for further computation.
@param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
each sample. It has \f$nsamples \times 1\f$ size and CV\_64FC1 type.
each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
@param labels The optional output "class label" for each sample:
\f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture
component for each sample). It has \f$nsamples \times 1\f$ size and CV\_32SC1 type.
component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
@param probs The optional output matrix that contains posterior probabilities of each Gaussian
mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV\_64FC1
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1
type.
@param params The Gaussian mixture params, see EM::Params description
@return true if the Gaussian mixture model was trained successfully, otherwise it returns
@ -1337,24 +1337,24 @@ public:
\f$S_k\f$ of mixture components.
@param samples Samples from which the Gaussian mixture model will be estimated. It should be a
one-channel matrix, each row of which is a sample. If the matrix does not have CV\_64F type it
one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it
will be converted to the inner matrix of such type for the further computing.
@param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of
\f$nclusters \times dims\f$ size. If the matrix does not have CV\_64F type it will be converted to the
\f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be converted to the
inner matrix of such type for further computation.
@param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of
covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices do not
have CV\_64F type they will be converted to the inner matrices of such type for the further
have CV_64F type they will be converted to the inner matrices of such type for further
computation.
@param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel
floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size.
@param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
each sample. It has \f$nsamples \times 1\f$ size and CV\_64FC1 type.
each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
@param labels The optional output "class label" for each sample:
\f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture
component for each sample). It has \f$nsamples \times 1\f$ size and CV\_32SC1 type.
component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
@param probs The optional output matrix that contains posterior probabilities of each Gaussian
mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and CV\_64FC1
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1
type.
@param params The Gaussian mixture params, see EM::Params description
*/
@ -1370,16 +1370,16 @@ public:
use this option.
@param samples Samples from which the Gaussian mixture model will be estimated. It should be a
one-channel matrix, each row of which is a sample. If the matrix does not have CV\_64F type it
one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type it
will be converted to the inner matrix of such type for further computation.
@param probs0
@param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
each sample. It has \f$nsamples \times 1\f$ size and CV\_64FC1 type.
each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
@param labels The optional output "class label" for each sample:
\f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable mixture
component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
@param probs The optional output matrix that contains posterior probabilities of each Gaussian
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and CV_64FC1
type.
@param params The Gaussian mixture params, see EM::Params description
*/
@ -1450,7 +1450,7 @@ public:
engines (including our implementation) try to find sub-optimal split in this case by clustering
all the samples into maxCategories clusters, that is, some categories are merged together. The
clustering is applied only in n \> 2-class classification problems for categorical variables
with N \> max_categories possible values. In case of regression and 2-class classification the
optimal split can be found efficiently without employing clustering, thus the parameter is not
used in these cases.
@ -1515,7 +1515,7 @@ public:
Value at the node: a class label in case of classification or estimated function value in case
of regression.
- member int classIdx
Class index normalized to 0..class_count-1 range and assigned to the node. It is used
internally in classification trees and tree ensembles.
- member int parent
Index of the parent node
@ -1653,11 +1653,11 @@ public:
@param useSurrogates
@param maxCategories Cluster possible values of a categorical variable into K \<= maxCategories
clusters to find a suboptimal split. If a discrete variable, on which the training procedure tries
to make a split, takes more than max_categories values, the precise best subset estimation may
take a very long time because the algorithm is exponential. Instead, many decision trees engines
(including ML) try to find sub-optimal split in this case by clustering all the samples into
maxCategories clusters, that is, some categories are merged together. The clustering is applied only
in n\>2-class classification problems for categorical variables with N \> max_categories possible
values. In case of regression and 2-class classification the optimal split can be found
efficiently without employing clustering, thus the parameter is not used in these cases.
@param priors
@ -1752,7 +1752,7 @@ public:
Gentle AdaBoost and Real AdaBoost are often the preferable choices.
@param weakCount The number of weak classifiers.
@param weightTrimRate A threshold between 0 and 1 used to save computational time. Samples
with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of
training. Set this parameter to 0 to turn off this functionality.
@param maxDepth
@param useSurrogates
@ -1844,7 +1844,7 @@ public:
Unlike many other models in ML that are constructed and trained at once, in the MLP model these
steps are separated. First, a network with the specified topology is created using the non-default
constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is
trained using a set of input and output vectors. The training procedure can be repeated more than
once, that is, the weights can be adjusted based on the new training data.
*/
@ -1861,13 +1861,13 @@ public:
of elements in the input layer. The last element - number of elements in the output layer.
- member int activateFunc
The activation function. Currently the only fully supported activation function is
ANN_MLP::SIGMOID_SYM.
- member double fparam1
The first parameter of activation function, 0 by default.
- member double fparam2
The second parameter of the activation function, 0 by default.
@note
If you are using the default ANN_MLP::SIGMOID_SYM activation function with the default
parameter values fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x),
so the output will range from [-1.7159, 1.7159], instead of [0,1].
@ -1899,7 +1899,7 @@ public:
@param layerSizes Integer vector specifying the number of neurons in each layer including the
input and output layers.
@param activateFunc Parameter specifying the activation function for each neuron: one of
ANN_MLP::IDENTITY, ANN_MLP::SIGMOID_SYM, and ANN_MLP::GAUSSIAN.
@param fparam1 The first parameter of the activation function, \f$\alpha\f$. See the formulas in the
introduction section.
@param fparam2 The second parameter of the activation function, \f$\beta\f$. See the formulas in the
@ -1908,11 +1908,11 @@ public:
of iterations (maxCount) and/or how much the error could change between the iterations to make the
algorithm continue (epsilon).
@param trainMethod Training method of the MLP. Possible values are:
- **ANN_MLP_TrainParams::BACKPROP** The back-propagation algorithm.
- **ANN_MLP_TrainParams::RPROP** The RPROP algorithm.
@param param1 Parameter of the training method. It is rp_dw0 for RPROP and bp_dw_scale for
BACKPROP.
@param param2 Parameter of the training method. It is rp_dw_min for RPROP and bp_moment_scale
for BACKPROP.
By default the RPROP algorithm is used:
@ -1973,18 +1973,18 @@ public:
/** @brief Creates empty model
Use StatModel::train to train the model, StatModel::train\<ANN_MLP\>(traindata, params) to create
and train the model, StatModel::load\<ANN_MLP\>(filename) to load the pre-trained model. Note that
the train method has optional flags, and the following flags are handled by `ANN_MLP`:
- **UPDATE_WEIGHTS** Algorithm updates the network weights, rather than computes them from
scratch. In the latter case the weights are initialized using the Nguyen-Widrow algorithm.
- **NO_INPUT_SCALE** Algorithm does not normalize the input vectors. If this flag is not set,
the training algorithm normalizes each input feature independently, shifting its mean value to
0 and making the standard deviation equal to 1. If the network is assumed to be updated
frequently, the new training data could be much different from original one. In this case, you
should take care of proper normalization.
- **NO_OUTPUT_SCALE** Algorithm does not normalize the output vectors. If the flag is not set,
the training algorithm normalizes each output feature independently, by transforming it to the
certain range depending on the used activation function.
*/
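
A hedged sketch of this flow using the StatModel::train\<ANN_MLP\>(traindata, params) form
mentioned above; the layer sizes, data shapes, and Params field order are illustrative
assumptions for this development branch:

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;

Ptr<ANN_MLP> trainNet(const Mat& samples, const Mat& responses) // CV_32F, one row per sample
{
    Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, responses);
    // input layer, one hidden layer of 10 neurons, output layer
    Mat layers = (Mat_<int>(3, 1) << samples.cols, 10, responses.cols);
    // Assumed Params order: layerSizes, activateFunc, fparam1, fparam2
    ANN_MLP::Params params(layers, ANN_MLP::SIGMOID_SYM, 0, 0);
    return StatModel::train<ANN_MLP>(tdata, params);
}
@endcode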
@ -2010,19 +2010,19 @@ public:
public:
/** @brief The constructors
@param learning_rate Specifies the learning rate.
@param iters Specifies the number of iterations.
@param method Specifies the kind of training method used. It should be set to either
LogisticRegression::BATCH or LogisticRegression::MINI_BATCH. If using
LogisticRegression::MINI_BATCH, set LogisticRegression::Params.mini_batch_size to a positive
integer.
@param normalization Specifies the kind of regularization to be applied.
LogisticRegression::REG_L1 or LogisticRegression::REG_L2 (L1 norm or L2 norm). To use this, set
LogisticRegression::Params.regularized to an integer greater than zero.
@param reg To enable or disable regularization. Set to positive integer (greater than zero) to
enable and to 0 to disable.
@param batch_size Specifies the number of training samples taken in each step of Mini-Batch
Gradient Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm.
It has to take values less than the total number of training samples.
By initializing this structure, one can set all the parameters required for Logistic Regression
@ -2062,8 +2062,8 @@ public:
/** @brief Predicts responses for input samples and returns a float type.
@param samples The input data for the prediction algorithm. Matrix [m x n], where each row
contains variables (features) of one object being classified. Should have data type CV_32F.
@param results Predicted labels as a column matrix of type CV_32S.
@param flags Not used.
*/
virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
@ -2072,7 +2072,7 @@ public:
/** @brief This function returns the trained parameters arranged across rows.
For a two-class classification problem, it returns a row matrix.
It returns learnt parameters of the Logistic Regression as a matrix of type CV_32F.
*/
virtual Mat get_learnt_thetas() const = 0;
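
A hedged end-to-end sketch tying the constructor parameters above to predict and
get_learnt_thetas; data shapes and parameter values are illustrative:

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;

void logregSketch(const Mat& data, const Mat& labels) // CV_32F, one sample per row
{
    // Assumed Params order: learning_rate, iters, method, normalization, reg, batch_size
    LogisticRegression::Params params(0.001, 100, LogisticRegression::BATCH,
                                      LogisticRegression::REG_L2, 1, 1);
    Ptr<LogisticRegression> lr = StatModel::train<LogisticRegression>(
        TrainData::create(data, ROW_SAMPLE, labels), params);
    Mat predicted;
    lr->predict(data, predicted);          // predicted: CV_32S column matrix
    Mat thetas = lr->get_learnt_thetas();  // learnt parameters, CV_32F
}
@endcode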

View File

@ -94,7 +94,7 @@ To see the object detector at work, have a look at the facedetect demo:
<https://github.com/Itseez/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>
The following reference is for the detection part only. There is a separate application called
opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in
addition to Haar-like features.
[Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection
@ -246,7 +246,7 @@ public:
/** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
of rectangles.
@param image Matrix of the type CV_8U containing an image where objects are detected.
@param objects Vector of rectangles where each rectangle contains the detected object, the
rectangles may be partially outside the original image.
@param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
@ -261,7 +261,7 @@ public:
@note
- (Python) A face detection example using cascade classifiers can be found at
opencv_source_code/samples/python2/facedetect.py
*/
CV_WRAP void detectMultiScale( InputArray image,
CV_OUT std::vector<Rect>& objects,
@ -271,7 +271,7 @@ public:
Size maxSize = Size() );
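
A minimal detection sketch using these parameters; the cascade and image paths are placeholders:

@code{.cpp}
#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;

int main()
{
    CascadeClassifier cascade("haarcascade_frontalface_alt.xml"); // placeholder path
    Mat img = imread("people.jpg"), gray;                         // placeholder image
    cvtColor(img, gray, COLOR_BGR2GRAY);
    equalizeHist(gray, gray); // detection usually works better on equalized input
    std::vector<Rect> faces;
    cascade.detectMultiScale(gray, faces, 1.1, 3, 0, Size(30, 30));
    for (size_t i = 0; i < faces.size(); i++)
        rectangle(img, faces[i], Scalar(0, 255, 0));
    return 0;
}
@endcode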
/** @overload
@param image Matrix of the type CV_8U containing an image where objects are detected.
@param objects Vector of rectangles where each rectangle contains the detected object, the
rectangles may be partially outside the original image.
@param numDetections Vector of detection numbers for the corresponding objects. An object's number

View File

@ -96,8 +96,8 @@ needs to be inpainted.
@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
by the algorithm.
@param flags Inpainting method that could be one of the following:
- **INPAINT_NS** Navier-Stokes based method [Navier01]
- **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04.
The function reconstructs the selected image area from the pixels near the area boundary. The
function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
@ -105,9 +105,9 @@ objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting
@note
- An example using the inpainting technique can be found at
opencv_source_code/samples/cpp/inpaint.cpp
- (Python) An example using the inpainting technique can be found at
opencv_source_code/samples/python2/inpaint.py
*/
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
OutputArray dst, double inpaintRadius, int flags );
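
For reference, a short usage sketch; the file names are placeholders, and the mask marks the
pixels to reconstruct with non-zero values:

@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

int main()
{
    Mat img  = imread("damaged.png");                 // placeholder
    Mat mask = imread("mask.png", IMREAD_GRAYSCALE);  // placeholder, 8-bit 1-channel
    Mat restored;
    inpaint(img, mask, restored, 3 /* inpaintRadius */, INPAINT_TELEA);
    imwrite("restored.png", restored);
    return 0;
}
@endcode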
@ -347,8 +347,8 @@ public:
@param contrast resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
are maximum and minimum luminance values of the resulting image.
@param saturation saturation enhancement value. See createTonemapDrago
@param sigma_space bilateral filter sigma in coordinate space
@param sigma_color bilateral filter sigma in color space
*/
CV_EXPORTS_W Ptr<TonemapDurand>
createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f);
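
A minimal tonemapping sketch built on this factory; the HDR file name is a placeholder:

@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

int main()
{
    // placeholder: any 32-bit floating-point 3-channel HDR input
    Mat hdr = imread("memorial.hdr", IMREAD_ANYDEPTH | IMREAD_ANYCOLOR);
    Ptr<TonemapDurand> tonemap = createTonemapDurand(2.2f);
    Mat ldr;
    tonemap->process(hdr, ldr);        // result is 32-bit, roughly in [0, 1]
    ldr.convertTo(ldr, CV_8U, 255);
    imwrite("ldr.png", ldr);
    return 0;
}
@endcode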
@ -377,9 +377,9 @@ public:
@param gamma gamma value for gamma correction. See createTonemap
@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
@param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel
value, if 0 it's global, otherwise it's a weighted mean of these two cases.
@param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently,
if 0 adaptation level is the same for each channel.
*/
CV_EXPORTS_W Ptr<TonemapReinhard>
@ -484,9 +484,9 @@ public:
/** @brief Creates AlignMTB object
@param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
usually good enough (31 and 63 pixels shift respectively).
@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
median value.
@param cut if true cuts images, otherwise fills the new regions with zeros.
*/
@ -554,7 +554,7 @@ public:
/** @brief Creates CalibrateRobertson object
@param max_iter maximal number of Gauss-Seidel solver iterations.
@param threshold target difference between results of two successive steps of the minimization.
*/
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);
@ -628,9 +628,9 @@ public:
/** @brief Creates MergeMertens object
@param contrast_weight contrast measure weight. See MergeMertens.
@param saturation_weight saturation measure weight
@param exposure_weight well-exposedness measure weight
*/
CV_EXPORTS_W Ptr<MergeMertens>
createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);
@ -660,7 +660,7 @@ black-and-white photograph rendering, and in many single channel image processin
@param src Input 8-bit 3-channel image.
@param grayscale Output 8-bit 1-channel image.
@param color_boost Output 8-bit 3-channel image.
This function is to be applied on color images.
*/
@ -681,13 +681,13 @@ content @cite PM03.
@param p Point in dst image where object is placed.
@param blend Output image with the same size and type as dst.
@param flags Cloning method that could be one of the following:
- **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with
complex outlines into a new background
- **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time
consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the
original image, is not effective. Mixed seamless cloning based on a loose selection proves
effective.
- **FEATURE_EXCHANGE** Feature exchange allows the user to easily replace certain features of
one object by alternative features.
*/
CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
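
A minimal cloning sketch; the file names and placement point are placeholders:

@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

int main()
{
    Mat src  = imread("object.jpg");       // placeholder source patch
    Mat dst  = imread("background.jpg");   // placeholder destination
    Mat mask = imread("object_mask.jpg");  // placeholder, roughly outlines the object
    Point center(dst.cols / 2, dst.rows / 2);
    Mat blend;
    seamlessClone(src, dst, mask, center, blend, NORMAL_CLONE);
    imwrite("blend.jpg", blend);
    return 0;
}
@endcode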
@ -699,9 +699,9 @@ seamlessly.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src .
@param red_mul R-channel multiply factor.
@param green_mul G-channel multiply factor.
@param blue_mul B-channel multiply factor.
Multiplication factor is between 0.5 and 2.5.
*/
@ -729,9 +729,9 @@ Detector is used.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param low_threshold Range from 0 to 100.
@param high_threshold Value \> 100.
@param kernel_size The size of the Sobel kernel to be used.
**NOTE:**
@ -754,10 +754,10 @@ filters are used in many different applications @cite EM11.
@param src Input 8-bit 3-channel image.
@param dst Output 8-bit 3-channel image.
@param flags Edge preserving filters:
- **RECURS_FILTER** = 1
- **NORMCONV_FILTER** = 2
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
*/
CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
float sigma_s = 60, float sigma_r = 0.4f);
@ -766,8 +766,8 @@ CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flag
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
*/
CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
float sigma_r = 0.15f);
@ -777,9 +777,9 @@ CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s =
@param src Input 8-bit 3-channel image.
@param dst1 Output 8-bit 1-channel image.
@param dst2 Output image with the same size and type as src.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
@param shade_factor Range between 0 and 0.1.
*/
CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
@ -790,8 +790,8 @@ contrast while preserving, or enhancing, high-contrast features.
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
*/
CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
float sigma_r = 0.45f);
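
One sketch exercising the four filters above with default-like parameter values; the input file
name is a placeholder:

@code{.cpp}
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

int main()
{
    Mat src = imread("photo.jpg");  // placeholder, 8-bit 3-channel
    Mat smoothed, enhanced, sketchGray, sketchColor, painted;
    edgePreservingFilter(src, smoothed, RECURS_FILTER, 60, 0.4f);
    detailEnhance(src, enhanced, 10, 0.15f);
    pencilSketch(src, sketchGray, sketchColor, 60, 0.07f, 0.02f);
    stylization(src, painted, 60, 0.45f);
    return 0;
}
@endcode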

View File

@ -52,13 +52,13 @@ namespace cv { namespace cuda {
/** @brief Performs pure non local means denoising without any simplification, and thus it is not fast.
@param src Source image. Supports only CV_8UC1, CV_8UC2 and CV_8UC3.
@param dst Destination image.
@param h Filter sigma regulating filter strength for color.
@param search_window Size of search window.
@param block_size Size of block used for computing weights.
@param borderMode Border type. See borderInterpolate for details. BORDER_REFLECT101 ,
BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now.
@param s Stream for the asynchronous version.
@sa
@ -79,10 +79,10 @@ public:
@param dst Output image with the same size and type as src .
@param h Parameter regulating filter strength. Big h value perfectly removes noise but also
removes image details, smaller h value preserves details but also preserves some noise
@param search_window Size in pixels of the window that is used to compute weighted average for
a given pixel. Should be odd. Affects performance linearly: greater search_window - greater
denoising time. Recommended value 21 pixels
@param block_size Size in pixels of the template patch that is used to compute weights. Should be
odd. Recommended value 7 pixels
@param s Stream for the asynchronous invocations.
@ -98,14 +98,14 @@ public:
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src .
@param h_luminance Parameter regulating filter strength. Big h value perfectly removes noise but
also removes image details, smaller h value preserves details but also preserves some noise
@param photo_render float The same as h but for color components. For most images a value of 10 will be
enough to remove colored noise and not distort colors
@param search_window Size in pixels of the window that is used to compute weighted average for
a given pixel. Should be odd. Affects performance linearly: greater search_window - greater
denoising time. Recommended value 21 pixels
@param block_size Size in pixels of the template patch that is used to compute weights. Should be
odd. Recommended value 7 pixels
@param s Stream for the asynchronous invocations.

View File

@ -200,7 +200,7 @@ public:
/** @brief Set the norm used to compute the Hausdorff value between two shapes. It can be L1 or L2 norm.
@param distanceFlag Flag indicating which norm is used to compute the Hausdorff distance
(NORM_L1, NORM_L2).
*/
CV_WRAP virtual void setDistanceFlag(int distanceFlag) = 0;
CV_WRAP virtual int getDistanceFlag() const = 0;

View File

@ -89,9 +89,9 @@ familiar with the theory is recommended.
@note
- A basic example on image stitching can be found at
opencv_source_code/samples/cpp/stitching.cpp
- A detailed example on image stitching can be found at
opencv_source_code/samples/cpp/stitching_detailed.cpp
*/
class CV_EXPORTS_W Stitcher
{
@ -108,7 +108,7 @@ public:
// Stitcher() {}
/** @brief Creates a stitcher with the default parameters.
@param try_use_gpu Flag indicating whether GPU should be used whenever it's possible.
@return Stitcher class instance.
*/
static Stitcher createDefault(bool try_use_gpu = false);
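
A minimal panorama sketch around this factory; the input file names are placeholders:

@code{.cpp}
#include <opencv2/stitching.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;

int main()
{
    std::vector<Mat> imgs;
    imgs.push_back(imread("left.jpg"));   // placeholder inputs with overlap
    imgs.push_back(imread("right.jpg"));
    Stitcher stitcher = Stitcher::createDefault(false /* try_use_gpu */);
    Mat pano;
    Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status == Stitcher::OK)
        imwrite("pano.jpg", pano);
    return 0;
}
@endcode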

View File

@ -58,8 +58,8 @@ undergoes rotations around its centre only.
@param H Homography.
@param f0 Estimated focal length along X axis.
@param f1 Estimated focal length along Y axis.
@param f0_ok True, if f0 was estimated successfully, false otherwise.
@param f1_ok True, if f1 was estimated successfully, false otherwise.
See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
@ -69,7 +69,7 @@ void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool
/** @brief Estimates focal lengths for each given camera.
@param features Features of images.
@param pairwise_matches Matches between all image pairs.
@param focals Estimated focal lengths for each camera.
*/
void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,

View File

@ -81,7 +81,7 @@ public:
/** @brief Blends and returns the final pano.
@param dst Final pano
@param dst_mask Final pano mask
*/
virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);

View File

@ -80,7 +80,7 @@ public:
@param features Found features
@param rois Regions of interest
@sa detail::ImageFeatures, Rect_
*/
void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
/** @brief Frees unused memory allocated before if there is any. */
@ -88,7 +88,7 @@ public:
protected:
/** @brief This method must implement features finding logic in order to make the wrappers
detail::FeaturesFinder::operator()_ work.
@param image Source image
@param features Found features
@ -181,7 +181,7 @@ public:
/** @overload
@param features1 First image features
@param features2 Second image features
@param matches_info Found matches
*/
void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) { match(features1, features2, matches_info); }
@ -189,7 +189,7 @@ public:
/** @brief Performs images matching.
@param features Features of the source images
@param pairwise_matches Found pairwise matches
@param mask Mask indicating which image pairs must be matched
The function is parallelized with the TBB library.
@ -211,11 +211,11 @@ protected:
FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
/** @brief This method must implement matching logic in order to make the wrappers
detail::FeaturesMatcher::operator()_ work.
@param features1 first image features
@param features2 second image features
@param matches_info found matches
*/
virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) = 0;
@ -224,7 +224,7 @@ protected:
};
/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match_conf
@sa detail::FeaturesMatcher
*/
@ -233,11 +233,11 @@ class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
public:
/** @brief Constructs a "best of 2 nearest" matcher.
@param try_use_gpu Should try to use GPU or not
@param match_conf Match distances ratio threshold
@param num_matches_thresh1 Minimum number of matches required for the 2D projective transform
estimation used in the inliers classification step
@param num_matches_thresh2 Minimum number of matches required for the 2D projective transform
re-estimation on inliers
*/
BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,

View File

@ -70,7 +70,7 @@ public:
/** @brief Estimates camera parameters.
@param features Features of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
@ -81,10 +81,10 @@ public:
protected:
/** @brief This method must implement camera parameters estimation logic in order to make the wrapper
detail::Estimator::operator()_ work.
@param features Features of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
@ -130,8 +130,8 @@ public:
protected:
/** @brief Construct a bundle adjuster base instance.
@param num_params_per_cam Number of parameters per camera
@param num_errs_per_measurement Number of error terms (components) per match
*/
BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
: num_params_per_cam_(num_params_per_cam),
@ -159,13 +159,13 @@ protected:
virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
/** @brief Calculates error vector.
@param err Error column-vector of length total_num_matches \* num_errs_per_measurement
*/
virtual void calcError(Mat &err) = 0;
/** @brief Calculates the cost function jacobian.
@param jac Jacobian matrix of dimensions
(total_num_matches \* num_errs_per_measurement) x (num_images \* num_params_per_cam)
*/
virtual void calcJacobian(Mat &jac) = 0;

View File

@ -72,7 +72,7 @@ public:
/** @brief Builds the projection maps according to the given camera data.
@param src_size Source image size
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
@ -86,8 +86,8 @@ public:
@param src Source image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Project image top-left corner
*/
@ -99,16 +99,16 @@ public:
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst_size Backward-projected image size
@param dst Backward-projected image
*/
virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst) = 0;
/**
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
@ -135,7 +135,7 @@ struct CV_EXPORTS ProjectorBase
float t[3];
};
/** @brief Base class for rotation-based warper using a detail::ProjectorBase_ derived class.
*/
template <class P>
class CV_EXPORTS RotationWarperBase : public RotationWarper

View File

@ -74,7 +74,7 @@ See the OpenCV sample camshiftdemo.c that tracks colored objects.
@note
- (Python) A sample explaining the camshift tracking algorithm can be found at
opencv_source_code/samples/python2/camshift.py
*/
CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
TermCriteria criteria );
@ -98,7 +98,7 @@ with findContours , throwing away contours with small area ( contourArea ), and
remaining contours with drawContours.
@note
- A mean-shift tracking sample can be found at opencv_source_code/samples/cpp/camshiftdemo.cpp
*/
CV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window, TermCriteria criteria );
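
A hedged per-frame tracking sketch: build a hue back projection and update the search window.
The ROI histogram and initial window are assumed to be prepared by the caller:

@code{.cpp}
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;

RotatedRect trackOnce(const Mat& frame, const Mat& roiHist, Rect& window)
{
    Mat hsv, backproj;
    cvtColor(frame, hsv, COLOR_BGR2HSV);
    float hranges[] = { 0, 180 };
    const float* ranges[] = { hranges };
    int channels[] = { 0 };
    calcBackProject(&hsv, 1, channels, roiHist, backproj, ranges);
    // meanShift(backproj, window, criteria) keeps the window size fixed;
    // CamShift also adapts the window size and orientation:
    return CamShift(backproj, window,
                    TermCriteria(TermCriteria::EPS | TermCriteria::COUNT, 10, 1));
}
@endcode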
@ -132,7 +132,7 @@ pyramids.
single-precision floating-point numbers.
@param nextPts output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image; when
OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
@param status output status vector (of unsigned chars); each element of the vector is set to 1 if
the flow for the corresponding features has been found, otherwise, it is set to 0.
@param err output vector of errors; each element of the vector is set to an error for the
@ -146,9 +146,9 @@ algorithm will use as many levels as pyramids have but no more than maxLevel.
(after the specified maximum number of iterations criteria.maxCount or when the search window
moves by less than criteria.epsilon).
@param flags operation flags:
- **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is
not set, then prevPts is copied to nextPts and is considered the initial estimate.
- **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigen values as an error measure (see
minEigThreshold description); if the flag is not set, then L1 distance between patches
around the original and a moved point, divided by number of pixels in a window, is used as an
error measure.
@ -164,11 +164,11 @@ The function implements a sparse iterative version of the Lucas-Kanade optical f
@note
- An example using the Lucas-Kanade optical flow algorithm can be found at
opencv_source_code/samples/cpp/lkdemo.cpp
- (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
opencv_source_code/samples/python2/lk_track.py
- (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
opencv_source_code/samples/python2/lk_homography.py
*/
CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
InputArray prevPts, InputOutputArray nextPts,
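
A minimal sparse-flow sketch: detect corners in the first frame and track them into the second;
prev and next are assumed 8-bit grayscale:

@code{.cpp}
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>
using namespace cv;

void lkSketch(const Mat& prev, const Mat& next)
{
    std::vector<Point2f> prevPts, nextPts;
    goodFeaturesToTrack(prev, prevPts, 500, 0.01, 10);
    std::vector<uchar> status;
    std::vector<float> err;
    calcOpticalFlowPyrLK(prev, next, prevPts, nextPts, status, err,
                         Size(21, 21), 3 /* maxLevel */);
    // nextPts[i] is valid where status[i] == 1
}
@endcode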
@ -181,24 +181,24 @@ CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
@param prev first 8-bit single-channel input image.
@param next second input image of the same size and the same type as prev.
@param flow computed flow image that has the same size as prev and type CV_32FC2.
@param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image;
pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous
one.
@param levels number of pyramid layers including the initial image; levels=1 means that no extra
layers are created and only the original images are used.
@param winsize averaging window size; larger values increase the algorithm robustness to image
noise and give more chances for fast motion detection, but yield more blurred motion field.
@param iterations number of iterations the algorithm does at each pyramid level.
@param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
larger values mean that the image will be approximated with smoother surfaces, yielding more
robust algorithm and more blurred motion field, typically poly_n = 5 or 7.
@param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a
good value would be poly_sigma=1.5.
@param flags operation flags that can be a combination of the following:
- **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation.
- **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$
filter instead of a box filter of the same size for optical flow estimation; usually, this
option gives a more accurate flow than with a box filter, at the cost of lower speed;
normally, winsize for a Gaussian window should be set to a larger value to achieve the same
@ -211,9 +211,9 @@ The function finds an optical flow for each prev pixel using the @cite Farneback
@note
- An example using the optical flow algorithm described by Gunnar Farneback can be found at
opencv_source_code/samples/cpp/fback.cpp
- (Python) An example using the optical flow algorithm described by Gunnar Farneback can be
found at opencv_source_code/samples/python2/opt_flow.py
*/
CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, InputOutputArray flow,
double pyr_scale, int levels, int winsize,
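
A minimal dense-flow sketch using the commonly suggested parameter values from the description
above; prev and next are assumed 8-bit grayscale:

@code{.cpp}
#include <opencv2/video/tracking.hpp>
using namespace cv;

void farnebackSketch(const Mat& prev, const Mat& next)
{
    Mat flow; // CV_32FC2, per-pixel (dx, dy) displacement
    calcOpticalFlowFarneback(prev, next, flow,
                             0.5 /* pyr_scale */, 3 /* levels */, 15 /* winsize */,
                             3 /* iterations */, 5 /* poly_n */, 1.2 /* poly_sigma */,
                             0 /* flags */);
}
@endcode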
@ -260,19 +260,19 @@ enum
/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08.
@param templateImage single-channel template image; CV_8U or CV_32F array.
@param inputImage single-channel input image which should be warped with the final warpMatrix in
order to provide an image similar to templateImage, same type as templateImage.
@param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
@param motionType parameter, specifying the type of motion:
- **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \f$2\times 3\f$ with
the first \f$2\times 2\f$ part being the unity matrix and the rest two parameters being
estimated.
- **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three
parameters are estimated; warpMatrix is \f$2\times 3\f$.
- **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated;
warpMatrix is \f$2\times 3\f$.
- **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are
estimated; `warpMatrix` is \f$3\times 3\f$.
@param criteria parameter, specifying the termination criteria of the ECC algorithm;
criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
@ -300,8 +300,8 @@ warp (unity matrix) should be given as input. Note that if images undergo strong
displacements/rotations, an initial transformation that roughly aligns the images is necessary
(e.g., a simple euclidean/similarity transform that allows for the images showing the same image
content approximately). Use inverse warping in the second image to take an image close to the first
one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
an exception if the algorithm does not converge.
@sa
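
A hedged alignment sketch following the recipe above: start from an identity warp, refine it with
the ECC criterion, then warp back with WARP_INVERSE_MAP; the images are assumed single-channel
and roughly pre-aligned:

@code{.cpp}
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;

Mat alignEuclidean(const Mat& templateImage, const Mat& inputImage)
{
    Mat warp = Mat::eye(2, 3, CV_32F); // identity as the initial guess
    findTransformECC(templateImage, inputImage, warp, MOTION_EUCLIDEAN,
                     TermCriteria(TermCriteria::COUNT | TermCriteria::EPS, 50, 1e-4));
    Mat aligned;
    warpAffine(inputImage, aligned, warp, templateImage.size(),
               INTER_LINEAR + WARP_INVERSE_MAP); // inverse warping, as recommended
    return aligned;
}
@endcode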
@ -320,7 +320,7 @@ an extended Kalman filter functionality. See the OpenCV sample kalman.cpp.
@note
- An example using the standard Kalman filter can be found at
opencv_source_code/samples/cpp/kalman.cpp
*/
class CV_EXPORTS_W KalmanFilter
{
@ -335,7 +335,7 @@ public:
@param dynamParams Dimensionality of the state.
@param measureParams Dimensionality of the measurement.
@param controlParams Dimensionality of the control vector.
@param type Type of the created matrices that should be CV_32F or CV_64F.
*/
CV_WRAP KalmanFilter( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );
@ -344,7 +344,7 @@ public:
@param dynamParams Dimensionality of the state.
@param measureParams Dimensionality of the measurement.
@param controlParams Dimensionality of the control vector.
@param type Type of the created matrices that should be CV_32F or CV_64F.
*/
void init( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );
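
A minimal constant-velocity sketch (state: x, y, vx, vy; measurement: x, y); the noise
magnitudes are illustrative:

@code{.cpp}
#include <opencv2/video/tracking.hpp>
using namespace cv;

void kalmanSketch(const Mat& measurement) // 2x1, CV_32F
{
    KalmanFilter kf(4, 2, 0, CV_32F);
    kf.transitionMatrix = (Mat_<float>(4, 4) << 1, 0, 1, 0,
                                                0, 1, 0, 1,
                                                0, 0, 1, 0,
                                                0, 0, 0, 1);
    setIdentity(kf.measurementMatrix);
    setIdentity(kf.processNoiseCov, Scalar::all(1e-4));     // illustrative
    setIdentity(kf.measurementNoiseCov, Scalar::all(1e-2)); // illustrative
    Mat predicted = kf.predict();            // prior estimate
    Mat corrected = kf.correct(measurement); // posterior estimate
}
@endcode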
@ -429,7 +429,7 @@ public:
@param I0 first 8-bit single-channel input image.
@param I1 second input image of the same size and the same type as I0.
@param flow computed flow image that has the same size as I0 and type CV_32FC2.
*/
CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
/** @brief Releases all inner buffers.

View File

@ -390,15 +390,15 @@ class can be used: :
@note
- A basic sample on using the VideoCapture interface can be found at
opencv_source_code/samples/cpp/starter_video.cpp
- Another basic video processing sample can be found at
opencv_source_code/samples/cpp/video_dmtx.cpp
- (Python) A basic sample on using the VideoCapture interface can be found at
opencv_source_code/samples/python2/video.py
- (Python) Another basic video processing sample can be found at
opencv_source_code/samples/python2/video_dmtx.py
- (Python) A multi threaded video processing sample can be found at
opencv_source_code/samples/python2/video_threaded.py
*/
class CV_EXPORTS_W VideoCapture
{
@ -412,7 +412,7 @@ public:
/** @overload
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
*/
CV_WRAP VideoCapture(const String& filename);
@ -427,7 +427,7 @@ public:
/** @brief Open video file or a capturing device for video capturing
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
The methods first call VideoCapture::release to close the already opened file or camera.
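
A basic capture loop; the file name is a placeholder, and a camera index (e.g.
VideoCapture cap(0)) works the same way:

@code{.cpp}
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;

int main()
{
    VideoCapture cap("video.avi"); // placeholder; or an image sequence like "img_%02d.jpg"
    if (!cap.isOpened())
        return -1;
    Mat frame;
    while (cap.read(frame))        // grab() and retrieve() in one call
    {
        imshow("frame", frame);
        if (waitKey(30) >= 0) break;
    }
    return 0;
}
@endcode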
*/
@ -502,27 +502,27 @@ public:
/** @brief Sets a property in the VideoCapture.
@param propId Property identifier. It can be one of the following:
- **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.
- **CV_CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
- **CV_CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
film, 1 - end of the film.
- **CV_CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
- **CV_CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
- **CV_CAP_PROP_FPS** Frame rate.
- **CV_CAP_PROP_FOURCC** 4-character code of codec.
- **CV_CAP_PROP_FRAME_COUNT** Number of frames in the video file.
- **CV_CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
- **CV_CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
- **CV_CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
- **CV_CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
- **CV_CAP_PROP_SATURATION** Saturation of the image (only for cameras).
- **CV_CAP_PROP_HUE** Hue of the image (only for cameras).
- **CV_CAP_PROP_GAIN** Gain of the image (only for cameras).
- **CV_CAP_PROP_EXPOSURE** Exposure (only for cameras).
- **CV_CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted
to RGB.
- **CV_CAP_PROP_WHITE_BALANCE** Currently unsupported
- **CV_CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
by DC1394 v 2.x backend currently)
@param value Value of the property.
*/
@ -531,28 +531,28 @@ public:
/** @brief Returns the specified VideoCapture property
@param propId Property identifier. It can be one of the following:
- **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video
capture timestamp.
- **CV_CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
- **CV_CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
film, 1 - end of the film.
- **CV_CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
- **CV_CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
- **CV_CAP_PROP_FPS** Frame rate.
- **CV_CAP_PROP_FOURCC** 4-character code of codec.
- **CV_CAP_PROP_FRAME_COUNT** Number of frames in the video file.
- **CV_CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
- **CV_CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
- **CV_CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
- **CV_CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
- **CV_CAP_PROP_SATURATION** Saturation of the image (only for cameras).
- **CV_CAP_PROP_HUE** Hue of the image (only for cameras).
- **CV_CAP_PROP_GAIN** Gain of the image (only for cameras).
- **CV_CAP_PROP_EXPOSURE** Exposure (only for cameras).
- **CV_CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted
to RGB.
- **CV_CAP_PROP_WHITE_BALANCE** Currently not supported
- **CV_CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
by DC1394 v 2.x backend currently)
**Note**: When querying a property that is not supported by the backend used by the VideoCapture

View File

@ -70,7 +70,7 @@ namespace videostab
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param model Motion model (up to MM_AFFINE).
@param rmse Final root-mean-square error.
@return 3x3 2D transformation matrix (32F).
*/

View File

@ -155,19 +155,19 @@ namespace cv
Camera(double fx, double fy, double cx, double cy, const Size &window_size);
/** @overload
@param fov Field of view (horizontal, vertical)
@param window_size Size of the window. Principal point is at the center of the window
by default.
*/
explicit Camera(const Vec2d &fov, const Size &window_size);
/** @overload
@param K Intrinsic matrix of the camera.
@param window_size Size of the window. This together with intrinsic matrix determines
the field of view.
*/
explicit Camera(const Matx33d &K, const Size &window_size);
/** @overload
@param proj Projection matrix of the camera.
@param window_size Size of the window. This together with projection matrix determines
the field of view.
*/
explicit Camera(const Matx44d &proj, const Size &window_size);
@ -192,7 +192,7 @@ namespace cv
/** @brief Creates a Kinect Camera.
@param window_size Size of the window. This together with intrinsic matrix of a Kinect Camera
determines the field of view.
*/
static Camera KinectCamera(const Size &window_size);

View File

@ -73,7 +73,7 @@ namespace cv
/** @brief The constructors.
@param window_name Name of the window.
*/
Viz3d(const String& window_name = String());
Viz3d(const Viz3d&);
@ -165,13 +165,13 @@ namespace cv
/** @brief Transforms a point in world coordinate system to window coordinate system.
@param pt Point in world coordinate system.
@param window_coord Output point in window coordinate system.
*/
void convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord);
/** @brief Transforms a point in window coordinate system to a 3D ray in world coordinate system.
@param window_coord Point in window coordinate system.
@param origin Output origin of the ray.
@param direction Output direction of the ray.
*/
void converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction);
@ -181,7 +181,7 @@ namespace cv
Size getWindowSize() const;
/** @brief Sets the size of the window.
@param window_size New size of the window.
*/
void setWindowSize(const Size &window_size);
@ -251,24 +251,24 @@ namespace cv
@param value The new value of the property.
**Rendering property** can be one of the following:
- **POINT_SIZE**
- **OPACITY**
- **LINE_WIDTH**
- **FONT_SIZE**
-
**REPRESENTATION**: Expected values are
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
-
**IMMEDIATE_RENDERING**:
- Turn on immediate rendering by setting the value to 1.
- Turn off immediate rendering by setting the value to 0.
-
**SHADING**: Expected values are
- **SHADING_FLAT**
- **SHADING_GOURAUD**
- **SHADING_PHONG**
*/
void setRenderingProperty(const String &id, int property, double value);
/** @brief Returns rendering property of a widget.
@ -277,33 +277,33 @@ namespace cv
@param property Property.
**Rendering property** can be one of the following:
- **POINT_SIZE**
- **OPACITY**
- **LINE_WIDTH**
- **FONT_SIZE**
-
**REPRESENTATION**: Expected values are
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
-
**IMMEDIATE_RENDERING**:
- Turn on immediate rendering by setting the value to 1.
- Turn off immediate rendering by setting the value to 0.
- **SHADING**: Expected values are
- **SHADING\_FLAT**
- **SHADING\_GOURAUD**
- **SHADING\_PHONG**
- **SHADING_FLAT**
- **SHADING_GOURAUD**
- **SHADING_PHONG**
*/
double getRenderingProperty(const String &id, int property);
/** @brief Sets geometry representation of the widgets to surface, wireframe or points.
@param representation Geometry representation which can be one of the following:
- **REPRESENTATION\_POINTS**
- **REPRESENTATION\_WIREFRAME**
- **REPRESENTATION\_SURFACE**
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
*/
void setRepresentation(int representation);


@ -60,8 +60,8 @@ namespace cv
/** @brief Takes coordinate frame data and builds a transform to the global coordinate frame.
@param axis\_x X axis vector in global coordinate frame. @param axis\_y Y axis vector in global
coordinate frame. @param axis\_z Z axis vector in global coordinate frame. @param origin Origin of
@param axis_x X axis vector in global coordinate frame.
@param axis_y Y axis vector in global coordinate frame.
@param axis_z Z axis vector in global coordinate frame.
@param origin Origin of the coordinate frame in global coordinate frame.
This function returns an affine transform that describes the transformation between the global coordinate frame
@ -69,11 +69,11 @@ namespace cv
*/
CV_EXPORTS Affine3d makeTransformToGlobal(const Vec3d& axis_x, const Vec3d& axis_y, const Vec3d& axis_z, const Vec3d& origin = Vec3d::all(0));
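An illustrative call with made-up axes, building the pose of a frame that sits at z = 3 and looks back along the global Z axis:

cv::Affine3d cam_to_global = cv::viz::makeTransformToGlobal(
    cv::Vec3d( 0.0, -1.0,  0.0),   // axis_x
    cv::Vec3d(-1.0,  0.0,  0.0),   // axis_y
    cv::Vec3d( 0.0,  0.0, -1.0),   // axis_z
    cv::Vec3d( 0.0,  0.0,  3.0));  // origin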
/** @brief Constructs camera pose from position, focal\_point and up\_vector (see gluLookAt() for more
/** @brief Constructs camera pose from position, focal_point and up_vector (see gluLookAt() for more
information).
@param position Position of the camera in global coordinate frame. @param focal\_point Focal point
of the camera in global coordinate frame. @param y\_dir Up vector of the camera in global
@param position Position of the camera in global coordinate frame.
@param focal_point Focal point of the camera in global coordinate frame.
@param y_dir Up vector of the camera in global coordinate frame.
This function returns the pose of the camera in the global coordinate frame.
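A sketch for this function (upstream it is declared as cv::viz::makeCameraPose in vizcore.hpp): look at the origin from (0, 0, 3) with the camera's y axis pointing down, then apply the pose to a viewer.

cv::Affine3d pose = cv::viz::makeCameraPose(
    cv::Vec3d(0.0,  0.0, 3.0),    // position
    cv::Vec3d(0.0,  0.0, 0.0),    // focal_point
    cv::Vec3d(0.0, -1.0, 0.0));   // y_dir
window.setViewerPose(pose);       // Viz3d::setViewerPose, from viz3d.hpp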
@ -82,7 +82,7 @@ namespace cv
/** @brief Retrieves a window by its name.
@param window\_name Name of the window that is to be retrieved.
@param window_name Name of the window that is to be retrieved.
This function returns a Viz3d object with the given name.


@ -97,7 +97,7 @@ namespace cv
/** @brief Creates a widget from a ply file.
@param file\_name Ply file name.
@param file_name Ply file name.
*/
static Widget fromPlyFile(const String &file_name);
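Usage sketch (the file name is hypothetical; window as in the earlier sketch):

cv::viz::Widget mesh = cv::viz::Widget::fromPlyFile("bunny.ply");
window.showWidget("mesh", mesh);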
@ -107,24 +107,24 @@ namespace cv
@param value The new value of the property.
**Rendering property** can be one of the following:
- **POINT\_SIZE**
- **POINT_SIZE**
- **OPACITY**
- **LINE\_WIDTH**
- **FONT\_SIZE**
- **LINE_WIDTH**
- **FONT_SIZE**
- **REPRESENTATION**: Expected values are
- **REPRESENTATION\_POINTS**
- **REPRESENTATION\_WIREFRAME**
- **REPRESENTATION\_SURFACE**
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
- **IMMEDIATE\_RENDERING**:
- **IMMEDIATE_RENDERING**:
- Turn on immediate rendering by setting the value to 1.
- Turn off immediate rendering by setting the value to 0.
- **SHADING**: Expected values are
- **SHADING\_FLAT**
- **SHADING\_GOURAUD**
- **SHADING\_PHONG**
- **SHADING_FLAT**
- **SHADING_GOURAUD**
- **SHADING_PHONG**
*/
void setRenderingProperty(int property, double value);
/** @brief Returns rendering property of the widget.
@ -132,24 +132,24 @@ namespace cv
@param property Property.
**Rendering property** can be one of the following:
- **POINT\_SIZE**
- **POINT_SIZE**
- **OPACITY**
- **LINE\_WIDTH**
- **FONT\_SIZE**
- **LINE_WIDTH**
- **FONT_SIZE**
- **REPRESENTATION**: Expected values are
: - **REPRESENTATION\_POINTS**
- **REPRESENTATION\_WIREFRAME**
- **REPRESENTATION\_SURFACE**
- **REPRESENTATION_POINTS**
- **REPRESENTATION_WIREFRAME**
- **REPRESENTATION_SURFACE**
- **IMMEDIATE\_RENDERING**:
- **IMMEDIATE_RENDERING**:
- Turn on immediate rendering by setting the value to 1.
- Turn off immediate rendering by setting the value to 0.
- **SHADING**: Expected values are
: - **SHADING\_FLAT**
- **SHADING\_GOURAUD**
- **SHADING\_PHONG**
- **SHADING_FLAT**
- **SHADING_GOURAUD**
- **SHADING_PHONG**
*/
double getRenderingProperty(int property) const;
@ -257,7 +257,7 @@ namespace cv
@param center Center of the plane.
@param normal Plane normal orientation.
@param new\_yaxis Up-vector. New orientation of plane y-axis.
@param new_yaxis Up-vector. New orientation of plane y-axis.
@param size
@param color Color of the plane.
*/
@ -274,7 +274,7 @@ namespace cv
@param center Center of the sphere.
@param radius Radius of the sphere.
@param sphere\_resolution Resolution of the sphere.
@param sphere_resolution Resolution of the sphere.
@param color Color of the sphere.
*/
WSphere(const cv::Point3d &center, double radius, int sphere_resolution = 10, const Color &color = Color::white());
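Sketch: a red unit sphere at the origin, tessellated more finely than the default:

cv::viz::WSphere sphere(cv::Point3d(0.0, 0.0, 0.0), 1.0, 32, cv::viz::Color::red());
window.showWidget("sphere", sphere);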
@ -355,8 +355,8 @@ namespace cv
public:
/** @brief Constructs a WCylinder.
@param axis\_point1 A point1 on the axis of the cylinder.
@param axis\_point2 A point2 on the axis of the cylinder.
@param axis_point1 First point on the axis of the cylinder.
@param axis_point2 Second point on the axis of the cylinder.
@param radius Radius of the cylinder.
@param numsides Resolution of the cylinder.
@param color Color of the cylinder.
@ -373,7 +373,7 @@ namespace cv
@param min_point Specifies minimum point of the bounding box.
@param max_point Specifies maximum point of the bounding box.
@param wire\_frame If true, cube is represented as wireframe.
@param wire_frame If true, the cube is represented as a wireframe.
@param color Color of the cube.
![Cube Widget](images/cube_widget.png)
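Sketch: a wireframe unit cube centered at the origin:

cv::viz::WCube cube(cv::Point3d(-0.5, -0.5, -0.5),
                    cv::Point3d( 0.5,  0.5,  0.5),
                    true,                    // wire_frame
                    cv::viz::Color::blue());
window.showWidget("cube", cube);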
@ -408,7 +408,7 @@ namespace cv
@param text Text content of the widget.
@param pos Position of the text.
@param font\_size Font size.
@param font_size Font size.
@param color Color of the text.
*/
WText(const String &text, const Point &pos, int font_size = 20, const Color &color = Color::white());
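Sketch: a 2D text overlay at pixel position (10, 10):

cv::viz::WText label("frame 0", cv::Point(10, 10), 20, cv::viz::Color::green());
window.showWidget("label", label);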
@ -432,8 +432,8 @@ namespace cv
@param text Text content of the widget.
@param position Position of the text.
@param text\_scale Size of the text.
@param face\_camera If true, text always faces the camera.
@param text_scale Size of the text.
@param face_camera If true, text always faces the camera.
@param color Color of the text.
*/
WText3D(const String &text, const Point3d &position, double text_scale = 1., bool face_camera = true, const Color &color = Color::white());
@ -485,7 +485,7 @@ namespace cv
@param size Size of the image.
@param center Position of the image.
@param normal Normal of the plane that represents the image.
@param up\_vector Determines orientation of the image.
@param up_vector Determines orientation of the image.
*/
WImage3D(InputArray image, const Size2d &size, const Vec3d &center, const Vec3d &normal, const Vec3d &up_vector);
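Sketch: showing an image as a textured plane in the scene (the file name is hypothetical; cv::imread needs opencv2/imgcodecs.hpp):

cv::Mat image = cv::imread("texture.png");
cv::viz::WImage3D billboard(image, cv::Size2d(1.0, 0.75),
                            cv::Vec3d(0.0, 0.0, 1.0),   // center
                            cv::Vec3d(0.0, 0.0, -1.0),  // normal
                            cv::Vec3d(0.0, 1.0, 0.0));  // up_vector
window.showWidget("billboard", billboard);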
@ -519,7 +519,7 @@ namespace cv
/** @brief Constructs a WGrid.
@param cells Number of cell columns and rows, respectively.
@param cells\_spacing Size of each cell, respectively.
@param cells_spacing Size of each cell, respectively.
@param color Color of the grid.
*/
WGrid(const Vec2i &cells = Vec2i::all(10), const Vec2d &cells_spacing = Vec2d::all(1.0), const Color &color = Color::white());
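Sketch: a 10x10 grid with 0.5-unit cells, usable as a ground plane:

cv::viz::WGrid grid(cv::Vec2i(10, 10), cv::Vec2d(0.5, 0.5), cv::viz::Color::gray());
window.showWidget("grid", grid);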
@ -600,7 +600,7 @@ namespace cv
/** @brief Constructs a WTrajectory.
@param path List of poses on a trajectory. Takes std::vector\<Affine\<T\>\> with T == [float | double]
@param display\_mode Display mode. This can be PATH, FRAMES, and BOTH.
@param display_mode Display mode. This can be PATH, FRAMES, or BOTH.
@param scale Scale of the frames. Polyline is not affected.
@param color Color of the polyline that represents path.
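Sketch: a straight synthetic trajectory drawn as both the polyline and the frames (PATH, FRAMES and BOTH are constants of WTrajectory):

std::vector<cv::Affine3d> path;
for (int i = 0; i < 100; ++i)
    path.push_back(cv::Affine3d().translate(cv::Vec3d(0.01 * i, 0.0, 0.0)));
cv::viz::WTrajectory trajectory(path, cv::viz::WTrajectory::BOTH, 0.2);
window.showWidget("trajectory", trajectory);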
@ -652,7 +652,7 @@ namespace cv
/** @brief Constructs a WTrajectorySpheres.
@param path List of poses on a trajectory. Takes std::vector\<Affine\<T\>\> with T == [float | double]
@param line\_length Max length of the lines which point to previous position
@param line_length Maximum length of the lines which point to the previous position.
@param radius Radius of the spheres.
@param from Color for the first sphere.
@param to Color for the last sphere. Intermediate spheres will have an interpolated color.
@ -673,7 +673,7 @@ namespace cv
public:
/** @brief Constructs a WCloud.
@param cloud Set of points which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param colors Set of colors. It has to be of the same size as the cloud.
Points in the cloud are treated as masked when they are set to (NaN, NaN, NaN).
@ -681,7 +681,7 @@ namespace cv
WCloud(InputArray cloud, InputArray colors);
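Sketch: a random colored cloud that satisfies the size and type requirements above:

cv::Mat points(1, 1000, CV_32FC3);
cv::Mat colors(1, 1000, CV_8UC3);
cv::randu(points, cv::Scalar::all(-1.0), cv::Scalar::all(1.0));
cv::randu(colors, cv::Scalar::all(0), cv::Scalar::all(255));
cv::viz::WCloud cloud(points, colors);
window.showWidget("cloud", cloud);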
/** @brief Constructs a WCloud.
@param cloud Set of points which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param color A single Color for the whole cloud.
Points in the cloud are treated as masked when they are set to (NaN, NaN, NaN).
@ -689,7 +689,7 @@ namespace cv
WCloud(InputArray cloud, const Color &color = Color::white());
/** @brief Constructs a WCloud.
@param cloud Set of points which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param colors Set of colors. It has to be of the same size as the cloud.
@param normals Normals for each point in the cloud. Size and type should match the cloud parameter.
@ -698,7 +698,7 @@ namespace cv
WCloud(InputArray cloud, InputArray colors, InputArray normals);
/** @brief Constructs a WCloud.
@param cloud Set of points which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param color A single Color for the whole cloud.
@param normals Normals for each point in the cloud.
@ -731,14 +731,14 @@ namespace cv
/** @brief Adds a cloud to the collection.
@param cloud Point set which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Point set which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param colors Set of colors. It has to be of the same size as the cloud.
@param pose Pose of the cloud. Points in the cloud are treated as masked when they are set to (NaN, NaN, NaN).
*/
void addCloud(InputArray cloud, InputArray colors, const Affine3d &pose = Affine3d::Identity());
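Sketch: merging the same cloud twice at different poses into a single widget (reuses points and colors from the WCloud sketch above):

cv::viz::WCloudCollection collection;
collection.addCloud(points, colors);   // identity pose
collection.addCloud(points, colors,
                    cv::Affine3d().translate(cv::Vec3d(0.0, 0.0, 2.0)));
window.showWidget("clouds", collection);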
/** @brief Adds a cloud to the collection.
@param cloud Point set which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Point set which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param color A single Color for the whole cloud.
@param pose Pose of the cloud. Points in the cloud are treated as masked when they are set to (NaN, NaN, NaN).
*/
@ -757,7 +757,7 @@ namespace cv
public:
/** @brief Constructs a WCloudNormals.
@param cloud Point set which can be of type: CV\_32FC3, CV\_32FC4, CV\_64FC3, CV\_64FC4.
@param cloud Point set which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.
@param normals A set of normals, which has to be of the same type as the cloud.
@param level Display only every level-th normal.
@param scale Scale of the arrows that represent normals.