mirror of https://github.com/opencv/opencv.git
Doxygen documentation: BiB references and fixes
commit 03e213ccae (parent 1523fdcc1c)
@@ -166,20 +166,27 @@ if(BUILD_DOCS AND HAVE_DOXYGEN)
     set(paths_include)
     set(paths_doc)
+    set(paths_bib)
+    set(deps)
     foreach(m ${BASE_MODULES} ${EXTRA_MODULES})
       list(FIND blacklist ${m} _pos)
       if(${_pos} EQUAL -1)
         # include folder
         set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
         if(EXISTS "${header_dir}")
           list(APPEND paths_include "${header_dir}")
+          list(APPEND deps ${header_dir})
         endif()
         # doc folder
         set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
         if(EXISTS "${docs_dir}")
           list(APPEND paths_doc "${docs_dir}")
-          file(GLOB bib_file "${docs_dir}" "*.bib")
-          if(EXISTS "${bib_file}")
-            list(APPEND paths_bib "${bib_file}")
-          endif()
+          list(APPEND deps ${docs_dir})
         endif()
+        # BiBTeX file
+        set(bib_file "${docs_dir}/${m}.bib")
+        if(EXISTS "${bib_file}")
+          set(paths_bib "${paths_bib} ${bib_file}")
+          list(APPEND deps ${bib_file})
+        endif()
       endif()
     endforeach()
@@ -204,10 +204,11 @@ if(BUILD_DOCS AND HAVE_DOXYGEN)
     configure_file(Doxyfile.in ${doxyfile} @ONLY)
     configure_file(root.markdown.in ${rootfile} @ONLY)
     configure_file(mymath.sty "${CMAKE_DOXYGEN_OUTPUT_PATH}/html/mymath.sty" @ONLY)
     configure_file(mymath.sty "${CMAKE_DOXYGEN_OUTPUT_PATH}/latex/mymath.sty" @ONLY)

     add_custom_target(doxygen
       COMMAND ${DOXYGEN_BUILD} ${doxyfile}
-      DEPENDS ${doxyfile} ${all_headers} ${all_images})
+      DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps})
   endif()

 if(HAVE_DOC_GENERATOR)
doc/disabled_doc_warnings.txt (new file, +2)
@@ -0,0 +1,2 @@
+# doxygen citelist build workaround
+citelist : .*Unexpected new line character.*
doc/opencv.bib (1179 lines)
File diff suppressed because it is too large
@@ -343,7 +343,7 @@ and a rotation matrix.
 It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
 degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
 sequence of rotations about the three principle axes that results in the same orientation of an
-object, eg. see @cite Slabaugh. Returned tree rotation matrices and corresponding three Euler angules
+object, eg. see @cite Slabaugh . Returned tree rotation matrices and corresponding three Euler angules
 are only one of the possible solutions.
 */
 CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
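A minimal usage sketch for the RQDecomp3x3 declaration above; the input matrix is a hypothetical placeholder, not taken from the OpenCV sources:

#include <opencv2/calib3d.hpp>
#include <iostream>

int main()
{
    // Hypothetical 3x3 input; any matrix decomposable into R*Q works here.
    cv::Mat M = (cv::Mat_<double>(3, 3) << 700,   5, 320,
                                             0, 700, 240,
                                             0,   0,   1);
    cv::Mat R, Q;
    cv::Vec3d eulerDeg = cv::RQDecomp3x3(M, R, Q); // Euler angles in degrees
    std::cout << "One possible Euler-angle solution: " << eulerDeg << std::endl;
    return 0;
}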
@@ -368,7 +368,7 @@ matrix and the position of a camera.

 It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
 be used in OpenGL. Note, there is always more than one sequence of rotations about the three
-principle axes that results in the same orientation of an object, eg. see @cite Slabaugh. Returned
+principle axes that results in the same orientation of an object, eg. see @cite Slabaugh . Returned
 tree rotation matrices and corresponding three Euler angules are only one of the possible solutions.

 The function is based on RQDecomp3x3 .
@@ -745,7 +745,7 @@ supplied distCoeffs matrix is used. Otherwise, it is set to 0.
 @param criteria Termination criteria for the iterative optimization algorithm.

 The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
-views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT. The coordinates of 3D object
+views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object
 points and their corresponding 2D projections in each view must be specified. That may be achieved
 by using an object with a known geometry and easily detectable feature points. Such an object is
 called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
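A minimal sketch of the calibration call this paragraph documents; objectPoints and imagePoints are assumed to be filled beforehand, e.g. from chessboard detections in several views:

#include <opencv2/calib3d.hpp>
#include <vector>

void calibrate(const std::vector<std::vector<cv::Point3f> >& objectPoints,
               const std::vector<std::vector<cv::Point2f> >& imagePoints,
               cv::Size imageSize)
{
    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;           // one extrinsic pose per view
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     cameraMatrix, distCoeffs, rvecs, tvecs);
    (void)rms;                                   // RMS reprojection error
}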
@@ -1014,7 +1014,7 @@ The function computes the rectification transformations without knowing intrinsi
 cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
 related difference from stereoRectify is that the function outputs not the rectification
 transformations in the object (3D) space, but the planar perspective transformations encoded by the
-homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99.
+homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 .

 @note
 While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
@@ -1185,7 +1185,7 @@ confidence (probability) that the estimated matrix is correct.
 @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
 for the other points. The array is computed only in the RANSAC and LMedS methods.

-This function estimates essential matrix based on the five-point algorithm solver in @cite Nister03.
+This function estimates essential matrix based on the five-point algorithm solver in @cite Nister03 .
 @cite SteweniusCFS is also a related. The epipolar geometry is described by the following equation:

 \f[[p_2; 1]^T K^T E K [p_1; 1] = 0 \\\f]\f[K =
@@ -1211,7 +1211,7 @@ CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
 @param R2 Another possible rotation matrix.
 @param t One possible translation.

-This function decompose an essential matrix E using svd decomposition @cite HartleyZ00. Generally 4
+This function decompose an essential matrix E using svd decomposition @cite HartleyZ00 . Generally 4
 possible poses exists for a given E. They are \f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$. By
 decomposing E, you can only get the direction of the translation, so the function returns unit t.
 */
@@ -1236,7 +1236,7 @@ matrix E. Only these inliers will be used to recover pose. In the output mask on
 which pass the cheirality check.
 This function decomposes an essential matrix using decomposeEssentialMat and then verifies possible
 pose hypotheses by doing cheirality check. The cheirality check basically means that the
-triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.
+triangulated 3D points should have positive depth. Some details can be found in @cite Nister03 .

 This function can be used to process output E and mask from findEssentialMat. In this scenario,
 points1 and points2 are the same input for findEssentialMat. :
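A minimal sketch of the findEssentialMat -> recoverPose chain described above, assuming the focal/principal-point overloads of this era's API; points1/points2 are matched pixel coordinates prepared by the caller:

#include <opencv2/calib3d.hpp>
#include <vector>

void relativePose(const std::vector<cv::Point2f>& points1,
                  const std::vector<cv::Point2f>& points2,
                  double focal, cv::Point2d pp)
{
    cv::Mat mask;                       // inlier mask reused by both calls
    cv::Mat E = cv::findEssentialMat(points1, points2, focal, pp,
                                     cv::RANSAC, 0.999, 1.0, mask);
    cv::Mat R, t;                       // t comes back as a unit vector
    cv::recoverPose(E, points1, points2, R, t, focal, pp, mask);
}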
@@ -1421,7 +1421,7 @@ This function extracts relative camera motion between two views observing a plan
 homography H induced by the plane. The intrinsic camera matrix K must also be provided. The function
 may return up to four mathematical solution sets. At least two of the solutions may further be
 invalidated if point correspondences are available by applying positive depth constraint (all points
-must be in front of the camera). The decomposition method is described in detail in @cite Malis.
+must be in front of the camera). The decomposition method is described in detail in @cite Malis .
 */
 CV_EXPORTS_W int decomposeHomographyMat(InputArray H,
                                         InputArray K,
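A minimal sketch of calling the declaration above; H and K are assumed to be valid 3x3 CV_64F matrices supplied by the caller:

#include <opencv2/calib3d.hpp>
#include <vector>

void decompose(const cv::Mat& H, const cv::Mat& K)
{
    std::vector<cv::Mat> rotations, translations, normals;
    int n = cv::decomposeHomographyMat(H, K, rotations, translations, normals);
    // Up to n <= 4 candidate {R, t, plane normal} sets; prune with the
    // positive-depth constraint when point correspondences are available.
    (void)n;
}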
@@ -1605,6 +1605,7 @@ public:
                  int mode = StereoSGBM::MODE_SGBM);
 };

+//! @} calib3d

 /** @brief The methods in this namespace use a so-called fisheye camera model.
 @ingroup calib3d_fisheye
@@ -1851,8 +1852,6 @@ namespace fisheye
 //! @} calib3d_fisheye
 }

-//! @} calib3d
-
 } // cv

 #endif
@@ -75,6 +75,9 @@
 @defgroup core_opengl OpenGL interoperability
 @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
 @defgroup core_optim Optimization Algorithms
+@defgroup core_directx DirectX interoperability
+@defgroup core_eigen Eigen support
+@defgroup core_opencl OpenCL support
 @}
 */
@@ -69,7 +69,7 @@ namespace cv { namespace cuda {

 The class discriminates between foreground and background pixels by building and maintaining a model
 of the background. Any pixel which does not fit this model is then deemed to be foreground. The
-class implements algorithm described in @cite MOG2001.
+class implements algorithm described in @cite MOG2001 .

 @sa BackgroundSubtractorMOG
@@ -119,7 +119,7 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG>

 The class discriminates between foreground and background pixels by building and maintaining a model
 of the background. Any pixel which does not fit this model is then deemed to be foreground. The
-class implements algorithm described in @cite MOG2004.
+class implements algorithm described in @cite Zivkovic2004 .

 @sa BackgroundSubtractorMOG2
 */
@@ -154,7 +154,7 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>

 The class discriminates between foreground and background pixels by building and maintaining a model
 of the background. Any pixel which does not fit this model is then deemed to be foreground. The
-class implements algorithm described in @cite GMG2012.
+class implements algorithm described in @cite Gold2012 .
 */
 class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
 {
@@ -208,7 +208,7 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
 of the background.

 Any pixel which does not fit this model is then deemed to be foreground. The class implements
-algorithm described in @cite FGD2003.
+algorithm described in @cite FGD2003 .
 @sa BackgroundSubtractor
 */
 class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
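A minimal sketch of the apply() loop these background-subtractor classes share, shown with the CPU cv::createBackgroundSubtractorMOG2 factory so it runs without a CUDA device:

#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>

void segmentForeground(cv::VideoCapture& cap)
{
    cv::Ptr<cv::BackgroundSubtractor> bg = cv::createBackgroundSubtractorMOG2();
    cv::Mat frame, fgmask;
    while (cap.read(frame))
        bg->apply(frame, fgmask); // nonzero pixels do not fit the background model
}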
@@ -205,6 +205,7 @@ struct NcvPoint2D32u
     __host__ __device__ NcvPoint2D32u(Ncv32u x_, Ncv32u y_) : x(x_), y(y_) {}
 };

+//! @cond IGNORED

 NCV_CT_ASSERT(sizeof(NcvBool) <= 4);
 NCV_CT_ASSERT(sizeof(Ncv64s) == 8);
@@ -223,6 +224,7 @@ NCV_CT_ASSERT(sizeof(NcvRect32u) == 4 * sizeof(Ncv32u));
 NCV_CT_ASSERT(sizeof(NcvSize32u) == 2 * sizeof(Ncv32u));
 NCV_CT_ASSERT(sizeof(NcvPoint2D32u) == 2 * sizeof(Ncv32u));

+//! @endcond

 //==============================================================================
 //
@@ -114,7 +114,7 @@ StereoBeliefPropagation uses a truncated linear model for the data cost and disc

 \f[DiscTerm = \min (disc \_ single \_ jump \cdot \lvert f_1-f_2 \rvert , max \_ disc \_ term)\f]

-For more details, see @cite Felzenszwalb2006.
+For more details, see @cite Felzenszwalb2006 .

 By default, StereoBeliefPropagation uses floating-point arithmetics and the CV_32FC1 type for
 messages. But it can also use fixed-point arithmetics and the CV_16SC1 message type for better
@@ -192,7 +192,7 @@ CV_EXPORTS Ptr<cuda::StereoBeliefPropagation>

 /** @brief Class computing stereo correspondence using the constant space belief propagation algorithm. :

-The class implements algorithm described in @cite Yang2010. StereoConstantSpaceBP supports both local
+The class implements algorithm described in @cite Yang2010 . StereoConstantSpaceBP supports both local
 minimum and global minimum data cost initialization algorithms. For more details, see the paper
 mentioned above. By default, a local algorithm is used. To enable a global algorithm, set
 use_local_init_data_cost to false .
@@ -203,7 +203,7 @@ StereoConstantSpaceBP uses a truncated linear model for the data cost and discon

 \f[DiscTerm = \min (disc \_ single \_ jump \cdot \lvert f_1-f_2 \rvert , max \_ disc \_ term)\f]

-For more details, see @cite Yang2010.
+For more details, see @cite Yang2010 .

 By default, StereoConstantSpaceBP uses floating-point arithmetics and the CV_32FC1 type for
 messages. But it can also use fixed-point arithmetics and the CV_16SC1 message type for better
@@ -215,7 +215,7 @@ typedef Feature2D DescriptorExtractor;
 //! @addtogroup features2d_main
 //! @{

-/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11.
+/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .
 */
 class CV_EXPORTS_W BRISK : public Feature2D
 {
@@ -246,7 +246,7 @@ public:

 /** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor

-described in @cite RRKB11. The algorithm uses FAST in pyramids to detect stable keypoints, selects
+described in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects
 the strongest features using FAST or Harris response, finds their orientation using first-order
 moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or
 k-tuples) are rotated according to the measured orientation).
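A minimal sketch of the detect-and-describe interface of the class documented above; the image is assumed to be 8-bit grayscale:

#include <opencv2/features2d.hpp>
#include <vector>

void describeWithORB(const cv::Mat& gray)
{
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;                    // one binary BRIEF row per keypoint
    orb->detectAndCompute(gray, cv::noArray(), keypoints, descriptors);
}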
@@ -369,7 +369,7 @@ circle around this pixel.
 FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
 FastFeatureDetector::TYPE_5_8

-Detects corners using the FAST algorithm by @cite Rosten06.
+Detects corners using the FAST algorithm by @cite Rosten06 .

 @note In Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
 cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
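A minimal sketch of the free-function form of the detector documented above; the threshold value is illustrative:

#include <opencv2/features2d.hpp>
#include <vector>

void detectCorners(const cv::Mat& gray)
{
    std::vector<cv::KeyPoint> keypoints;
    cv::FAST(gray, keypoints, /*threshold=*/30, /*nonmaxSuppression=*/true);
}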
@@ -505,7 +505,7 @@ public:
 //! @addtogroup features2d_main
 //! @{

-/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12.
+/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .

 @note AKAZE descriptor can only be used with KAZE or AKAZE keypoints .. [ABD12] KAZE Features. Pablo
 F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision
@@ -556,7 +556,7 @@ public:
     CV_WRAP virtual int getDiffusivity() const = 0;
 };

-/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13. :
+/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13 . :

 @note AKAZE descriptors can only be used with KAZE or AKAZE keypoints. Try to avoid using *extract*
 and *detect* instead of *operator()* due to performance reasons. .. [ANB13] Fast Explicit Diffusion
@@ -969,7 +969,7 @@ An example using the LineSegmentDetector

 /** @brief Line segment detector class

-following the algorithm described at @cite Rafael12.
+following the algorithm described at @cite Rafael12 .
 */
 class CV_EXPORTS_W LineSegmentDetector : public Algorithm
 {
@@ -1418,7 +1418,7 @@ CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth,
 An example on using the canny edge detector
 */

-/** @brief Finds edges in an image using the Canny algorithm @cite Canny86.
+/** @brief Finds edges in an image using the Canny algorithm @cite Canny86 .

 The function finds edges in the input image image and marks them in the output map edges using the
 Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
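A minimal sketch of the call documented above; the hysteresis thresholds are illustrative values, normally tuned per image:

#include <opencv2/imgproc.hpp>

void detectEdges(const cv::Mat& gray)
{
    cv::Mat edges;
    cv::Canny(gray, edges, 50, 150); // the smaller value drives edge linking
}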
@@ -2940,7 +2940,7 @@ An example using the watershed algorithm
 /** @brief Performs a marker-based image segmentation using the watershed algorithm.

 The function implements one of the variants of watershed, non-parametric marker-based segmentation
-algorithm, described in @cite Meyer92.
+algorithm, described in @cite Meyer92 .

 Before passing the image to the function, you have to roughly outline the desired regions in the
 image markers with positive (\>0) indices. So, every region is represented as one or more connected
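A minimal sketch of the call documented above; markers is assumed to be a CV_32S image with the positive seed labels already drawn in:

#include <opencv2/imgproc.hpp>

void segment(const cv::Mat& bgrImage, cv::Mat& markers)
{
    cv::watershed(bgrImage, markers); // region boundaries become -1 in markers
}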
@@ -3050,7 +3050,7 @@ The functions distanceTransform calculate the approximate or precise distance fr
 image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.

 When maskSize == DIST_MASK_PRECISE and distanceType == DIST_L2 , the function runs the
-algorithm described in @cite Felzenszwalb04. This algorithm is parallelized with the TBB library.
+algorithm described in @cite Felzenszwalb04 . This algorithm is parallelized with the TBB library.

 In other cases, the algorithm @cite Borgefors86 is used. This means that for a pixel the function
 finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
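A minimal sketch of the precise DIST_L2 configuration described above:

#include <opencv2/imgproc.hpp>

void distances(const cv::Mat& binary) // 8-bit input; zero pixels are the targets
{
    cv::Mat dist; // CV_32F distance to the nearest zero pixel
    cv::distanceTransform(binary, dist, cv::DIST_L2, cv::DIST_MASK_PRECISE);
}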
@@ -3371,7 +3371,7 @@ CV_EXPORTS_W int connectedComponentsWithStats(InputArray image, OutputArray labe

 /** @brief Finds contours in a binary image.

-The function retrieves contours from the binary image using the algorithm @cite Suzuki85. The contours
+The function retrieves contours from the binary image using the algorithm @cite Suzuki85 . The contours
 are a useful tool for shape analysis and object detection and recognition. See squares.c in the
 OpenCV sample directory.
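A minimal sketch of the call documented above; the input is cloned because this era's findContours modifies the image it scans:

#include <opencv2/imgproc.hpp>
#include <vector>

void contours(const cv::Mat& binary)
{
    cv::Mat work = binary.clone();
    std::vector<std::vector<cv::Point> > found;
    cv::findContours(work, found, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
}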
@@ -87,7 +87,7 @@ nearest feature vectors from both classes (in case of 2-class classifier) is max
 vectors that are the closest to the hyper-plane are called *support vectors*, which means that the
 position of other vectors does not affect the hyper-plane (the decision function).

-SVM implementation in OpenCV is based on @cite LibSVM.
+SVM implementation in OpenCV is based on @cite LibSVM .

 Prediction with SVM
 -------------------
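A minimal sketch of training the classifier documented in this hunk with the cv::ml interface; samples and labels are assumed to be prepared by the caller:

#include <opencv2/ml.hpp>

void trainSvm(const cv::Mat& samples /* CV_32F, one row per sample */,
              const cv::Mat& labels  /* CV_32S class ids */)
{
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::C_SVC);
    svm->setKernel(cv::ml::SVM::LINEAR);
    svm->train(samples, cv::ml::ROW_SAMPLE, labels);
}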
@@ -98,7 +98,7 @@ the raw response from SVM (in the case of regression, 1-class or 2-class classif
 @defgroup ml_decsiontrees Decision Trees

 The ML classes discussed in this section implement Classification and Regression Tree algorithms
-described in @cite Breiman84.
+described in @cite Breiman84 .

 The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's
 also a base class for RTrees and Boost.
@@ -184,7 +184,7 @@ qualitative output is called *classification*, while predicting the quantitative

 Boosting is a powerful learning concept that provides a solution to the supervised classification
 learning task. It combines the performance of many "weak" classifiers to produce a powerful
-committee @cite HTF01. A weak classifier is only required to be better than chance, and thus can be
+committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be
 very simple and computationally inexpensive. However, many of them smartly combine results to a
 strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and
 Neural Networks.
@@ -197,7 +197,7 @@ The boosted model is based on \f$N\f$ training examples \f${(x_i,y_i)}_1^N\f$ with
 the learning task at hand. The desired two-class output is encoded as -1 and +1.

 Different variants of boosting are known as Discrete Adaboost, Real AdaBoost, LogitBoost, and Gentle
-AdaBoost @cite FHT98. All of them are very similar in their overall structure. Therefore, this chapter
+AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this chapter
 focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below. Initially the
 same weight is assigned to each sample (step 2). Then, a weak classifier \f$f_{m(x)}\f$ is trained on
 the weighted training data (step 3a). Its weighted training error and scaling factor \f$c_m\f$ is
@@ -236,7 +236,7 @@ induced classifier. This process is controlled with the weight_trim_rate paramet
 with the summary fraction weight_trim_rate of the total weight mass are used in the weak
 classifier training. Note that the weights for **all** training examples are recomputed at each
 training iteration. Examples deleted at a particular iteration may be used again for learning some
-of the weak classifiers further @cite FHT98.
+of the weak classifiers further @cite FHT98 .

 Prediction with Boost
 ---------------------
@@ -425,8 +425,8 @@ Regression is a binary classification algorithm which is closely related to Supp
 like digit recognition (i.e. recognizing digitis like 0,1 2, 3,... from the given images). This
 version of Logistic Regression supports both binary and multi-class classifications (for multi-class
 it creates a multiple 2-class classifiers). In order to train the logistic regression classifier,
-Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see @cite BatchDesWiki).
-Logistic Regression is a discriminative classifier (see @cite LogRegTomMitch for more details).
+Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see <http://en.wikipedia.org/wiki/Gradient_descent_optimization>).
+Logistic Regression is a discriminative classifier (see <http://www.cs.cmu.edu/~tom/NewChapters.html> for more details).
 Logistic Regression is implemented as a C++ class in LogisticRegression.

 In Logistic Regression, we try to optimize the training paramater \f$\theta\f$ such that the hypothesis
@@ -53,7 +53,7 @@ Haar Feature-based Cascade Classifier for Object Detection
 ----------------------------------------------------------

 The object detector described below has been initially proposed by Paul Viola @cite Viola01 and
-improved by Rainer Lienhart @cite Lienhart02.
+improved by Rainer Lienhart @cite Lienhart02 .

 First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is
 trained with a few hundred sample views of a particular object (i.e., a face or a car), called
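A minimal sketch of running a trained cascade; the XML filename is a hypothetical placeholder for one of the cascades shipped with OpenCV:

#include <opencv2/objdetect.hpp>
#include <vector>

void detectFaces(const cv::Mat& gray)
{
    cv::CascadeClassifier cascade("haarcascade_frontalface_alt.xml");
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(gray, faces);
}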
@@ -97,7 +97,7 @@ needs to be inpainted.
 by the algorithm.
 @param flags Inpainting method that could be one of the following:
 - **INPAINT_NS** Navier-Stokes based method [Navier01]
-- **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04.
+- **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04 .

 The function reconstructs the selected image area from the pixel near the area boundary. The
 function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
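A minimal sketch of the call documented above; mask is 8-bit and nonzero exactly where pixels must be reconstructed:

#include <opencv2/photo.hpp>

void repair(const cv::Mat& src, const cv::Mat& mask)
{
    cv::Mat dst;
    cv::inpaint(src, mask, dst, /*inpaintRadius=*/3, cv::INPAINT_TELEA);
}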
@@ -220,12 +220,12 @@ as the variational problem, primal-dual algorithm then can be used to perform de
 exactly what is implemented.

 It should be noted, that this implementation was taken from the July 2013 blog entry
-@cite Mordvintsev, which also contained (slightly more general) ready-to-use source code on Python.
+@cite MA13 , which also contained (slightly more general) ready-to-use source code on Python.
 Subsequently, that code was rewritten on C++ with the usage of openCV by Vadim Pisarevsky at the end
 of July 2013 and finally it was slightly adapted by later authors.

 Although the thorough discussion and justification of the algorithm involved may be found in
-@cite ChambolleEtAl, it might make sense to skim over it here, following @cite Mordvintsev. To begin
+@cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin
 with, we consider the 1-byte gray-level images as the functions from the rectangular domain of
 pixels (it may be seen as set
 \f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some
@@ -290,9 +290,9 @@ logarithmic domain.
 Since it's a global operator the same function is applied to all the pixels, it is controlled by the
 bias parameter.

-Optional saturation enhancement is possible as described in @cite FL02.
+Optional saturation enhancement is possible as described in @cite FL02 .

-For more information see @cite DM03.
+For more information see @cite DM03 .
 */
 class CV_EXPORTS_W TonemapDrago : public Tonemap
 {
@@ -322,7 +322,7 @@ This implementation uses regular bilateral filter from opencv.

 Saturation enhancement is possible as in ocvTonemapDrago.

-For more information see @cite DD02.
+For more information see @cite DD02 .
 */
 class CV_EXPORTS_W TonemapDurand : public Tonemap
 {
@@ -358,7 +358,7 @@ createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation
 Mapping function is controlled by adaptation parameter, that is computed using light adaptation and
 color adaptation.

-For more information see @cite RD05.
+For more information see @cite RD05 .
 */
 class CV_EXPORTS_W TonemapReinhard : public Tonemap
 {
@@ -389,7 +389,7 @@ createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_ad
 transforms contrast values to HVS response and scales the response. After this the image is
 reconstructed from new contrast values.

-For more information see @cite MM06.
+For more information see @cite MM06 .
 */
 class CV_EXPORTS_W TonemapMantiuk : public Tonemap
 {
@@ -435,7 +435,7 @@ It is invariant to exposure, so exposure values and camera response are not nece

 In this implementation new image regions are filled with zeros.

-For more information see @cite GW03.
+For more information see @cite GW03 .
 */
 class CV_EXPORTS_W AlignMTB : public AlignExposures
 {
@@ -510,7 +510,7 @@ public:
 function as linear system. Objective function is constructed using pixel values on the same position
 in all images, extra term is added to make the result smoother.

-For more information see @cite DM97.
+For more information see @cite DM97 .
 */
 class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF
 {
@@ -538,7 +538,7 @@ CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, floa
 /** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective
 function as linear system. This algorithm uses all image pixels.

-For more information see @cite RB99.
+For more information see @cite RB99 .
 */
 class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF
 {
@@ -579,7 +579,7 @@ public:
 /** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure
 values and camera response.

-For more information see @cite DM97.
+For more information see @cite DM97 .
 */
 class CV_EXPORTS_W MergeDebevec : public MergeExposures
 {
@@ -602,7 +602,7 @@ well-exposedness measures.
 The resulting image doesn't require tonemapping and can be converted to 8-bit image by multiplying
 by 255, but it's recommended to apply gamma correction and/or linear tonemapping.

-For more information see @cite MK07.
+For more information see @cite MK07 .
 */
 class CV_EXPORTS_W MergeMertens : public MergeExposures
 {
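A minimal sketch of the exposure-fusion class documented above; images holds differently exposed 8-bit shots of the same scene:

#include <opencv2/photo.hpp>
#include <vector>

void fuseExposures(const std::vector<cv::Mat>& images)
{
    cv::Ptr<cv::MergeMertens> mertens = cv::createMergeMertens();
    cv::Mat fusion; // float image roughly in [0, 1]; scale by 255 for 8-bit
    mertens->process(images, fusion);
}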
@@ -638,7 +638,7 @@ createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f,
 /** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure
 values and camera response.

-For more information see @cite RB99.
+For more information see @cite RB99 .
 */
 class CV_EXPORTS_W MergeRobertson : public MergeExposures
 {
@@ -656,7 +656,7 @@ CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();

 /** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
 black-and-white photograph rendering, and in many single channel image processing applications
-@cite CL12.
+@cite CL12 .

 @param src Input 8-bit 3-channel image.
 @param grayscale Output 8-bit 1-channel image.
@@ -673,7 +673,7 @@ CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray co
 deformations) or local changes concerned to a selection. Here we are interested in achieving local
 changes, ones that are restricted to a region manually selected (ROI), in a seamless and effortless
 manner. The extent of the changes ranges from slight distortions to complete replacement by novel
-content @cite PM03.
+content @cite PM03 .

 @param src Input 8-bit 3-channel image.
 @param dst Input 8-bit 3-channel image.
@@ -749,7 +749,7 @@ CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray
 //! @{

 /** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
-filters are used in many different applications @cite EM11.
+filters are used in many different applications @cite EM11 .

 @param src Input 8-bit 3-channel image.
 @param dst Output 8-bit 3-channel image.
@@ -61,7 +61,7 @@ class it's possible to configure/remove some steps, i.e. adjust the stitching pi
 the particular needs. All building blocks from the pipeline are available in the detail namespace,
 one can combine and use them separately.

-The implemented stitching pipeline is very similar to the one proposed in @cite BL07.
+The implemented stitching pipeline is very similar to the one proposed in @cite BL07 .

 ![image](StitchingPipeline.jpg)
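A minimal sketch of the high-level interface in front of this pipeline, assuming the createDefault factory of this era:

#include <opencv2/stitching.hpp>
#include <vector>

void makePanorama(const std::vector<cv::Mat>& imgs)
{
    cv::Stitcher stitcher = cv::Stitcher::createDefault();
    cv::Mat pano;
    cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != cv::Stitcher::OK)
        return; // e.g. not enough matched features between images
}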
@@ -229,7 +229,7 @@ public:
     enum CostType { COST_COLOR, COST_COLOR_GRAD };
 };

-/** @brief Minimum graph cut-based seam estimator. See details in @cite V03.
+/** @brief Minimum graph cut-based seam estimator. See details in @cite V03 .
 */
 class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
 {
@@ -50,7 +50,7 @@

 The Super Resolution module contains a set of functions and classes that can be used to solve the
 problem of resolution enhancement. There are a few methods implemented, most of them are descibed in
-the papers @cite Farsiu03 and @cite Mitzel09.
+the papers @cite Farsiu03 and @cite Mitzel09 .

 */
@@ -65,7 +65,7 @@ enum { OPTFLOW_USE_INITIAL_FLOW = 4,
 @param criteria Stop criteria for the underlying meanShift.
 returns
 (in old interfaces) Number of iterations CAMSHIFT took to converge
-The function implements the CAMSHIFT object tracking algorithm @cite Bradski98. First, it finds an
+The function implements the CAMSHIFT object tracking algorithm @cite Bradski98 . First, it finds an
 object center using meanShift and then adjusts the window size and finds the optimal rotation. The
 function returns the rotated rectangle structure that includes the object position, size, and
 orientation. The next position of the search window can be obtained with RotatedRect::boundingRect()
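A minimal sketch of the call documented above; backProject is a hue back-projection of the tracked object prepared upstream, and window holds the previous object location:

#include <opencv2/video.hpp>

cv::RotatedRect track(const cv::Mat& backProject, cv::Rect& window)
{
    return cv::CamShift(backProject, window,
        cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
}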
@@ -159,7 +159,7 @@ feature is filtered out and its flow is not processed, so it allows to remove ba
 performance boost.

 The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
-@cite Bouguet00. The function is parallelized with the TBB library.
+@cite Bouguet00 . The function is parallelized with the TBB library.

 @note
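A minimal sketch of the sparse tracking call documented above; prevPts would typically come from goodFeaturesToTrack:

#include <opencv2/video.hpp>
#include <vector>

void trackPoints(const cv::Mat& prevGray, const cv::Mat& nextGray,
                 const std::vector<cv::Point2f>& prevPts)
{
    std::vector<cv::Point2f> nextPts;
    std::vector<uchar> status; // 1 where the flow for a point was found
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);
}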
@@ -258,7 +258,7 @@ enum
     MOTION_HOMOGRAPHY = 3
 };

-/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08.
+/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .

 @param templateImage single-channel template image; CV_8U or CV_32F array.
 @param inputImage single-channel input image which should be warped with the final warpMatrix in
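A minimal sketch of the ECC alignment call declared above, estimating an affine warp; warpMatrix must be pre-initialized, identity being the usual start:

#include <opencv2/video.hpp>

void align(const cv::Mat& templateImage, const cv::Mat& inputImage)
{
    cv::Mat warpMatrix = cv::Mat::eye(2, 3, CV_32F);
    double cc = cv::findTransformECC(templateImage, inputImage,
                                     warpMatrix, cv::MOTION_AFFINE);
    (void)cc; // final correlation coefficient between the aligned images
}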
@@ -314,7 +314,7 @@ CV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray input
 /** @brief Kalman filter class.

 The class implements a standard Kalman filter <http://en.wikipedia.org/wiki/Kalman_filter>,
-@cite Welch95. However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to get
+@cite Welch95 . However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to get
 an extended Kalman filter functionality. See the OpenCV sample kalman.cpp.

 @note
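A minimal sketch of the predict/correct cycle of the class documented above, assuming a 4-state (x, y, vx, vy), 2-measurement (x, y) model constructed elsewhere via cv::KalmanFilter kf(4, 2) with its matrices filled in:

#include <opencv2/video.hpp>

void filterStep(cv::KalmanFilter& kf, const cv::Mat& measurement /* 2x1 CV_32F */)
{
    kf.predict();            // project the state estimate ahead in time
    kf.correct(measurement); // fold the new observation into the estimate
}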
@@ -383,7 +383,7 @@ public:
 /** @brief "Dual TV L1" Optical Flow Algorithm.

 The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and
-@cite Javier2012.
+@cite Javier2012 .
 Here are important members of the class that control the algorithm, which you can set after
 constructing the class instance:
@@ -172,4 +172,3 @@
 @end

 //! @} videoio_ios
-
@@ -48,7 +48,7 @@

 The video stabilization module contains a set of functions and classes that can be used to solve the
 problem of video stabilization. There are a few methods implemented, most of them are descibed in
-the papers @cite OF06 and @cite G11. However, there are some extensions and deviations from the orginal
+the papers @cite OF06 and @cite G11 . However, there are some extensions and deviations from the orginal
 paper methods.

 ### References
@@ -68,7 +68,7 @@ Both the functions and the classes are available.

 @defgroup videostab_marching Fast Marching Method

-The Fast Marching Method @cite T04 is used in of the video stabilization routines to do motion and
+The Fast Marching Method @cite Telea04 is used in of the video stabilization routines to do motion and
 color inpainting. The method is implemented is a flexible way and it's made public for other users.

 @}
@@ -55,7 +55,7 @@ namespace cv
 namespace videostab
 {

-//! @addtogroup vieostab
+//! @addtogroup videostab
 //! @{

 class CV_EXPORTS ISparseOptFlowEstimator
@@ -52,7 +52,7 @@ namespace cv
 namespace videostab
 {

-//! @addtogroup vieostab
+//! @addtogroup videostab
 //! @{

 class CV_EXPORTS IOutlierRejector
@@ -51,7 +51,7 @@ namespace cv
 namespace videostab
 {

-//! @addtogroup vieostab
+//! @addtogroup videostab
 //! @{

 template <typename T> inline T& at(int idx, std::vector<T> &items)
@@ -60,7 +60,7 @@ namespace cv
 namespace videostab
 {

-//! @addtogroup vieostab
+//! @addtogroup videostab
 //! @{

 class CV_EXPORTS StabilizerBase
@@ -54,7 +54,7 @@ namespace cv
 namespace videostab
 {

-//! @addtogroup vieostab
+//! @addtogroup videostab
 //! @{

 class CV_EXPORTS WobbleSuppressorBase