Merge commit '43aec5ad' into merge-2.4

Conflicts:
	cmake/OpenCVConfig.cmake
	cmake/OpenCVLegacyOptions.cmake
	modules/contrib/src/retina.cpp
	modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst
	modules/gpu/doc/video.rst
	modules/gpu/src/speckle_filtering.cpp
	modules/python/src2/cv2.cv.hpp
	modules/python/test/test2.py
	samples/python/watershed.py
commit 2c4bbb313c
Roman Donchenko, 2013-08-27 13:26:44 +04:00
448 changed files with 984 additions and 1307 deletions

.gitattributes

@ -1,42 +1,58 @@
.git* export-ignore
* text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4
*.py text
*.cpp text
*.hpp text
*.cxx text
*.hxx text
*.mm text
*.c text
*.h text
*.i text
*.js text
*.java text
*.scala text
*.cu text
*.cl text
*.css_t text
*.qrc text
*.qss text
*.S text
*.rst text
*.tex text
*.sty text
.git* text export-ignore
*.aidl text
*.mk text
*.aidl text
*.appxmanifest text
*.bib text
*.c text
*.cl text
*.conf text
*.cpp text
*.css_t text
*.cu text
*.cxx text
*.def text
*.filelist text
*.h text
*.hpp text
*.htm text
*.html text
*.hxx text
*.i text
*.idl text
*.java text
*.js text
*.mk text
*.mm text
*.plist text
*.properties text
*.py text
*.qrc text
*.qss text
*.S text
*.sbt text
*.scala text
*.sty text
*.tex text
*.txt text
*.xaml text
# reST underlines/overlines can look like conflict markers
*.rst text conflict-marker-size=80
*.cmake text whitespace=tabwidth=2
*.cmakein text whitespace=tabwidth=2
*.in text whitespace=tabwidth=2
CMakeLists.txt text whitespace=tabwidth=2
*.avi binary
*.bmp binary
*.exr binary
*.ico binary
*.jpeg binary
*.jpg binary
*.png binary
*.a binary
*.so binary
@ -47,6 +63,7 @@ CMakeLists.txt text whitespace=tabwidth=2
*.pbxproj binary
*.vec binary
*.doc binary
*.dia binary
*.xml -text whitespace=cr-at-eol
*.yml -text whitespace=cr-at-eol
@ -55,9 +72,12 @@ CMakeLists.txt text whitespace=tabwidth=2
.cproject -text whitespace=cr-at-eol merge=union
org.eclipse.jdt.core.prefs -text whitespace=cr-at-eol merge=union
*.vcproj text eol=crlf merge=union
*.bat text eol=crlf
*.cmd text eol=crlf
*.cmd.tmpl text eol=crlf
*.dsp text eol=crlf -whitespace
*.sln text eol=crlf -whitespace
*.vcproj text eol=crlf -whitespace merge=union
*.vcxproj text eol=crlf -whitespace merge=union
*.sh text eol=lf

3rdparty/.gitattributes (new file)

@ -0,0 +1 @@
* -whitespace


@ -8,4 +8,4 @@ set(HAVE_GENTOO_FFMPEG 1)
set(ALIASOF_libavcodec_VERSION 53.61.100)
set(ALIASOF_libavformat_VERSION 53.32.100)
set(ALIASOF_libavutil_VERSION 51.35.100)
set(ALIASOF_libswscale_VERSION 2.1.100)

@ -1,2 +1,2 @@
set path=c:\dev\msys32\bin;%path% & gcc -Wall -shared -o opencv_ffmpeg.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat -lavcodec -lavdevice -lswscale -lavutil -lwsock32
set path=c:\dev\msys64\bin;%path% & gcc -m64 -Wall -shared -o opencv_ffmpeg_64.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat64 -lavcodec64 -lavdevice64 -lswscale64 -lavutil64 -lavcore64 -lwsock32 -lws2_32

@ -40,5 +40,3 @@ How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of F
8. Then, go to <opencv>\3rdparty\ffmpeg, edit make.bat
(change paths to the actual paths to your msys32 and msys64 distributions) and then run make.bat


@ -168,4 +168,3 @@
/* Support Deflate compression */
#define ZIP_SUPPORT 1


@ -1 +1 @@
tbb*.tgz

@ -863,4 +863,3 @@ ocv_finalize_status()
if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
message(WARNING "The source directory is the same as binary directory. \"make clean\" may damage the source tree")
endif()


@ -79,4 +79,3 @@ if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(opencv_haartraining PROPERTIES FOLDER "applications")
set_target_properties(opencv_haartraining_engine PROPERTIES FOLDER "applications")
endif()


@ -90,4 +90,3 @@ int icvGetIdxAt( CvMat* idx, int pos )
void icvSave( const CvArr* ptr, const char* filename, int line );
#endif /* __CVCOMMON_H_ */


@ -375,4 +375,3 @@ int main( int argc, char* argv[] )
return 0;
}


@ -34,4 +34,3 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
install(TARGETS ${the_target} RUNTIME DESTINATION bin COMPONENT main)


@ -98,4 +98,3 @@ if(NOT BUILD_WITH_DEBUG_INFO AND NOT MSVC)
string(REPLACE "/Zi" "" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
string(REPLACE "/Zi" "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
endif()


@ -294,4 +294,4 @@ if(MSVC)
if(NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251") #class 'std::XXX' needs to have dll-interface to be used by clients of YYY
endif()
endif()

@ -140,7 +140,7 @@ endfunction()
# ------------------------------------------------------------------------
function(set_ipp_new_libraries _LATEST_VERSION)
set(IPP_PREFIX "ipp")
if(${_LATEST_VERSION} VERSION_LESS "8.0")
set(IPP_SUFFIX "_l") # static not threaded libs suffix IPP 7.x
else()
@ -346,4 +346,4 @@ if(WIN32 AND MINGW AND NOT IPP_LATEST_VERSION_MAJOR LESS 7)
# See http://code.opencv.org/issues/1906 for additional details
set(MSV_NTDLL "ntdll")
set(IPP_LIBRARIES ${IPP_LIBRARIES} ${MSV_NTDLL}${IPP_LIB_SUFFIX})
endif()

@ -19,7 +19,7 @@ set(XIMEA_LIBRARY_DIR)
if(WIN32)
# Try to find the XIMEA API path in registry.
GET_FILENAME_COMPONENT(XIMEA_PATH "[HKEY_CURRENT_USER\\Software\\XIMEA\\CamSupport\\API;Path]" ABSOLUTE)
if(EXISTS ${XIMEA_PATH})
set(XIMEA_FOUND 1)
# set LIB folders
@ -43,4 +43,4 @@ endif()
mark_as_advanced(FORCE XIMEA_FOUND)
mark_as_advanced(FORCE XIMEA_PATH)
mark_as_advanced(FORCE XIMEA_LIBRARY_DIR)

@ -515,4 +515,4 @@ function(ocv_source_group group)
cmake_parse_arguments(OCV_SOURCE_GROUP "" "" "GLOB" ${ARGN})
file(GLOB srcs ${OCV_SOURCE_GROUP_GLOB})
source_group(${group} FILES ${srcs})
endfunction()

@ -11,4 +11,4 @@ int main()
printf("%d.%d ", prop.major, prop.minor);
}
return 0;
}

@ -7,4 +7,4 @@ int main()
AVIFileInit();
AVIFileExit();
return 0;
}

@ -3,4 +3,4 @@
int main(int, char**)
{
return 0;
}

@ -32,4 +32,4 @@ foreach(cl ${cl_list})
file(APPEND ${OUTPUT} "const char* ${cl_filename}=\"${lines};\n")
endforeach()
file(APPEND ${OUTPUT} "}\n}\n")

@ -23,5 +23,3 @@ FOREACH(file ${files})
MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
ENDIF(EXISTS "$ENV{DESTDIR}${file}")
ENDFOREACH(file)


@ -6,4 +6,4 @@
*
*/
@OPENCV_MODULE_DEFINITIONS_CONFIGMAKE@

@ -1,4 +1,4 @@
function insertIframe (elementId, iframeSrc)
{
var iframe;
if (document.createElement && (iframe = document.createElement('iframe')))
@ -10,4 +10,3 @@ function insertIframe (elementId, iframeSrc)
element.parentNode.replaceChild(iframe, element);
}
}


@ -387,4 +387,4 @@ div.sphinxsidebar #searchbox input[type="text"] {
div.sphinxsidebar #searchbox input[type="submit"] {
width:auto;
}

@ -28,4 +28,4 @@ feedbacklinkcolor = #ffffff
bodyfont = sans-serif
headfont = 'Trebuchet MS', sans-serif
guifont = "Lucida Sans","Lucida Sans Unicode","Lucida Grande",Verdana,Arial,Helvetica,sans-serif
lang = none

@ -184,5 +184,3 @@ p = RSTParser()
for m in opencv_module_list:
print "\n\n*************************** " + m + " *************************\n"
p.check_module_docs(m)


@ -39,4 +39,3 @@
#7 & #8 & #9
\end{bmatrix}
}


@ -75,11 +75,11 @@
% if using A4 paper. (This probably isn't strictly necessary.)
% If using another size paper, use default 1cm margins.
\ifthenelse{\lengthtest { \paperwidth = 11in}}
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
}
% Turn off header and footer
% \pagestyle{empty}


@ -3667,4 +3667,3 @@ class YErrorBars:
output.append(LineAxis(x, start, x, end, start, end, bars, False, False, **self.attr).SVG(trans))
return output


@ -8,23 +8,23 @@ Cameras have been around for a long-long time. However, with the introduction of
Theory
======
For the distortion OpenCV takes into account the radial and tangential factors. For the radial factor one uses the following formula:
.. math::
x_{corrected} = x( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \\
y_{corrected} = y( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6)
So for an old pixel point at :math:`(x,y)` coordinates in the input image, its position in the corrected output image will be :math:`(x_{corrected}, y_{corrected})`. The presence of the radial distortion manifests itself in the form of the "barrel" or "fish-eye" effect.
Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. It can be corrected via the formulas:
.. math::
x_{corrected} = x + [ 2p_1xy + p_2(r^2+2x^2)] \\
y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy]
So we have five distortion parameters which in OpenCV are presented as one row matrix with 5 columns:
.. math::
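
As a hedged illustration only (not part of the tutorial), the two formulas above can be evaluated for a single normalized image point like this; the function name and signature are made up for this sketch:

.. code-block:: cpp

    #include <opencv2/core/core.hpp>

    // k1, k2, k3 are the radial coefficients, p1, p2 the tangential ones.
    cv::Point2d applyDistortionModel(double x, double y,
                                     double k1, double k2, double k3,
                                     double p1, double p2)
    {
        double r2     = x*x + y*y;
        double radial = 1.0 + k1*r2 + k2*r2*r2 + k3*r2*r2*r2;
        double xd = x*radial + 2.0*p1*x*y + p2*(r2 + 2.0*x*x);   // radial + tangential
        double yd = y*radial + p1*(r2 + 2.0*y*y) + 2.0*p2*x*y;
        return cv::Point2d(xd, yd);
    }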
@ -38,7 +38,7 @@ Now for the unit conversion we use the following formula:
Here the presence of :math:`w` is explained by the use of homogeneous coordinates (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)`, the optical center expressed in pixel coordinates. If a common focal length is used for both axes with a given aspect ratio :math:`a` (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single focal length :math:`f`. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolution used, they should be scaled together with the current resolution from the calibrated resolution.
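
As a quick, illustrative sketch of the pixel conversion described above (assuming the 3D point is already expressed in the camera coordinate system; the helper name is hypothetical):

.. code-block:: cpp

    #include <opencv2/core/core.hpp>

    // Pinhole projection: w = Z acts as the homogeneous scale factor.
    cv::Point2d projectToPixels(const cv::Point3d& P,
                                double fx, double fy, double cx, double cy)
    {
        return cv::Point2d(fx * P.x / P.z + cx,
                           fy * P.y / P.z + cy);
    }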
The process of determining these two matrices is the calibration. Calculation of these parameters is done through basic geometrical equations. The equations used depend on the chosen calibrating objects. Currently OpenCV supports three types of objects for calibration:
.. container:: enumeratevisibleitemswithsquare
@ -148,7 +148,7 @@ Explanation
Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners <findchessboardcorners>` or the :calib3d:`findCirclesGrid <findcirclesgrid>` function. For both of them you pass the current image and the size of the board and you'll get the positions of the patterns. Furthermore, they return a boolean variable which states if the pattern was found in the input (we only need to take into account those images where this is true!).
Then again, in case of cameras we only take camera images after an input delay time has passed. This is done in order to allow the user to move the chessboard around and get different images. Similar images result in similar equations, and similar equations at the calibration step will form an ill-posed problem, so the calibration will fail. For square images the positions of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix <cornersubpix>` function, which will produce a better calibration result. After this we add a valid input's result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we draw the found points on the input image using the :calib3d:`drawChessboardCorners <drawchessboardcorners>` function.
.. code-block:: cpp
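
The tutorial's own snippet is elided from this diff; the following is only a rough sketch of the same detection step, with the ``Settings`` fields replaced by plain parameters:

.. code-block:: cpp

    #include <vector>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/calib3d/calib3d.hpp>

    // Detect the chessboard in one view; if found, refine and store its corners.
    static bool collectCorners(cv::Mat& view, cv::Size boardSize,
                               std::vector<std::vector<cv::Point2f> >& imagePoints)
    {
        std::vector<cv::Point2f> pointBuf;
        bool found = cv::findChessboardCorners(view, boardSize, pointBuf,
            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
        if (found)
        {
            cv::Mat gray;
            cv::cvtColor(view, gray, CV_BGR2GRAY);
            cv::cornerSubPix(gray, pointBuf, cv::Size(11, 11), cv::Size(-1, -1),
                cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            imagePoints.push_back(pointBuf);
            cv::drawChessboardCorners(view, boardSize, cv::Mat(pointBuf), found);
        }
        return found;
    }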
@ -175,7 +175,7 @@ Explanation
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
#. **Show state and result to the user, plus command line control of the application**. This part shows text output on the image.
.. code-block:: cpp
@ -199,7 +199,7 @@ Explanation
if( blinkOutput )
bitwise_not(view, view);
If we ran the calibration and got the camera matrix with the distortion coefficients, we may want to correct the image using the :imgproc_geometric:`undistort <undistort>` function:
.. code-block:: cpp
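
A minimal sketch of that correction step (assuming ``view``, ``cameraMatrix`` and ``distCoeffs`` already exist, as in the tutorial):

.. code-block:: cpp

    #include <opencv2/imgproc/imgproc.hpp>

    // Replace the captured frame with its undistorted version.
    void undistortFrame(cv::Mat& view,
                        const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        cv::Mat temp = view.clone();
        cv::undistort(temp, view, cameraMatrix, distCoeffs);
    }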
@ -229,7 +229,7 @@ Explanation
imagePoints.clear();
}
#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must do this after the loop. Taking advantage of this, I'll now expand the :imgproc_geometric:`undistort <undistort>` function, which in fact first calls :imgproc_geometric:`initUndistortRectifyMap <initundistortrectifymap>` to find the transformation matrices and then performs the transformation using the :imgproc_geometric:`remap <remap>` function. Because, after a successful calibration, the map calculation needs to be done only once, using this expanded form may speed up your application:
.. code-block:: cpp
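
A hedged sketch of this expanded form (function and variable names are illustrative, not the tutorial's):

.. code-block:: cpp

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/calib3d/calib3d.hpp>

    // Compute the undistortion maps once ...
    void buildUndistortMaps(const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                            cv::Size imageSize, cv::Mat& map1, cv::Mat& map2)
    {
        cv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(),
            cv::getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1.0),
            imageSize, CV_16SC2, map1, map2);
    }

    // ... then every frame only needs a cheap remap:
    //     cv::remap(view, undistorted, map1, map2, cv::INTER_LINEAR);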
@ -256,7 +256,7 @@ Explanation
The calibration and save
========================
Because the calibration needs to be done only once per camera, it makes sense to save it after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file.
Therefore in the first function we just split up these two processes. Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what:
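
Since the saving part is not shown, here is a minimal, hedged sketch of what such a save could look like with ``cv::FileStorage`` (the node names are examples only; the real sample stores considerably more data):

.. code-block:: cpp

    #include <string>
    #include <opencv2/core/core.hpp>

    // The extension of outputFileName (.xml or .yml/.yaml) selects the format.
    void saveCameraParams(const std::string& outputFileName,
                          const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        cv::FileStorage fs(outputFileName, cv::FileStorage::WRITE);
        fs << "Camera_Matrix" << cameraMatrix;
        fs << "Distortion_Coefficients" << distCoeffs;
    }   // fs is released automatically here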
@ -280,7 +280,7 @@ Therefore in the first function we just split up these two processes. Because we
return ok;
}
We do the calibration with the help of the :calib3d:`calibrateCamera <calibratecamera>` function. It has the following parameters:
.. container:: enumeratevisibleitemswithsquare
@ -318,11 +318,11 @@ We do the calibration with the help of the :calib3d:`calibrateCamera <calibratec
calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);
objectPoints.resize(imagePoints.size(),objectPoints[0]);
+ The image points. This is a vector of *Point2f* vectors which for each input image contains the coordinates of the important points (corners for the chessboard and centers of the circles for the circle pattern). We have already collected this from the :calib3d:`findChessboardCorners <findchessboardcorners>` or :calib3d:`findCirclesGrid <findcirclesgrid>` function. We just need to pass it on.
+ The size of the image acquired from the camera, video file or the images.
+ The camera matrix. If we used the fixed aspect ratio option we need to set the :math:`f_x` to zero:
.. code-block:: cpp
@ -336,16 +336,16 @@ We do the calibration with the help of the :calib3d:`calibrateCamera <calibratec
distCoeffs = Mat::zeros(8, 1, CV_64F);
+ For all the views the function will calculate rotation and translation vectors which transform the object points (given in the model coordinate space) to the image points (given in the world coordinate space). The 7-th and 8-th parameters are the output vector of matrices containing in the i-th position the rotation and translation vector for the i-th object point to the i-th image point.
+ The final argument is the flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point.
.. code-block:: cpp
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
+ The function returns the average re-projection error. This number gives a good estimation of precision of the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints <projectpoints>` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculated for all the calibration images.
.. code-block:: cpp
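
A self-contained sketch of that error computation (one common way to aggregate it as an RMS value; this is not the sample's exact ``computeReprojectionErrors``):

.. code-block:: cpp

    #include <cmath>
    #include <vector>
    #include <opencv2/calib3d/calib3d.hpp>

    // RMS re-projection error over all corners of all calibration views.
    double reprojectionError(
        const std::vector<std::vector<cv::Point3f> >& objectPoints,
        const std::vector<std::vector<cv::Point2f> >& imagePoints,
        const std::vector<cv::Mat>& rvecs, const std::vector<cv::Mat>& tvecs,
        const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        std::vector<cv::Point2f> projected;
        double totalErr   = 0;
        size_t totalCount = 0;
        for (size_t i = 0; i < objectPoints.size(); ++i)
        {
            cv::projectPoints(cv::Mat(objectPoints[i]), rvecs[i], tvecs[i],
                              cameraMatrix, distCoeffs, projected);
            double err = cv::norm(cv::Mat(imagePoints[i]), cv::Mat(projected), CV_L2);
            totalErr   += err * err;
            totalCount += objectPoints[i].size();
        }
        return std::sqrt(totalErr / totalCount);
    }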
@ -378,7 +378,7 @@ We do the calibration with the help of the :calib3d:`calibrateCamera <calibratec
Results
=======
Let there be :download:`this input chessboard pattern <../../../pattern.png>` which has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into VID5 directory. I've put this inside the :file:`images/CameraCalibration` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use:
.. code-block:: xml
@ -396,7 +396,7 @@ Let there be :download:`this input chessboard pattern <../../../pattern.png>` wh
</images>
</opencv_storage>
Then passed :file:`images/CameraCalibration/VID5/VID5.XML` as an input in the configuration file. Here's a chessboard pattern found during the runtime of the application:
.. image:: images/fileListImage.jpg
:alt: A found chessboard
@ -433,7 +433,7 @@ In both cases in the specified output XML/YAML file you'll find the camera and d
-4.1802327176423804e-001 5.0715244063187526e-001 0. 0.
-5.7843597214487474e-001</data></Distortion_Coefficients>
Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap <initundistortrectifymap>` and the :imgproc_geometric:`remap <remap>` function to remove distortion and enjoy distortion free inputs for cheap and low quality cameras.
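
Reading those values back in your own program is symmetric to the saving step; a hedged sketch (node names assumed to match the XML excerpt above):

.. code-block:: cpp

    #include <string>
    #include <opencv2/core/core.hpp>

    // Load the stored calibration so it can be passed to
    // initUndistortRectifyMap()/remap() as described above.
    void loadCameraParams(const std::string& calibFile,
                          cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
    {
        cv::FileStorage fs(calibFile, cv::FileStorage::READ);
        fs["Camera_Matrix"] >> cameraMatrix;
        fs["Distortion_Coefficients"] >> distCoeffs;
    }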
You may observe a runtime instance of this on the `YouTube here <https://www.youtube.com/watch?v=ViPN810E0SU>`_.


@ -59,4 +59,4 @@ Now, let us write a code that detects a chessboard in a new image and finds its
#.
Calculate reprojection error like it is done in ``calibration`` sample (see ``opencv/samples/cpp/calibration.cpp``, function ``computeReprojectionErrors``).
Question: how to calculate the distance from the camera origin to any of the corners?
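
One possible answer, sketched here rather than taken from the tutorial: with ``rvec``/``tvec`` from ``solvePnP``, the camera origin expressed in the board coordinate system is :math:`C = -R^T t`, so the distance to a corner is the norm of the difference:

.. code-block:: cpp

    #include <opencv2/calib3d/calib3d.hpp>

    // Assumes rvec/tvec are CV_64F (as solvePnP returns by default) and that
    // the corner is given in the same board coordinate system.
    double distanceCameraToCorner(const cv::Mat& rvec, const cv::Mat& tvec,
                                  const cv::Point3f& corner)
    {
        cv::Mat R;
        cv::Rodrigues(rvec, R);               // 3x3 rotation matrix
        cv::Mat C = -R.t() * tvec;            // camera center in board coordinates
        cv::Mat P = (cv::Mat_<double>(3, 1) << corner.x, corner.y, corner.z);
        return cv::norm(P - C);
    }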


@ -277,4 +277,3 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
<div align="center">
<iframe title="File Input and Output using XML and YAML files in OpenCV" width="560" height="349" src="http://www.youtube.com/embed/A4yqVnByMMM?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>


@ -127,6 +127,3 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
<div align="center">
<iframe title="Interoperability with OpenCV 1" width="560" height="349" src="http://www.youtube.com/embed/qckm-zvo31w?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>


@ -218,4 +218,4 @@ Here you will learn the about the basic building blocks of the library. A must r
../random_generator_and_text/random_generator_and_text
../discrete_fourier_transform/discrete_fourier_transform
../file_input_output_with_xml_yml/file_input_output_with_xml_yml
../interoperability_with_OpenCV_1/interoperability_with_OpenCV_1

@ -1 +1 @@
Include in this directory only definition files. None of the reST files entered here will be parsed by the Sphinx Builder.


@ -100,6 +100,3 @@ Result
.. image:: images/Feature_Description_BruteForce_Result.jpg
:align: center
:height: 200pt


@ -201,4 +201,3 @@ Learn about how to use the feature points detectors, descriptors and matching f
../feature_flann_matcher/feature_flann_matcher
../feature_homography/feature_homography
../detection_of_planar_objects/detection_of_planar_objects


@ -135,4 +135,3 @@ Here is the result:
.. image:: images/Corner_Subpixeles_Result.jpg
:align: center


@ -37,4 +37,3 @@ Result
.. image:: images/My_Shi_Tomasi_corner_detector_Result.jpg
:align: center


@ -118,5 +118,3 @@ Result
.. image:: images/Feature_Detection_Result_a.jpg
:align: center


@ -243,5 +243,3 @@ The detected corners are surrounded by a small black circle
.. image:: images/Harris_Detector_Result.jpg
:align: center


@ -10,4 +10,3 @@ These tutorials are the bottom of the iceberg as they link together multiple of
.. raw:: latex
\pagebreak


@ -74,4 +74,4 @@ This section contains valuable tutorials about how to read/save your image/video
../trackbar/trackbar
../video-input-psnr-ssim/video-input-psnr-ssim
../video-write/video-write

@ -329,4 +329,3 @@ Result
.. image:: images/Histogram_Calculation_Result.jpg
:align: center


@ -369,4 +369,3 @@ Results
.. image:: images/Template_Matching_Image_Result.jpg
:align: center


@ -282,6 +282,3 @@ Result
:align: center
* Notice how the image is superposed to the black background on the edge regions.


@ -290,4 +290,3 @@ We get the following result by using the Probabilistic Hough Line Transform:
:align: center
You may observe that the number of lines detected varies as you change the *threshold*. The explanation is sort of evident: if you establish a higher threshold, fewer lines will be detected (since you will need more points to declare a line detected).


@ -311,4 +311,3 @@ Result
:alt: Result 0 for remapping
:width: 250pt
:align: center


@ -306,4 +306,3 @@ Result
:alt: Original image
:width: 250pt
:align: center


@ -279,4 +279,3 @@ Results
.. image:: images/Morphology_2_Tutorial_Cover.jpg
:alt: Morphology 2: Result sample
:align: center


@ -259,5 +259,3 @@ Results
.. image:: images/Pyramids_Tutorial_PyrUp_Result.jpg
:alt: Pyramids: PyrUp Result
:align: center


@ -121,4 +121,3 @@ Result
.. |BRC_1| image:: images/Bounding_Rects_Circles_Result.jpg
:align: middle


@ -123,4 +123,3 @@ Result
.. |BRE_1| image:: images/Bounding_Rotated_Ellipses_Result.jpg
:align: middle


@ -104,4 +104,3 @@ Result
.. |contour_1| image:: images/Find_Contours_Result.jpg
:align: middle


@ -113,4 +113,3 @@ Result
.. |Hull_1| image:: images/Hull_Result.jpg
:align: middle


@ -133,4 +133,3 @@ Result
.. |MU_2| image:: images/Moments_Result2.jpg
:width: 250pt
:align: middle


@ -114,4 +114,3 @@ Result
.. |PPT_1| image:: images/Point_Polygon_Test_Result.jpg
:align: middle


@ -539,6 +539,3 @@ In this section you will learn about the image processing (manipulation) functio
../shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses
../shapedescriptors/moments/moments
../shapedescriptors/point_polygon_test/point_polygon_test


@ -245,6 +245,3 @@ Say you have or create a new file, *helloworld.cpp* in a directory called *foo*:
a. You can also optionally modify the ``Build command:`` from ``make`` to something like ``make VERBOSE=1 -j4``, which tells ``make`` to print the full compiler command lines (useful for diagnosing build problems) and to run 4 compile jobs in parallel.
#. Done!


@ -81,3 +81,4 @@ Building OpenCV from Source Using CMake, Using the Command Line
If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest size as possible. The *stripped* version appears to be twice as small. However, we do not recommend using this unless those extra megabytes do really matter.

@ -127,4 +127,4 @@ Check out an instance of running code with more Image Effects on `YouTube <http:
<div align="center">
<iframe width="560" height="350" src="http://www.youtube.com/embed/Ko3K_xdhJ1I" frameborder="0" allowfullscreen></iframe>
</div>

@ -130,4 +130,3 @@ Result
.. image:: images/Cascade_Classifier_Tutorial_Result_LBP.jpg
:align: center
:height: 300pt


@ -5,5 +5,3 @@ install(FILES ${old_hdrs}
install(FILES "opencv2/opencv.hpp"
DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2
COMPONENT main)


@ -73,4 +73,3 @@
#endif //CV_IMPL
#endif // __OPENCV_OLD_CV_H_


@ -46,4 +46,3 @@
#include "opencv2/core/eigen.hpp"
#endif


@ -14,4 +14,3 @@ double getCameraPropertyC(void* camera, int propIdx);
void setCameraPropertyC(void* camera, int propIdx, double value);
void applyCameraPropertiesC(void** camera);
}


@ -44,4 +44,4 @@ private:
int frameHeight;
};
#endif

@ -6,4 +6,3 @@ calib3d. Camera Calibration and 3D Reconstruction
:maxdepth: 2
camera_calibration_and_3d_reconstruction


@ -621,4 +621,3 @@ void epnp::qr_solve(CvMat * A, CvMat * b, CvMat * X)
pX[i] = (pb[i] - sum) / A2[i];
}
}


@ -411,4 +411,3 @@ bool p3p::jacobi_4x4(double * A, double * D, double * U)
return false;
}


@ -59,4 +59,3 @@ class p3p
};
#endif // P3P_H


@ -348,4 +348,3 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
}
return;
}


@ -195,4 +195,3 @@ void CV_Affine3D_EstTest::run( int /* start_from */)
}
TEST(Calib3d_EstimateAffineTransform, accuracy) { CV_Affine3D_EstTest test; test.safe_run(); }


@ -735,5 +735,3 @@ protected:
TEST(Calib3d_CalibrateCamera_C, badarg) { CV_CameraCalibrationBadArgTest test; test.safe_run(); }
TEST(Calib3d_Rodrigues_C, badarg) { CV_Rodrigues2BadArgTest test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_C, badarg) { CV_ProjectPoints2BadArgTest test; test.safe_run(); }


@ -329,4 +329,3 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
return generateChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2,
squareSize.width, squareSize.height, pts3d, corners);
}


@ -212,4 +212,3 @@ protected:
};
TEST(Calib3d_ComposeRT, accuracy) { CV_composeRT_Test test; test.safe_run(); }


@ -21,4 +21,3 @@ namespace cvtest
}
#endif


@ -173,4 +173,3 @@ protected:
};
TEST(Calib3d_ReprojectImageTo3D, accuracy) { CV_ReprojectImageTo3DTest test; test.safe_run(); }


@ -306,4 +306,4 @@ TEST(DISABLED_Calib3d_SolvePnPRansac, concurrency)
EXPECT_LT(tnorm, 1e-6);
}
#endif

@ -94,4 +94,4 @@ void CV_UndistortTest::run(int /* start_from */)
}
}
TEST(Calib3d_Undistort, accuracy) { CV_UndistortTest test; test.safe_run(); }

@ -626,5 +626,3 @@ CSV for the AT&T Facedatabase
.. literalinclude:: etc/at.txt
:language: none
:linenos:


@ -30,4 +30,3 @@ Indices and tables
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@ -23,4 +23,3 @@ target_link_libraries(facerec_fisherfaces opencv_contrib opencv_core opencv_imgp
add_executable(facerec_lbph facerec_lbph.cpp)
target_link_libraries(facerec_lbph opencv_contrib opencv_core opencv_imgproc opencv_highgui)


@ -231,5 +231,3 @@ Here are some examples:
+---------------------------------+----------------------------------------------------------------------------+
| 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg |
+---------------------------------+----------------------------------------------------------------------------+


@ -44,4 +44,3 @@ And here is the Reconstruction, which is the same as the original:
.. image:: ../img/eigenface_reconstruction_opencv.png
:align: center


@ -205,5 +205,3 @@ Here are some examples:
+---------------------------------+----------------------------------------------------------------------------+
| 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg |
+---------------------------------+----------------------------------------------------------------------------+


@ -113,5 +113,3 @@ The method executes the variational algorithm on a rectified stereo pair. See ``
**Note**:
The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.
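
A hedged usage sketch (the input images are assumed to be an already rectified pair):

.. code-block:: cpp

    #include <opencv2/contrib/contrib.hpp>

    // Give every thread its own instance instead of sharing one StereoVar.
    void computeDisparity(const cv::Mat& leftRectified,
                          const cv::Mat& rightRectified, cv::Mat& disparity)
    {
        cv::StereoVar stereo;                 // per-thread / per-call instance
        stereo(leftRectified, rightRectified, disparity);
    }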


@ -286,5 +286,3 @@ void CvAdaptiveSkinDetector::Histogram::mergeWith(CvAdaptiveSkinDetector::Histog
}
}
};


@ -136,4 +136,3 @@ Mat BOWMSCTrainer::cluster(const Mat& _descriptors) const {
}
}


@ -287,4 +287,3 @@ bool ChowLiuTree::reduceEdgesToMinSpan(std::list<info>& edges) {
}
}


@ -132,5 +132,3 @@ Point2f CvMeanShiftTracker::getTrackingCenter()
{
return prev_center;
}


@ -41,4 +41,3 @@
//M*/
#include "precomp.hpp"


@ -892,4 +892,3 @@ const cv::DetectionBasedTracker::Parameters& DetectionBasedTracker::getParameter
}
#endif


@ -221,4 +221,3 @@ Point2f CvFeatureTracker::getTrackingCenter()
center.y = (float)(prev_center.y + prev_trackwindow.height/2.0);
return center;
}


@ -721,4 +721,3 @@ void CvFuzzyMeanShiftTracker::track(IplImage *maskImage, IplImage *depthMap, int
searchMode = tsTracking;
}
};


@ -233,4 +233,3 @@ void CvHybridTracker::updateTrackerWithLowPassFilter(Mat) {
Rect CvHybridTracker::getTrackingWindow() {
return prev_window;
}


@ -1106,4 +1106,3 @@ Mat LDA::reconstruct(InputArray src) {
}
}


@ -649,4 +649,3 @@ LogPolar_Adjacent::~LogPolar_Adjacent()
}
}


@ -408,4 +408,4 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
u.release();
}
} // namespace

Some files were not shown because too many files have changed in this diff.