Merge commit '43aec5ad' into merge-2.4

Conflicts:
	cmake/OpenCVConfig.cmake
	cmake/OpenCVLegacyOptions.cmake
	modules/contrib/src/retina.cpp
	modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst
	modules/gpu/doc/video.rst
	modules/gpu/src/speckle_filtering.cpp
	modules/python/src2/cv2.cv.hpp
	modules/python/test/test2.py
	samples/python/watershed.py
Roman Donchenko 2013-08-27 13:26:44 +04:00
commit 2c4bbb313c
448 changed files with 984 additions and 1307 deletions

.gitattributes (64 changes)

@ -1,42 +1,58 @@
.git* export-ignore
* text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4
*.py text
*.cpp text
*.hpp text
*.cxx text
*.hxx text
*.mm text
.git* text export-ignore
*.aidl text
*.appxmanifest text
*.bib text
*.c text
*.h text
*.i text
*.js text
*.java text
*.scala text
*.cu text
*.cl text
*.conf text
*.cpp text
*.css_t text
*.cu text
*.cxx text
*.def text
*.filelist text
*.h text
*.hpp text
*.htm text
*.html text
*.hxx text
*.i text
*.idl text
*.java text
*.js text
*.mk text
*.mm text
*.plist text
*.properties text
*.py text
*.qrc text
*.qss text
*.S text
*.rst text
*.tex text
*.sbt text
*.scala text
*.sty text
*.tex text
*.txt text
*.xaml text
*.aidl text
*.mk text
# reST underlines/overlines can look like conflict markers
*.rst text conflict-marker-size=80
*.cmake text whitespace=tabwidth=2
*.cmakein text whitespace=tabwidth=2
*.in text whitespace=tabwidth=2
CMakeLists.txt text whitespace=tabwidth=2
*.png binary
*.jpeg binary
*.jpg binary
*.avi binary
*.bmp binary
*.exr binary
*.ico binary
*.jpeg binary
*.jpg binary
*.png binary
*.a binary
*.so binary
@ -47,6 +63,7 @@ CMakeLists.txt text whitespace=tabwidth=2
*.pbxproj binary
*.vec binary
*.doc binary
*.dia binary
*.xml -text whitespace=cr-at-eol
*.yml -text whitespace=cr-at-eol
@ -55,9 +72,12 @@ CMakeLists.txt text whitespace=tabwidth=2
.cproject -text whitespace=cr-at-eol merge=union
org.eclipse.jdt.core.prefs -text whitespace=cr-at-eol merge=union
*.vcproj text eol=crlf merge=union
*.bat text eol=crlf
*.cmd text eol=crlf
*.cmd.tmpl text eol=crlf
*.dsp text eol=crlf -whitespace
*.sln text eol=crlf -whitespace
*.vcproj text eol=crlf -whitespace merge=union
*.vcxproj text eol=crlf -whitespace merge=union
*.sh text eol=lf

3rdparty/.gitattributes (new file, 1 change)

@ -0,0 +1 @@
* -whitespace


@ -40,5 +40,3 @@ How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of F
8. Then, go to <opencv>\3rdparty\ffmpeg, edit make.bat
(change paths to the actual paths to your msys32 and msys64 distributions) and then run make.bat


@ -168,4 +168,3 @@
/* Support Deflate compression */
#define ZIP_SUPPORT 1


@ -863,4 +863,3 @@ ocv_finalize_status()
if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
message(WARNING "The source directory is the same as binary directory. \"make clean\" may damage the source tree")
endif()


@ -79,4 +79,3 @@ if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(opencv_haartraining PROPERTIES FOLDER "applications")
set_target_properties(opencv_haartraining_engine PROPERTIES FOLDER "applications")
endif()


@ -90,4 +90,3 @@ int icvGetIdxAt( CvMat* idx, int pos )
void icvSave( const CvArr* ptr, const char* filename, int line );
#endif /* __CVCOMMON_H_ */


@ -375,4 +375,3 @@ int main( int argc, char* argv[] )
return 0;
}


@ -34,4 +34,3 @@ if(ENABLE_SOLUTION_FOLDERS)
endif()
install(TARGETS ${the_target} RUNTIME DESTINATION bin COMPONENT main)


@ -98,4 +98,3 @@ if(NOT BUILD_WITH_DEBUG_INFO AND NOT MSVC)
string(REPLACE "/Zi" "" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
string(REPLACE "/Zi" "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
endif()


@ -23,5 +23,3 @@ FOREACH(file ${files})
MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
ENDIF(EXISTS "$ENV{DESTDIR}${file}")
ENDFOREACH(file)


@ -10,4 +10,3 @@ function insertIframe (elementId, iframeSrc)
element.parentNode.replaceChild(iframe, element);
}
}


@ -184,5 +184,3 @@ p = RSTParser()
for m in opencv_module_list:
print "\n\n*************************** " + m + " *************************\n"
p.check_module_docs(m)


@ -39,4 +39,3 @@
#7 & #8 & #9
\end{bmatrix}
}


@ -3667,4 +3667,3 @@ class YErrorBars:
output.append(LineAxis(x, start, x, end, start, end, bars, False, False, **self.attr).SVG(trans))
return output


@ -277,4 +277,3 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
<div align="center">
<iframe title="File Input and Output using XML and YAML files in OpenCV" width="560" height="349" src="http://www.youtube.com/embed/A4yqVnByMMM?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>


@ -127,6 +127,3 @@ You may observe a runtime instance of this on the `YouTube here <https://www.you
<div align="center">
<iframe title="Interoperability with OpenCV 1" width="560" height="349" src="http://www.youtube.com/embed/qckm-zvo31w?rel=0&loop=1" frameborder="0" allowfullscreen align="middle"></iframe>
</div>


@ -100,6 +100,3 @@ Result
.. image:: images/Feature_Description_BruteForce_Result.jpg
:align: center
:height: 200pt


@ -201,4 +201,3 @@ Learn about how to use the feature points detectors, descriptors and matching f
../feature_flann_matcher/feature_flann_matcher
../feature_homography/feature_homography
../detection_of_planar_objects/detection_of_planar_objects


@ -135,4 +135,3 @@ Here is the result:
.. image:: images/Corner_Subpixeles_Result.jpg
:align: center


@ -37,4 +37,3 @@ Result
.. image:: images/My_Shi_Tomasi_corner_detector_Result.jpg
:align: center


@ -118,5 +118,3 @@ Result
.. image:: images/Feature_Detection_Result_a.jpg
:align: center


@ -243,5 +243,3 @@ The detected corners are surrounded by a small black circle
.. image:: images/Harris_Detector_Result.jpg
:align: center


@ -10,4 +10,3 @@ These tutorials are the bottom of the iceberg as they link together multiple of
.. raw:: latex
\pagebreak


@ -329,4 +329,3 @@ Result
.. image:: images/Histogram_Calculation_Result.jpg
:align: center


@ -369,4 +369,3 @@ Results
.. image:: images/Template_Matching_Image_Result.jpg
:align: center


@ -282,6 +282,3 @@ Result
:align: center
* Notice how the image is superposed to the black background on the edge regions.


@ -290,4 +290,3 @@ We get the following result by using the Probabilistic Hough Line Transform:
:align: center
You may observe that the number of lines detected vary while you change the *threshold*. The explanation is sort of evident: If you establish a higher threshold, fewer lines will be detected (since you will need more points to declare a line detected).


@ -311,4 +311,3 @@ Result
:alt: Result 0 for remapping
:width: 250pt
:align: center


@ -306,4 +306,3 @@ Result
:alt: Original image
:width: 250pt
:align: center


@ -279,4 +279,3 @@ Results
.. image:: images/Morphology_2_Tutorial_Cover.jpg
:alt: Morphology 2: Result sample
:align: center


@ -259,5 +259,3 @@ Results
.. image:: images/Pyramids_Tutorial_PyrUp_Result.jpg
:alt: Pyramids: PyrUp Result
:align: center


@ -121,4 +121,3 @@ Result
.. |BRC_1| image:: images/Bounding_Rects_Circles_Result.jpg
:align: middle


@ -123,4 +123,3 @@ Result
.. |BRE_1| image:: images/Bounding_Rotated_Ellipses_Result.jpg
:align: middle


@ -104,4 +104,3 @@ Result
.. |contour_1| image:: images/Find_Contours_Result.jpg
:align: middle


@ -113,4 +113,3 @@ Result
.. |Hull_1| image:: images/Hull_Result.jpg
:align: middle


@ -133,4 +133,3 @@ Result
.. |MU_2| image:: images/Moments_Result2.jpg
:width: 250pt
:align: middle


@ -114,4 +114,3 @@ Result
.. |PPT_1| image:: images/Point_Polygon_Test_Result.jpg
:align: middle


@ -539,6 +539,3 @@ In this section you will learn about the image processing (manipulation) functio
../shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses
../shapedescriptors/moments/moments
../shapedescriptors/point_polygon_test/point_polygon_test


@ -245,6 +245,3 @@ Say you have or create a new file, *helloworld.cpp* in a directory called *foo*:
a. You can also optionally modify the ``Build command:`` from ``make`` to something like ``make VERBOSE=1 -j4`` which tells the compiler to produce detailed symbol files for debugging and also to compile in 4 parallel threads.
#. Done!


@ -81,3 +81,4 @@ Building OpenCV from Source Using CMake, Using the Command Line
If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest size as possible. The *stripped* version appears to be twice as small. However, we do not recommend using this unless those extra megabytes do really matter.
If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest size as possible. The *stripped* version appears to be twice as small. However, we do not recommend using this unless those extra megabytes do really matter.


@ -130,4 +130,3 @@ Result
.. image:: images/Cascade_Classifier_Tutorial_Result_LBP.jpg
:align: center
:height: 300pt


@ -5,5 +5,3 @@ install(FILES ${old_hdrs}
install(FILES "opencv2/opencv.hpp"
DESTINATION ${OPENCV_INCLUDE_INSTALL_PATH}/opencv2
COMPONENT main)


@ -73,4 +73,3 @@
#endif //CV_IMPL
#endif // __OPENCV_OLD_CV_H_


@ -46,4 +46,3 @@
#include "opencv2/core/eigen.hpp"
#endif


@ -14,4 +14,3 @@ double getCameraPropertyC(void* camera, int propIdx);
void setCameraPropertyC(void* camera, int propIdx, double value);
void applyCameraPropertiesC(void** camera);
}


@ -6,4 +6,3 @@ calib3d. Camera Calibration and 3D Reconstruction
:maxdepth: 2
camera_calibration_and_3d_reconstruction


@ -621,4 +621,3 @@ void epnp::qr_solve(CvMat * A, CvMat * b, CvMat * X)
pX[i] = (pb[i] - sum) / A2[i];
}
}


@ -411,4 +411,3 @@ bool p3p::jacobi_4x4(double * A, double * D, double * U)
return false;
}


@ -59,4 +59,3 @@ class p3p
};
#endif // P3P_H


@ -348,4 +348,3 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
}
return;
}


@ -195,4 +195,3 @@ void CV_Affine3D_EstTest::run( int /* start_from */)
}
TEST(Calib3d_EstimateAffineTransform, accuracy) { CV_Affine3D_EstTest test; test.safe_run(); }


@ -735,5 +735,3 @@ protected:
TEST(Calib3d_CalibrateCamera_C, badarg) { CV_CameraCalibrationBadArgTest test; test.safe_run(); }
TEST(Calib3d_Rodrigues_C, badarg) { CV_Rodrigues2BadArgTest test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_C, badarg) { CV_ProjectPoints2BadArgTest test; test.safe_run(); }


@ -329,4 +329,3 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
return generateChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2,
squareSize.width, squareSize.height, pts3d, corners);
}


@ -212,4 +212,3 @@ protected:
};
TEST(Calib3d_ComposeRT, accuracy) { CV_composeRT_Test test; test.safe_run(); }


@ -21,4 +21,3 @@ namespace cvtest
}
#endif


@ -173,4 +173,3 @@ protected:
};
TEST(Calib3d_ReprojectImageTo3D, accuracy) { CV_ReprojectImageTo3DTest test; test.safe_run(); }


@ -626,5 +626,3 @@ CSV for the AT&T Facedatabase
.. literalinclude:: etc/at.txt
:language: none
:linenos:


@ -30,4 +30,3 @@ Indices and tables
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@ -23,4 +23,3 @@ target_link_libraries(facerec_fisherfaces opencv_contrib opencv_core opencv_imgp
add_executable(facerec_lbph facerec_lbph.cpp)
target_link_libraries(facerec_lbph opencv_contrib opencv_core opencv_imgproc opencv_highgui)


@ -231,5 +231,3 @@ Here are some examples:
+---------------------------------+----------------------------------------------------------------------------+
| 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg |
+---------------------------------+----------------------------------------------------------------------------+


@ -44,4 +44,3 @@ And here is the Reconstruction, which is the same as the original:
.. image:: ../img/eigenface_reconstruction_opencv.png
:align: center


@ -205,5 +205,3 @@ Here are some examples:
+---------------------------------+----------------------------------------------------------------------------+
| 0.2 (20%), 0.2 (20%), (70,70) | .. image:: ../img/tutorial/gender_classification/arnie_20_20_70_70.jpg |
+---------------------------------+----------------------------------------------------------------------------+


@ -113,5 +113,3 @@ The method executes the variational algorithm on a rectified stereo pair. See ``
**Note**:
The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously.


@ -286,5 +286,3 @@ void CvAdaptiveSkinDetector::Histogram::mergeWith(CvAdaptiveSkinDetector::Histog
}
}
};


@ -136,4 +136,3 @@ Mat BOWMSCTrainer::cluster(const Mat& _descriptors) const {
}
}


@ -287,4 +287,3 @@ bool ChowLiuTree::reduceEdgesToMinSpan(std::list<info>& edges) {
}
}


@ -132,5 +132,3 @@ Point2f CvMeanShiftTracker::getTrackingCenter()
{
return prev_center;
}


@ -41,4 +41,3 @@
//M*/
#include "precomp.hpp"


@ -892,4 +892,3 @@ const cv::DetectionBasedTracker::Parameters& DetectionBasedTracker::getParameter
}
#endif


@ -221,4 +221,3 @@ Point2f CvFeatureTracker::getTrackingCenter()
center.y = (float)(prev_center.y + prev_trackwindow.height/2.0);
return center;
}


@ -721,4 +721,3 @@ void CvFuzzyMeanShiftTracker::track(IplImage *maskImage, IplImage *depthMap, int
searchMode = tsTracking;
}
};


@ -233,4 +233,3 @@ void CvHybridTracker::updateTrackerWithLowPassFilter(Mat) {
Rect CvHybridTracker::getTrackingWindow() {
return prev_window;
}


@ -1106,4 +1106,3 @@ Mat LDA::reconstruct(InputArray src) {
}
}


@ -649,4 +649,3 @@ LogPolar_Adjacent::~LogPolar_Adjacent()
}
}


@ -14,4 +14,3 @@
#include <iostream>
#endif


@ -2941,4 +2941,3 @@ The above methods are usually enough for users. If you want to make your own alg
* Add public virtual method ``AlgorithmInfo* info() const;`` to your class.
* Add constructor function, ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take http://code.opencv.org/projects/opencv/repository/revisions/master/entry/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters.
* Add some public function (e.g. ``initModule_<mymodule>()``) that calls info() of your algorithm and put it into the same source file as ``info()`` implementation. This is to force C++ linker to include this object file into the target application. See ``Algorithm::create()`` for details.
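To make the three steps above concrete, here is a minimal hand-written sketch (not part of this diff); the ``MyFilter`` class, its ``ksize`` parameter, the ``"MyModule.MyFilter"`` name and ``initModule_mymodule()`` are invented for illustration, and real OpenCV 2.4 modules typically generate the same boilerplate with the ``CV_INIT_ALGORITHM`` macro: ::

    #include <opencv2/core/core.hpp>

    class MyFilter : public cv::Algorithm
    {
    public:
        MyFilter() : ksize(3) {}
        cv::AlgorithmInfo* info() const;   // step 1: the public virtual info() method
        int ksize;
    };

    static cv::Algorithm* createMyFilter() { return new MyFilter; }

    // Step 2: one AlgorithmInfo instance per class plus the info() implementation.
    cv::AlgorithmInfo* MyFilter::info() const
    {
        static cv::AlgorithmInfo info_("MyModule.MyFilter", createMyFilter);
        static volatile bool initialized = false;
        if (!initialized)
        {
            initialized = true;
            MyFilter obj;
            info_.addParam(obj, "ksize", obj.ksize);   // expose the parameter by name
        }
        return &info_;
    }

    // Step 3: a function in the same source file that calls info(), so the linker
    // keeps this object file in the target application.
    bool initModule_mymodule()
    {
        MyFilter f;
        return f.info() != 0;
    }

Once ``initModule_mymodule()`` has run, ``cv::Algorithm::create<MyFilter>("MyModule.MyFilter")`` can construct the class by its string name.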


@ -16,4 +16,3 @@ core. The Core Functionality
clustering
utility_and_system_functions_and_macros
opengl_interop


@ -1580,4 +1580,3 @@ Gathers all node pointers to a single sequence.
:param storage: Container for the sequence
The function puts pointers of all nodes reachable from ``first`` into a single sequence. The pointers are written sequentially in the depth-first order.


@ -1677,4 +1677,3 @@ For example, `NumPy <http://numpy.scipy.org/>`_ arrays support the array interfa
(480, 640, 3) 1
.. note:: In the new Python wrappers (**cv2** module) the function is not needed, since cv2 can process Numpy arrays (and this is the only supported array type).


@ -24,4 +24,3 @@ PERF_TEST_P(Size_MatType, abs, TYPICAL_MATS_ABS)
SANITY_CHECK(c);
}


@ -73,4 +73,3 @@ PERF_TEST_P(Size_MatType, bitwise_xor, TYPICAL_MATS_BITW_ARITHM)
SANITY_CHECK(c);
}


@ -2118,5 +2118,3 @@ TEST(Core_DS_Seq, sort_invert) { Core_SeqSortInvTest test; test.safe_run(); }
TEST(Core_DS_Set, basic_operations) { Core_SetTest test; test.safe_run(); }
TEST(Core_DS_Graph, basic_operations) { Core_GraphTest test; test.safe_run(); }
TEST(Core_DS_Graph, scan) { Core_GraphScanTest test; test.safe_run(); }


@ -866,5 +866,3 @@ protected:
};
TEST(Core_DFT, complex_output) { Core_DFTComplexOutputTest test; test.safe_run(); }


@ -2755,4 +2755,3 @@ TEST(CovariationMatrixVectorOfMatWithMean, accuracy)
}
/* End of file. */


@ -1,3 +1,2 @@
set(the_description "2D Features Framework")
ocv_define_module(features2d opencv_imgproc opencv_flann OPTIONAL opencv_highgui)


@ -274,5 +274,3 @@ Example: ::
VectorDescriptorMatcher matcher( new SurfDescriptorExtractor,
new BruteForceMatcher<L2<float> > );


@ -41,4 +41,3 @@ PERF_TEST_P(fast, detect, testing::Combine(
SANITY_CHECK_KEYPOINTS(points);
}


@ -357,4 +357,3 @@ int cornerScore<8>(const uchar* ptr, const int pixel[], int threshold)
}
} // namespace cv


@ -93,4 +93,3 @@ void CV_BRISKTest::run( int )
}
TEST(Features2d_BRISK, regression) { CV_BRISKTest test; test.safe_run(); }


@ -135,4 +135,3 @@ void CV_FastTest::run( int )
}
TEST(Features2d_FAST, regression) { CV_FastTest test; test.safe_run(); }


@ -166,5 +166,3 @@ TEST(Features2d_Detector_Keypoints_Dense, validation)
CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.Dense"));
test.safe_run();
}


@ -205,4 +205,3 @@ void CV_MserTest::run(int)
}
TEST(Features2d_MSER, DISABLED_regression) { CV_MserTest test; test.safe_run(); }


@ -1,3 +1,2 @@
set(the_description "Clustering and Search in Multi-Dimensional Spaces")
ocv_define_module(flann opencv_core)


@ -131,5 +131,3 @@ public:
}
#endif //OPENCV_FLANN_RANDOM_H


@ -540,4 +540,3 @@ private:
}
#endif //OPENCV_FLANN_RESULTSET_H


@ -309,4 +309,3 @@ Class that enables getting ``cudaStream_t`` from :ocv:class:`gpu::Stream` and is
{
CV_EXPORTS static cudaStream_t getStream(const Stream& stream);
};
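As a usage illustration (not from this diff), the raw handle returned by ``gpu::StreamAccessor::getStream()`` can be handed to plain CUDA Runtime calls so that they share a queue with OpenCV's asynchronous operations; the function and buffer names below are made up for the sketch: ::

    #include <opencv2/gpu/gpu.hpp>
    #include <opencv2/gpu/stream_accessor.hpp>
    #include <cuda_runtime.h>

    void copyOnOpenCVStream(const float* hostSrc, float* deviceDst, size_t count)
    {
        cv::gpu::Stream stream;   // OpenCV-managed asynchronous stream

        // Extract the underlying CUDA stream for native Runtime API calls
        // (or custom kernel launches).
        cudaStream_t raw = cv::gpu::StreamAccessor::getStream(stream);

        // For a truly asynchronous copy, hostSrc should point to page-locked memory.
        cudaMemcpyAsync(deviceDst, hostSrc, count * sizeof(float),
                        cudaMemcpyHostToDevice, raw);

        stream.waitForCompletion();   // synchronize through the OpenCV wrapper
    }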


@ -60,4 +60,3 @@ With this algorithm, a dual GPU gave a 180
%
performance increase comparing to the single Fermi GPU. For a source code example, see
http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/gpu/.


@ -257,4 +257,3 @@ if __name__ == "__main__":
outputFile = open(sys.argv[2], 'w')
outputFile.writelines(lines)
outputFile.close()


@ -290,5 +290,3 @@ void cv::gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& cam
}
#endif

Some files were not shown because too many files have changed in this diff.