Merge pull request #1914 from SpecLad:merge-2.4

Roman Donchenko 2013-12-04 16:30:55 +04:00 committed by OpenCV Buildbot
commit 0736ede7e5
125 changed files with 3283 additions and 304 deletions

Binary files not shown (21 files).

View File

@ -116,10 +116,10 @@ endif()
OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O" ON IF IOS)
OCV_OPTION(WITH_CARBON "Use Carbon for UI instead of Cocoa" OFF IF APPLE )
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OFF IF (NOT ANDROID AND NOT IOS AND NOT APPLE) )
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT IOS) )
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT IOS) )
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" OFF IF (NOT IOS) )
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" OFF IF (NOT IOS AND NOT APPLE) )
OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON)
OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 )
OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS))

View File

@ -14,6 +14,19 @@ using cv::FileNodeIterator;
using cv::ParallelLoopBody;
using cv::Size;
using cv::Mat;
using cv::Point;
using cv::FileStorage;
using cv::Rect;
using cv::Ptr;
using cv::FileNode;
using cv::Mat_;
using cv::Range;
using cv::FileNodeIterator;
using cv::ParallelLoopBody;
#include "boost.h"
#include "cascadeclassifier.h"
#include <queue>

View File

@ -137,6 +137,9 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
const CvCascadeBoostParams& _stageParams,
bool baseFormatSave )
{
// Start recording clock ticks for training time output
const clock_t begin_time = clock();
if( _cascadeDirName.empty() || _posFilename.empty() || _negFilename.empty() )
CV_Error( CV_StsBadArg, "_cascadeDirName or _bgfileName or _vecFileName is NULL" );
@ -247,6 +250,14 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
fs << FileStorage::getDefaultObjectName(stageFilename) << "{";
tempStage->write( fs, Mat() );
fs << "}";
// Output training time up till now
float seconds = float( clock () - begin_time ) / CLOCKS_PER_SEC;
int days = int(seconds) / 60 / 60 / 24;
int hours = (int(seconds) / 60 / 60) % 24;
int minutes = (int(seconds) / 60) % 60;
int seconds_left = int(seconds) % 60;
cout << "Training until now has taken " << days << " days " << hours << " hours " << minutes << " minutes " << seconds_left <<" seconds." << endl;
}
if(stageClassifiers.size() == 0)
@ -310,6 +321,7 @@ int CvCascadeClassifier::fillPassedSamples( int first, int count, bool isPositiv
if( predict( i ) == 1.0F )
{
getcount++;
printf("%s current samples: %d\r", isPositive ? "POS":"NEG", getcount);
break;
}
}

View File

@ -759,11 +759,15 @@ endif()
set(CUDA_VERSION_STRING "${CUDA_VERSION}")
# Support for arm cross compilation with CUDA 5.5
if(CUDA_VERSION VERSION_GREATER "5.0" AND CMAKE_CROSSCOMPILING AND ${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" AND EXISTS "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-gnueabihf")
set(CUDA_TOOLKIT_TARGET_DIR "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-gnueabihf" CACHE PATH "Toolkit target location.")
else()
set(CUDA_TOOLKIT_TARGET_DIR "${CUDA_TOOLKIT_ROOT_DIR}" CACHE PATH "Toolkit target location.")
set(__cuda_toolkit_target_dir_initial "${CUDA_TOOLKIT_ROOT_DIR}")
if(CUDA_VERSION VERSION_GREATER "5.0" AND CMAKE_CROSSCOMPILING AND ${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm")
if(ANDROID AND EXISTS "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-androideabi")
set(__cuda_toolkit_target_dir_initial "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-androideabi")
elseif(EXISTS "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-gnueabihf")
set(__cuda_toolkit_target_dir_initial "${CUDA_TOOLKIT_ROOT_DIR}/targets/armv7-linux-gnueabihf")
endif()
endif()
set(CUDA_TOOLKIT_TARGET_DIR "${__cuda_toolkit_target_dir_initial}" CACHE PATH "Toolkit target location.")
mark_as_advanced(CUDA_TOOLKIT_TARGET_DIR)
# Target CPU architecture

View File

@ -181,9 +181,6 @@ if(CMAKE_COMPILER_IS_GNUCXX)
set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} -DNDEBUG")
set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -O0 -DDEBUG -D_DEBUG")
if(BUILD_WITH_DEBUG_INFO)
set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -ggdb3")
endif()
endif()
if(MSVC)

View File

@ -88,12 +88,17 @@ if(CUDA_FOUND)
endif()
if(NOT DEFINED __cuda_arch_bin)
if(${CUDA_VERSION} VERSION_LESS "5.0")
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
if(ANDROID)
set(__cuda_arch_bin "3.2")
set(__cuda_arch_ptx "")
else()
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0 3.5")
if(${CUDA_VERSION} VERSION_LESS "5.0")
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
else()
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0 3.5")
endif()
set(__cuda_arch_ptx "3.0")
endif()
set(__cuda_arch_ptx "3.0")
endif()
set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
@ -145,6 +150,11 @@ if(CUDA_FOUND)
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} ${NVCC_FLAGS_EXTRA})
set(OpenCV_CUDA_CC "${NVCC_FLAGS_EXTRA}")
if(ANDROID)
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xptxas;-dlcm=ca")
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-target-os-variant=Android")
endif()
message(STATUS "CUDA NVCC target flags: ${CUDA_NVCC_FLAGS}")
OCV_OPTION(CUDA_FAST_MATH "Enable --use_fast_math for CUDA compiler " OFF)
@ -171,6 +181,14 @@ if(CUDA_FOUND)
# we remove -fvisibility-inlines-hidden because it's used for C++ compiler
# but NVCC uses C compiler by default
string(REPLACE "-fvisibility-inlines-hidden" "" ${var} "${${var}}")
# we remove -Wno-delete-non-virtual-dtor because it's used for C++ compiler
# but NVCC uses C compiler by default
string(REPLACE "-Wno-delete-non-virtual-dtor" "" ${var} "${${var}}")
# we remove -frtti because it's used for C++ compiler
# but NVCC uses C compiler by default
string(REPLACE "-frtti" "" ${var} "${${var}}")
endforeach()
if(BUILD_SHARED_LIBS)

View File

@ -437,7 +437,7 @@ Example. Decimate image by factor of $\sqrt{2}$:\\
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#watershed}{watershed()}},
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html\#grabcut}{grabCut()}}
& marker-based image segmentation algorithms.
See the samples \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
See the samples \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
\end{tabular}
@ -465,7 +465,7 @@ Example. Compute Hue-Saturation histogram of an image:\\
\end{tabbing}
\subsection{Contours}
See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/contours2.cpp}{contours2.cpp}} and \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/squares.cpp}{squares.cpp}}
See \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/contours2.cpp}{contours2.cpp}} and \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/squares.cpp}{squares.cpp}}
samples on what are the contours and how to use them.
\section{Data I/O}
@ -559,7 +559,7 @@ samples on what are the contours and how to use them.
\end{tabular}
See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/}{OpenCV samples} on how to use the GUI functions.
See \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{https://github.com/Itseez/opencv/tree/master/samples/}{OpenCV samples} on how to use the GUI functions.
\section{Camera Calibration, Pose Estimation and Depth Estimation}
@ -586,10 +586,10 @@ See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/ma
\end{tabular}
To calibrate a camera, you can use \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/calibration.cpp}{calibration.cpp}} or
\texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
To calibrate a camera, you can use \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/calibration.cpp}{calibration.cpp}} or
\texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
To get the disparity maps and the point clouds, use
\texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
\texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
\section{Object Detection}
@ -597,9 +597,9 @@ To get the disparity maps and the point clouds, use
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://docs.opencv.org/modules/imgproc/doc/object_detection.html\#matchtemplate}{matchTemplate}} & Compute proximity map for given template.\\
\texttt{\href{http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html\#cascadeclassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suits for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/c/facedetect.cpp}{facedetect.cpp}}\\
\texttt{\href{http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html\#cascadeclassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suits for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/c/facedetect.cpp}{facedetect.cpp}}\\
\texttt{{HOGDescriptor}} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suits for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
\texttt{{HOGDescriptor}} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suits for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{https://github.com/Itseez/opencv/tree/master/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
\end{tabular}
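
A minimal sketch of the HOGDescriptor entry above, using the built-in default people detector; the image path, window name and drawing colour are placeholders, not taken from the cheat sheet:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main(int argc, char** argv)
{
    if (argc < 2) return 1;                       // usage: ./hog_sketch <image>
    cv::Mat img = cv::imread(argv[1]);
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    std::vector<cv::Rect> people;
    hog.detectMultiScale(img, people);            // default parameters; tune for real data
    for (size_t i = 0; i < people.size(); ++i)
        cv::rectangle(img, people[i], cv::Scalar(0, 255, 0), 2);
    cv::imshow("people", img);
    cv::waitKey();
    return 0;
}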

View File

@ -56,7 +56,7 @@ Scalar
Code
=====
* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp>`_
* This code is in your OpenCV sample folder. Otherwise you can grab it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp>`_
Explanation
=============

View File

@ -23,7 +23,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
.. code-block:: cpp

View File

@ -19,97 +19,10 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
.. code-block:: cpp
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/nonfree.hpp"
using namespace cv;
void readme();
/** @function main */
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= 2*min_dist )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
/** @function readme */
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
.. literalinclude:: ../../../../samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp
:language: cpp
Explanation
============

View File

@ -19,7 +19,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp>`_
.. code-block:: cpp

View File

@ -20,7 +20,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp>`_
.. literalinclude:: ../../../../../samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp
:language: cpp

View File

@ -18,7 +18,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp>`_
.. code-block:: cpp

View File

@ -151,7 +151,7 @@ How does it work?
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp>`_
.. code-block:: cpp

View File

@ -70,7 +70,7 @@ Erosion
Code
======
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp>`_
.. code-block:: cpp

View File

@ -115,7 +115,7 @@ Code
* Applies 4 different kinds of filters (explained in Theory) and show the filtered images sequentially
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp>`_
Click `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp>`_
* **Code at glance:**

View File

@ -99,9 +99,9 @@ Code
* **Downloadable code**:
a. Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp>`_ for the basic version (explained in this tutorial).
b. For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the skin area) you can check the `improved demo <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp>`_
c. ...or you can always check out the classical `camshiftdemo <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/camshiftdemo.cpp>`_ in samples.
a. Click `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp>`_ for the basic version (explained in this tutorial).
b. For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the skin area) you can check the `improved demo <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp>`_
c. ...or you can always check out the classical `camshiftdemo <https://github.com/Itseez/opencv/tree/master/samples/cpp/camshiftdemo.cpp>`_ in samples.
* **Code at glance:**

View File

@ -82,7 +82,7 @@ Code
* Plot the three histograms in a window
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp>`_
Click `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp>`_
* **Code at glance:**

View File

@ -80,7 +80,7 @@ Code
* Display the numerical matching parameters obtained.
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp>`_
Click `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp>`_
* **Code at glance:**

View File

@ -83,7 +83,7 @@ Code
* Display the source and equalized images in a window.
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp>`_
Click `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp>`_
* **Code at glance:**

View File

@ -125,7 +125,7 @@ Code
* Draw a rectangle around the area corresponding to the highest match
* **Downloadable code**:
Click `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp>`_
Click `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp>`_
* **Code at glance:**

View File

@ -86,7 +86,7 @@ Code
* Applies the *Canny Detector* and generates a **mask** (bright lines representing the edges on a black background).
* Applies the mask obtained on the original image and display it in a window.
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp>`_
.. code-block:: cpp

View File

@ -47,7 +47,7 @@ Code
The user chooses either option by pressing 'c' (constant) or 'r' (replicate)
* The program finishes when the user presses 'ESC'
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp>`_
.. code-block:: cpp

View File

@ -72,7 +72,7 @@ Code
* The filter output (with each kernel) will be shown during 500 milliseconds
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp>`_
.. code-block:: cpp

View File

@ -40,9 +40,9 @@ Code
* Display the detected circle in a window.
.. |TutorialHoughCirclesSimpleDownload| replace:: here
.. _TutorialHoughCirclesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghcircles.cpp
.. _TutorialHoughCirclesSimpleDownload: https://github.com/Itseez/opencv/tree/master/samples/cpp/houghcircles.cpp
.. |TutorialHoughCirclesFancyDownload| replace:: here
.. _TutorialHoughCirclesFancyDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
.. _TutorialHoughCirclesFancyDownload: https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
#. The sample code that we will explain can be downloaded from |TutorialHoughCirclesSimpleDownload|_. A slightly fancier version (which shows both Hough standard and probabilistic with trackbars for changing the threshold values) can be found |TutorialHoughCirclesFancyDownload|_.

View File

@ -89,9 +89,9 @@ Code
======
.. |TutorialHoughLinesSimpleDownload| replace:: here
.. _TutorialHoughLinesSimpleDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/houghlines.cpp
.. _TutorialHoughLinesSimpleDownload: https://github.com/Itseez/opencv/tree/master/samples/cpp/houghlines.cpp
.. |TutorialHoughLinesFancyDownload| replace:: here
.. _TutorialHoughLinesFancyDownload: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp
.. _TutorialHoughLinesFancyDownload: https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp
#. **What does this program do?**

View File

@ -55,7 +55,7 @@ Code
* Applies a Laplacian operator to the grayscale image and stores the output image
* Display the result in a window
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp>`_
.. code-block:: cpp

View File

@ -59,7 +59,7 @@ Code
* Each second, apply 1 of 4 different remapping processes to the image and display them indefinitely in a window.
* Wait for the user to exit the program
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp>`_
.. code-block:: cpp

View File

@ -121,7 +121,7 @@ Code
* Applies the *Sobel Operator* and generates as output an image with the detected *edges* bright on a darker background.
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp>`_
.. code-block:: cpp

View File

@ -71,7 +71,7 @@ How do we get an Affine Transformation?
a. We know both :math:`X` and `T` and we also know that they are related. Then our job is to find :math:`M`
b. We know :math:`M` and :math:'X`. To obtain :math:`T` we only need to apply :math:`T = M \cdot X`. Our information for :math:`M` may be explicit (i.e. have the 2-by-3 matrix) or it can come as a geometric relation between points.
b. We know :math:`M` and :math:`X`. To obtain :math:`T` we only need to apply :math:`T = M \cdot X`. Our information for :math:`M` may be explicit (i.e. have the 2-by-3 matrix) or it can come as a geometric relation between points.
2. Let's explain a little bit better (b). Since :math:`M` relates 02 images, we can analyze the simplest case in which it relates three points in both images. Look at the figure below:
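
In code, case (a) above corresponds to cv::getAffineTransform (recover M from three point pairs) and case (b) to cv::warpAffine (apply T = M * X). A minimal sketch, with the three point pairs chosen arbitrarily for illustration:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

int main(int argc, char** argv)
{
    if (argc < 2) return 1;                               // usage: ./affine_sketch <image>
    cv::Mat src = cv::imread(argv[1]);
    cv::Point2f srcTri[3] = { cv::Point2f(0.f, 0.f),
                              cv::Point2f(src.cols - 1.f, 0.f),
                              cv::Point2f(0.f, src.rows - 1.f) };
    cv::Point2f dstTri[3] = { cv::Point2f(src.cols * 0.0f,  src.rows * 0.33f),
                              cv::Point2f(src.cols * 0.85f, src.rows * 0.25f),
                              cv::Point2f(src.cols * 0.15f, src.rows * 0.70f) };
    cv::Mat M = cv::getAffineTransform(srcTri, dstTri);   // case (a): find the 2x3 matrix M
    cv::Mat dst;
    cv::warpAffine(src, dst, M, src.size());              // case (b): apply T = M * X per pixel
    cv::imshow("warped", dst);
    cv::waitKey();
    return 0;
}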
@ -93,7 +93,7 @@ Code
* Applies a Rotation to the image after being transformed. This rotation is with respect to the image center
* Waits until the user exits the program
#. The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp>`_
#. The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp>`_
.. code-block:: cpp

View File

@ -111,7 +111,7 @@ Black Hat
Code
======
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp>`_
.. code-block:: cpp

View File

@ -80,7 +80,7 @@ Gaussian Pyramid
Code
======
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp>`_
.. code-block:: cpp

View File

@ -21,7 +21,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp>`_
.. code-block:: cpp

View File

@ -21,7 +21,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp>`_
.. code-block:: cpp

View File

@ -19,7 +19,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp>`_
.. code-block:: cpp

View File

@ -19,7 +19,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp>`_
.. code-block:: cpp

View File

@ -21,7 +21,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp>`_
.. code-block:: cpp

View File

@ -19,7 +19,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp>`_
.. code-block:: cpp

View File

@ -130,7 +130,7 @@ Threshold to Zero, Inverted
Code
======
The tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/ImgProc/Threshold.cpp>`_
The tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Threshold.cpp>`_
.. code-block:: cpp

View File

@ -17,7 +17,7 @@ In this tutorial you will learn how to:
Source Code
===========
Download the source code from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp>`_.
Download the source code from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp>`_.
.. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp
:language: cpp

View File

@ -22,7 +22,7 @@ Theory
Code
====
This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp>`_ . The second version (using LBP for face detection) can be `found here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp>`_
This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp>`_ . The second version (using LBP for face detection) can be `found here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp>`_
.. code-block:: cpp

View File

@ -130,4 +130,4 @@ Flags specifing the needed generator type must be used in combination with parti
For more information please refer to the example of usage openni_capture.cpp_ in ``opencv/samples/cpp`` folder.
.. _openni_capture.cpp: http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/openni_capture.cpp
.. _openni_capture.cpp: https://github.com/Itseez/opencv/tree/master/samples/cpp/openni_capture.cpp

View File

@ -686,7 +686,7 @@ void CameraHandler::closeCameraConnect()
camera->stopPreview();
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) \
|| defined(ANDROID_r4_3_0) || defined(ANDROID_r4_3_0)
|| defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
camera->setPreviewCallbackFlags(CAMERA_FRAME_CALLBACK_FLAG_NOOP);
#endif
camera->disconnect();

View File

@ -70,7 +70,7 @@ int main(int argc, const char *argv[]) {
cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
exit(1);
}
string output_folder;
string output_folder = ".";
if (argc == 3) {
output_folder = string(argv[2]);
}

View File

@ -70,7 +70,7 @@ int main(int argc, const char *argv[]) {
cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
exit(1);
}
string output_folder;
string output_folder = ".";
if (argc == 3) {
output_folder = string(argv[2]);
}

View File

@ -70,7 +70,7 @@ int main(int argc, const char *argv[]) {
cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
exit(1);
}
string output_folder;
string output_folder = ".";
if (argc == 3) {
output_folder = string(argv[2]);
}

View File

@ -602,8 +602,8 @@ inline void elbp_(InputArray _src, OutputArray _dst, int radius, int neighbors)
dst.setTo(0);
for(int n=0; n<neighbors; n++) {
// sample points
float x = static_cast<float>(-radius * sin(2.0*CV_PI*n/static_cast<float>(neighbors)));
float y = static_cast<float>(radius * cos(2.0*CV_PI*n/static_cast<float>(neighbors)));
float x = static_cast<float>(radius * cos(2.0*CV_PI*n/static_cast<float>(neighbors)));
float y = static_cast<float>(-radius * sin(2.0*CV_PI*n/static_cast<float>(neighbors)));
// relative indices
int fx = static_cast<int>(floor(x));
int fy = static_cast<int>(floor(y));
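
For orientation, the corrected lines sample neighbor n of the circular LBP operator at angle 2*pi*n/neighbors on a circle of the given radius; the minus sign on y accounts for the image y axis pointing downward. A tiny self-contained sketch, with radius 1 and 8 neighbors as example values, that prints the resulting offsets:

#include <cstdio>
#include <cmath>

int main()
{
    const double PI = 3.14159265358979323846;
    const int radius = 1, neighbors = 8;
    for (int n = 0; n < neighbors; ++n)
    {
        // same formulas as the fixed lines above
        double x =  radius * std::cos(2.0 * PI * n / neighbors);
        double y = -radius * std::sin(2.0 * PI * n / neighbors);
        std::printf("neighbor %d: offset (%+.3f, %+.3f)\n", n, x, y);
    }
    return 0;
}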

View File

@ -3122,5 +3122,5 @@ The above methods are usually enough for users. If you want to make your own alg
* Make a class and specify ``Algorithm`` as its base class.
* The algorithm parameters should be the class members. See ``Algorithm::get()`` for the list of possible types of the parameters.
* Add public virtual method ``AlgorithmInfo* info() const;`` to your class.
* Add constructor function, ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take http://code.opencv.org/projects/opencv/repository/revisions/master/entry/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters.
* Add constructor function, ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take https://github.com/Itseez/opencv/tree/master/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters.
* Add some public function (e.g. ``initModule_<mymodule>()``) that calls info() of your algorithm and put it into the same source file as ``info()`` implementation. This is to force C++ linker to include this object file into the target application. See ``Algorithm::create()`` for details.

View File

@ -211,16 +211,15 @@ CommandLineParser::CommandLineParser(int argc, const char* const argv[], const S
}
impl->apply_params(k_v[0], k_v[1]);
}
else if (s.length() > 2 && s[0] == '-' && s[1] == '-')
{
impl->apply_params(s.substr(2), "true");
}
else if (s.length() > 1 && s[0] == '-')
{
for (int h = 0; h < 2; h++)
{
if (s[0] == '-')
s = s.substr(1, s.length() - 1);
}
impl->apply_params(s, "true");
impl->apply_params(s.substr(1), "true");
}
else if (s[0] != '-')
else
{
impl->apply_params(jj, s);
jj++;
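
The rewritten branches treat --name and -name uniformly as boolean keys and everything else as positional arguments. A hedged usage sketch of cv::CommandLineParser showing how that surfaces to a caller; the key names and defaults here are invented for the example:

#include <opencv2/core.hpp>
#include <iostream>

int main(int argc, char** argv)
{
    const cv::String keys =
        "{help h | | print this message}"
        "{v      | | verbose output}"
        "{@input | lena.jpg | first positional argument}";
    cv::CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    bool verbose = parser.has("v");                        // set by '-v' or '--v'
    cv::String input = parser.get<cv::String>("@input");   // positional, '@'-prefixed in keys
    std::cout << "verbose=" << verbose << " input=" << input.c_str() << std::endl;
    return 0;
}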

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cuda)
endif()

View File

@ -58,4 +58,4 @@ While developing algorithms for multiple GPUs, note a data passing overhead. For
3. Merge the results into a single disparity map.
With this algorithm, a dual GPU gave a 180% performance increase comparing to the single Fermi GPU. For a source code example, see http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/gpu/.
With this algorithm, a dual GPU gave a 180% performance increase comparing to the single Fermi GPU. For a source code example, see https://github.com/Itseez/opencv/tree/master/samples/gpu/.

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudaarithm)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudabgsegm)
endif()

View File

@ -89,6 +89,8 @@ DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
const int numIters = 10;
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
@ -107,18 +109,36 @@ PERF_TEST_P(Video, FGDStatModel,
cv::Ptr<cv::cuda::BackgroundSubtractorFGD> d_fgd = cv::cuda::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, foreground);
for (int i = 0; i < 10; ++i)
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer(); next();
startTimer();
if(!next())
break;
d_fgd->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
d_fgd->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
#ifdef HAVE_OPENCV_CUDAIMGPROC
@ -134,18 +154,36 @@ PERF_TEST_P(Video, FGDStatModel,
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
for (int i = 0; i < 10; ++i)
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
startTimer(); next();
startTimer();
if(!next())
break;
cvUpdateBGStatModel(&ipl_frame, model);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
cvUpdateBGStatModel(&ipl_frame, model);
}
const cv::Mat background = cv::cvarrToMat(model->background);
const cv::Mat foreground = cv::cvarrToMat(model->foreground);
@ -171,6 +209,8 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
CUDA_CHANNELS_1_3_4,
Values(0.0, 0.01)))
{
const int numIters = 10;
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const float learningRate = static_cast<float>(GET_PARAM(2));
@ -202,7 +242,10 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
d_mog->apply(d_frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
@ -219,21 +262,17 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
d_frame.upload(frame);
startTimer(); next();
startTimer();
if(!next())
break;
d_mog->apply(d_frame, foreground, learningRate);
stopTimer();
}
CUDA_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
cv::Mat foreground;
mog->apply(frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
@ -248,11 +287,66 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
cv::swap(temp, frame);
}
startTimer(); next();
d_frame.upload(frame);
d_mog->apply(d_frame, foreground, learningRate);
}
CUDA_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
cv::Mat foreground;
mog->apply(frame, foreground, learningRate);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer();
if(!next())
break;
mog->apply(frame, foreground, learningRate);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
mog->apply(frame, foreground, learningRate);
}
CPU_SANITY_CHECK(foreground);
}
}
@ -266,10 +360,12 @@ PERF_TEST_P(Video_Cn_LearningRate, MOG,
DEF_PARAM_TEST(Video_Cn, string, int);
PERF_TEST_P(Video_Cn, MOG2,
PERF_TEST_P(Video_Cn, DISABLED_MOG2,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
CUDA_CHANNELS_1_3_4))
{
const int numIters = 10;
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
@ -301,7 +397,10 @@ PERF_TEST_P(Video_Cn, MOG2,
d_mog2->apply(d_frame, foreground);
for (int i = 0; i < 10; ++i)
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
@ -318,23 +417,17 @@ PERF_TEST_P(Video_Cn, MOG2,
d_frame.upload(frame);
startTimer(); next();
startTimer();
if(!next())
break;
d_mog2->apply(d_frame, foreground);
stopTimer();
}
CUDA_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
mog2->setDetectShadows(false);
cv::Mat foreground;
mog2->apply(frame, foreground);
for (int i = 0; i < 10; ++i)
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
@ -349,11 +442,68 @@ PERF_TEST_P(Video_Cn, MOG2,
cv::swap(temp, frame);
}
startTimer(); next();
d_frame.upload(frame);
d_mog2->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
mog2->setDetectShadows(false);
cv::Mat foreground;
mog2->apply(frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer();
if(!next())
break;
mog2->apply(frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
mog2->apply(frame, foreground);
}
CPU_SANITY_CHECK(foreground);
}
}
@ -455,6 +605,8 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
CUDA_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const int numIters = 150;
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
@ -486,7 +638,10 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
d_gmg->apply(d_frame, foreground);
for (int i = 0; i < 150; ++i)
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
@ -508,24 +663,17 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
d_frame.upload(frame);
startTimer(); next();
startTimer();
if(!next())
break;
d_gmg->apply(d_frame, foreground);
stopTimer();
}
CUDA_SANITY_CHECK(foreground);
}
else
{
cv::Mat foreground;
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::createBackgroundSubtractorGMG();
gmg->setMaxFeatures(maxFeatures);
gmg->apply(frame, foreground);
for (int i = 0; i < 150; ++i)
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
@ -545,11 +693,79 @@ PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
cv::swap(temp, frame);
}
startTimer(); next();
d_frame.upload(frame);
d_gmg->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground);
}
else
{
cv::Mat foreground;
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
cv::Ptr<cv::BackgroundSubtractorGMG> gmg = cv::createBackgroundSubtractorGMG();
gmg->setMaxFeatures(maxFeatures);
gmg->apply(frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer();
if(!next())
break;
gmg->apply(frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
gmg->apply(frame, foreground);
}
CPU_SANITY_CHECK(foreground);
}
}

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS OR APPLE)
if(IOS OR APPLE)
ocv_module_disable(cudacodec)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudafeatures2d)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudafilters)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudaimgproc)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudaoptflow)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudastereo)
endif()

View File

@ -1,4 +1,4 @@
if(ANDROID OR IOS)
if(IOS)
ocv_module_disable(cudawarping)
endif()

View File

@ -250,7 +250,7 @@ VideoCapture constructors.
.. ocv:cfunction:: CvCapture* cvCaptureFromCAM( int device )
.. ocv:cfunction:: CvCapture* cvCaptureFromFile( const char* filename )
:param filename: name of the opened video file (eg. video.avi) or image sequence (eg. img%02d.jpg)
:param filename: name of the opened video file (eg. video.avi) or image sequence (eg. img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
:param device: id of the opened video capturing device (i.e. a camera index). If there is a single camera connected, just pass 0.
@ -267,7 +267,7 @@ Open video file or a capturing device for video capturing
.. ocv:pyfunction:: cv2.VideoCapture.open(filename) -> retval
.. ocv:pyfunction:: cv2.VideoCapture.open(device) -> retval
:param filename: name of the opened video file (eg. video.avi) or image sequence (eg. img%02d.jpg)
:param filename: name of the opened video file (eg. video.avi) or image sequence (eg. img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
:param device: id of the opened video capturing device (i.e. a camera index).
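
A short sketch of the image-sequence form described in the filename parameter above; the img_%02d.jpg pattern follows the example in the text, everything else is illustrative:

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::VideoCapture cap("img_%02d.jpg");   // image sequence instead of a video file
    if (!cap.isOpened())
        return 1;
    cv::Mat frame;
    while (cap.read(frame))                 // read() returns false after the last image
    {
        cv::imshow("sequence", frame);
        if (cv::waitKey(30) >= 0)
            break;
    }
    return 0;
}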
@ -313,7 +313,7 @@ The methods/functions grab the next frame from video file or camera and return t
The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call ``VideoCapture::grab()`` for each camera and after that call the slower method ``VideoCapture::retrieve()`` to decode and get frame from each camera. This way the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames from different cameras will be closer in time.
Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call `VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/kinect_maps.cpp
Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call `VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See https://github.com/Itseez/opencv/tree/master/samples/cpp/openni_capture.cpp
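
A hedged sketch of the grab-then-retrieve pattern described above, for two unsynchronized cameras; the device ids 0 and 1 are examples:

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::VideoCapture cam0(0), cam1(1);
    if (!cam0.isOpened() || !cam1.isOpened())
        return 1;
    cv::Mat f0, f1;
    for (;;)
    {
        cam0.grab();            // grab both frames first: cheap, no decoding yet
        cam1.grab();
        cam0.retrieve(f0);      // then decode, so the two frames are close in time
        cam1.retrieve(f1);
        cv::imshow("cam0", f0);
        cv::imshow("cam1", f1);
        if (cv::waitKey(1) >= 0)
            break;
    }
    return 0;
}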
VideoCapture::retrieve

View File

@ -203,7 +203,7 @@ Sets mouse handler for the specified window
:param winname: Window name
:param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param onMouse: Mouse callback. See OpenCV samples, such as https://github.com/Itseez/opencv/tree/master/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
:param userdata: The optional parameter passed to the callback.
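
A minimal sketch of registering a mouse handler as described above; the window name, canvas and callback body are illustrative:

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>

static void onMouse(int event, int x, int y, int /*flags*/, void* /*userdata*/)
{
    if (event == cv::EVENT_LBUTTONDOWN)
        std::cout << "clicked at (" << x << ", " << y << ")" << std::endl;
}

int main()
{
    cv::Mat canvas(480, 640, CV_8UC3, cv::Scalar::all(0));
    cv::namedWindow("demo");
    cv::setMouseCallback("demo", onMouse, 0);   // third argument is the optional userdata
    cv::imshow("demo", canvas);
    cv::waitKey();
    return 0;
}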

View File

@ -236,7 +236,7 @@ Approximates a polygonal curve(s) with the specified precision.
The functions ``approxPolyDP`` approximate a curve or a polygon with another curve/polygon with less vertices so that the distance between them is less or equal to the specified precision. It uses the Douglas-Peucker algorithm
http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
See http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/contours.cpp for the function usage model.
See https://github.com/Itseez/opencv/tree/master/samples/cpp/contours2.cpp for the function usage model.
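
A hedged fragment showing the usual call pattern for approxPolyDP on detected contours; taking epsilon as roughly 1% of the contour perimeter is a common heuristic assumed here, not something the documentation prescribes:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main(int argc, char** argv)
{
    if (argc < 2) return 1;                              // usage: ./approx_sketch <binary image>
    cv::Mat bw = cv::imread(argv[1], 0);                 // load as single-channel grayscale
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bw, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); ++i)
    {
        std::vector<cv::Point> approx;
        double eps = 0.01 * cv::arcLength(contours[i], true);   // precision ~1% of perimeter
        cv::approxPolyDP(contours[i], approx, eps, true);       // true = closed curve
    }
    return 0;
}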
ApproxChains

View File

@ -21,7 +21,7 @@ The word "cascade" in the classifier name means that the resultant classifier co
The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within the region of interest and the scale (this scale is not the same as the scale used at the detection stage, though these two scales are multiplied). For example, in the case of the third line feature (2c) the response is calculated as the difference between the sum of image pixels under the rectangle covering the whole feature (including the two white stripes and the black stripe in the middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to compensate for the differences in the size of areas. The sums of pixel values over a rectangular regions are calculated rapidly using integral images (see below and the :ocv:func:`integral` description).
To see the object detector at work, have a look at the facedetect demo:
http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/dbt_face_detection.cpp
https://github.com/Itseez/opencv/tree/master/samples/cpp/dbt_face_detection.cpp
The following reference is for the detection part only. There is a separate application called ``opencv_traincascade`` that can train a cascade of boosted classifiers from a set of samples.
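
A small hedged sketch of using the cascade detector described above; the cascade file is a placeholder for one of the XML files shipped with OpenCV:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main(int argc, char** argv)
{
    if (argc < 3)
        return 1;                                       // usage: ./detect_sketch <cascade.xml> <image>
    cv::CascadeClassifier cascade;
    if (!cascade.load(argv[1]))                         // e.g. haarcascade_frontalface_alt.xml
        return 1;
    cv::Mat img = cv::imread(argv[2]);
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);
    std::vector<cv::Rect> objects;
    cascade.detectMultiScale(gray, objects, 1.1, 3, 0, cv::Size(30, 30));
    for (size_t i = 0; i < objects.size(); ++i)
        cv::rectangle(img, objects[i], cv::Scalar(255, 0, 0), 2);
    cv::imshow("detections", img);
    cv::waitKey();
    return 0;
}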

View File

@ -44,6 +44,8 @@
#ifndef __OPENCV_OCL_MATRIX_OPERATIONS_HPP__
#define __OPENCV_OCL_MATRIX_OPERATIONS_HPP__
#include "opencv2/ocl.hpp"
namespace cv
{

View File

@ -189,11 +189,8 @@ static bool parseOpenCLDeviceConfiguration(const std::string& configurationStr,
return true;
}
static bool __deviceSelected = false;
static bool selectOpenCLDevice()
{
__deviceSelected = true;
std::string platform;
std::vector<std::string> deviceTypes;
std::string deviceName;
@ -528,26 +525,38 @@ private:
static ContextImpl* currentContext = NULL;
static bool __deviceSelected = false;
Context* Context::getContext()
{
if (currentContext == NULL)
{
if (!__initialized || !__deviceSelected)
static bool defaultInitiaization = false;
if (!defaultInitiaization)
{
cv::AutoLock lock(getInitializationMutex());
if (!__initialized)
try
{
if (initializeOpenCLDevices() == 0)
if (!__initialized)
{
CV_Error(Error::OpenCLInitError, "OpenCL not available");
if (initializeOpenCLDevices() == 0)
{
CV_Error(Error::OpenCLInitError, "OpenCL not available");
}
}
if (!__deviceSelected)
{
if (!selectOpenCLDevice())
{
CV_Error(Error::OpenCLInitError, "Can't select OpenCL device");
}
}
defaultInitiaization = true;
}
if (!__deviceSelected)
catch (...)
{
if (!selectOpenCLDevice())
{
CV_Error(Error::OpenCLInitError, "Can't select OpenCL device");
}
defaultInitiaization = true;
throw;
}
}
CV_Assert(currentContext != NULL);
@ -744,10 +753,16 @@ int getOpenCLDevices(std::vector<const DeviceInfo*> &devices, int deviceType, co
void setDevice(const DeviceInfo* info)
{
if (!__deviceSelected)
try
{
ContextImpl::setContext(info);
__deviceSelected = true;
ContextImpl::setContext(info);
}
catch (...)
{
__deviceSelected = true;
throw;
}
}
bool supportsFeature(FEATURE_TYPE featureType)

View File

@ -192,6 +192,7 @@ void openCLMallocPitchEx(Context *ctx, void **dev_ptr, size_t *pitch,
clFinish(getClCommandQueue(ctx));
#endif
CheckBuffers data(mainBuffer, size, widthInBytes, height);
cv::AutoLock lock(getInitializationMutex());
__check_buffers.insert(std::pair<cl_mem, CheckBuffers>((cl_mem)*dev_ptr, data));
}
#endif
@ -253,10 +254,17 @@ void openCLFree(void *devPtr)
bool failBefore = false, failAfter = false;
#endif
CheckBuffers data;
std::map<cl_mem, CheckBuffers>::iterator i = __check_buffers.find((cl_mem)devPtr);
if (i != __check_buffers.end())
{
data = i->second;
cv::AutoLock lock(getInitializationMutex());
std::map<cl_mem, CheckBuffers>::iterator i = __check_buffers.find((cl_mem)devPtr);
if (i != __check_buffers.end())
{
data = i->second;
__check_buffers.erase(i);
}
}
if (data.mainBuffer != NULL)
{
#ifdef CHECK_MEMORY_CORRUPTION
Context* ctx = Context::getContext();
std::vector<uchar> checkBefore(__memory_corruption_guard_bytes);
@ -286,7 +294,6 @@ void openCLFree(void *devPtr)
clFinish(getClCommandQueue(ctx));
#endif
openCLSafeCall(clReleaseMemObject(data.mainBuffer));
__check_buffers.erase(i);
}
#if defined(CHECK_MEMORY_CORRUPTION)
if (failBefore)

View File

@ -923,7 +923,7 @@ void OclCascadeClassifier::detectMultiScale(oclMat &gimg, CV_OUT std::vector<cv:
//use known local data stride to precalulate indexes
int DATA_SIZE_X = (localThreads[0]+cascade->orig_window_size.width);
// check that maximal value is less than maximal unsigned short
assert(DATA_SIZE_X*cascade->orig_window_size.height+cascade->orig_window_size.width < USHRT_MAX);
assert(DATA_SIZE_X*cascade->orig_window_size.height+cascade->orig_window_size.width < (int)USHRT_MAX);
for(int i = 0;i<nodenum;++i)
{//process each node from classifier
struct NodePK

View File

@ -42,6 +42,10 @@
//
//M*/
#if (__GNUC__ == 4) && (__GNUC_MINOR__ == 6)
# pragma GCC diagnostic ignored "-Warray-bounds"
#endif
#include "precomp.hpp"
#include "opencl_kernels.hpp"

Some files were not shown because too many files have changed in this diff Show More