Merge remote-tracking branch 'refs/remotes/upstream/master' into rho

This commit is contained in:
Olexa Bilaniuk 2015-02-03 22:50:45 -05:00
commit 8907e6feac
160 changed files with 13093 additions and 5170 deletions

View File

@ -131,7 +131,7 @@ OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support"
OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" ON)
OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 )
OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS))
OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (UNIX AND NOT ANDROID) )
OCV_OPTION(WITH_GSTREAMER_0_10 "Enable Gstreamer 0.10 support (instead of 1.x)" OFF )
OCV_OPTION(WITH_GTK "Include GTK support" ON IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_GTK_2_X "Use GTK version 2" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
@ -162,6 +162,7 @@ OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF
OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF)
OCV_OPTION(WITH_OPENCL "Include OpenCL Runtime support" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENCL_SVM "Include OpenCL Shared Virtual Memory support" OFF ) # experimental
OCV_OPTION(WITH_OPENCLAMDFFT "Include AMD OpenCL FFT library support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF WIN32 )
@ -216,11 +217,14 @@ OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC"
OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE "Enable SSE instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE2 "Enable SSE2 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE3 "Enable SSE3 instructions" ON IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_POPCNT "Enable POPCNT instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_FMA3 "Enable FMA3 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR IOS) )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )

View File

@ -529,7 +529,7 @@ endmacro()
# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed,
# if they have, then clear the cache variables so that they will be detected again.
if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")
if(DEFINED CUDA_TOOLKIT_ROOT_DIR_INTERNAL AND (NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}"))
unset(CUDA_TARGET_TRIPLET CACHE)
unset(CUDA_TOOLKIT_TARGET_DIR CACHE)
unset(CUDA_NVCC_EXECUTABLE CACHE)
@ -537,8 +537,8 @@ if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")
cuda_unset_include_and_libraries()
endif()
if(NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}" OR
NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")
if(DEFINED CUDA_TARGET_TRIPLET_INTERNAL AND (NOT "${CUDA_TARGET_TRIPLET}" STREQUAL "${CUDA_TARGET_TRIPLET_INTERNAL}") OR
(DEFINED CUDA_TOOLKIT_TARGET_DIR AND DEFINED CUDA_TOOLKIT_TARGET_DIR_INTERNAL AND NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}"))
cuda_unset_include_and_libraries()
endif()

View File

@ -77,7 +77,7 @@ else()
endforeach(flag_var)
endif()
if(NOT ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} LESS 2.8 AND NOT ${CMAKE_MINOR_VERSION}.${CMAKE_PATCH_VERSION} LESS 8.6)
if(CMAKE_VERSION VERSION_GREATER "2.8.6")
include(ProcessorCount)
ProcessorCount(N)
if(NOT N EQUAL 0)

View File

@ -128,10 +128,10 @@ if(CMAKE_COMPILER_IS_GNUCXX)
if(ENABLE_SSE2)
add_extra_compiler_option(-msse2)
endif()
if (ENABLE_NEON)
if(ENABLE_NEON)
add_extra_compiler_option("-mfpu=neon")
endif()
if (ENABLE_VFPV3 AND NOT ENABLE_NEON)
if(ENABLE_VFPV3 AND NOT ENABLE_NEON)
add_extra_compiler_option("-mfpu=vfpv3")
endif()
@ -140,6 +140,13 @@ if(CMAKE_COMPILER_IS_GNUCXX)
if(ENABLE_AVX)
add_extra_compiler_option(-mavx)
endif()
if(ENABLE_AVX2)
add_extra_compiler_option(-mavx2)
if(ENABLE_FMA3)
add_extra_compiler_option(-mfma)
endif()
endif()
# GCC depresses SSEx instructions when -mavx is used. Instead, it generates new AVX instructions or AVX equivalence for all SSEx instructions when needed.
if(NOT OPENCV_EXTRA_CXX_FLAGS MATCHES "-mavx")
@ -158,6 +165,10 @@ if(CMAKE_COMPILER_IS_GNUCXX)
if(ENABLE_SSE42)
add_extra_compiler_option(-msse4.2)
endif()
if(ENABLE_POPCNT)
add_extra_compiler_option(-mpopcnt)
endif()
endif()
endif(NOT MINGW)
@ -214,7 +225,10 @@ if(MSVC)
set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} /Zi")
endif()
if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600)
if(ENABLE_AVX2 AND NOT MSVC_VERSION LESS 1800)
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX2")
endif()
if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600 AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:")
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX")
endif()
@ -236,7 +250,7 @@ if(MSVC)
endif()
endif()
if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX)
if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX OR ENABLE_AVX2)
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Oi")
endif()
@ -308,6 +322,7 @@ if(MSVC)
endforeach()
if(NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251") #class 'std::XXX' needs to have dll-interface to be used by clients of YYY
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4251) # class 'std::XXX' needs to have dll-interface to be used by clients of YYY
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4324) # 'struct_name' : structure was padded due to __declspec(align())
endif()
endif()

View File

@ -26,6 +26,10 @@ if(OPENCL_FOUND)
set(HAVE_OPENCL 1)
if(WITH_OPENCL_SVM)
set(HAVE_OPENCL_SVM 1)
endif()
if(HAVE_OPENCL_STATIC)
set(OPENCL_LIBRARIES "${OPENCL_LIBRARY}")
else()

View File

@ -32,7 +32,7 @@ if(WITH_GSTREAMER AND NOT WITH_GSTREAMER_0_10)
endif(WITH_GSTREAMER AND NOT WITH_GSTREAMER_0_10)
# if gstreamer 1.x was not found, or we specified we wanted 0.10, try to find it
if(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER)
if(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10)
CHECK_MODULE(gstreamer-base-0.10 HAVE_GSTREAMER_BASE)
CHECK_MODULE(gstreamer-video-0.10 HAVE_GSTREAMER_VIDEO)
CHECK_MODULE(gstreamer-app-0.10 HAVE_GSTREAMER_APP)
@ -47,7 +47,7 @@ if(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER)
set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-0.10_VERSION})
set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-0.10_VERSION})
endif()
endif(WITH_GSTREAMER_0_10 OR NOT HAVE_GSTREAMER)
endif(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10)
# --- unicap ---
ocv_clear_vars(HAVE_UNICAP)

View File

@ -109,7 +109,7 @@ endmacro()
# Usage:
# ocv_add_module(<name> [INTERNAL|BINDINGS] [REQUIRED] [<list of dependencies>] [OPTIONAL <list of optional dependencies>])
# Example:
# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cuda)
# ocv_add_module(yaom INTERNAL opencv_core opencv_highgui opencv_flann OPTIONAL opencv_cudev)
macro(ocv_add_module _name)
ocv_debug_message("ocv_add_module(" ${_name} ${ARGN} ")")
string(TOLOWER "${_name}" name)

View File

@ -122,6 +122,7 @@
/* OpenCL Support */
#cmakedefine HAVE_OPENCL
#cmakedefine HAVE_OPENCL_STATIC
#cmakedefine HAVE_OPENCL_SVM
/* OpenEXR codec */
#cmakedefine HAVE_OPENEXR

View File

@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>14</height>
<width>28</width>
<height>28</height>
<width>14</width>
<stageParams>
<maxWeakCount>107</maxWeakCount></stageParams>
<featureParams>

View File

@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>19</height>
<width>23</width>
<height>23</height>
<width>19</width>
<stageParams>
<maxWeakCount>89</maxWeakCount></stageParams>
<featureParams>

View File

@ -47,8 +47,8 @@
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>36</height>
<width>18</width>
<height>18</height>
<width>36</width>
<stageParams>
<maxWeakCount>53</maxWeakCount></stageParams>
<featureParams>

View File

@ -139,8 +139,8 @@ Thanks to Martin Spengler, ETH Zurich, for providing the demo movie.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>22</height>
<width>18</width>
<height>18</height>
<width>22</width>
<stageParams>
<maxWeakCount>152</maxWeakCount></stageParams>
<featureParams>

View File

@ -106,11 +106,11 @@
year = {1998},
publisher = {Citeseer}
}
@ARTICLE{Breiman84,
author = {Olshen, LBJFR and Stone, Charles J},
title = {Classification and regression trees},
year = {1984},
journal = {Wadsworth International Group}
@book{Breiman84,
title={Classification and regression trees},
author={Breiman, Leo and Friedman, Jerome and Stone, Charles J and Olshen, Richard A},
year={1984},
publisher={CRC press}
}
@INCOLLECTION{Brox2004,
author = {Brox, Thomas and Bruhn, Andres and Papenberg, Nils and Weickert, Joachim},

View File

@ -0,0 +1,57 @@
Image Segmentation with Distance Transform and Watershed Algorithm {#tutorial_distance_transform}
=============
Goal
----
In this tutorial you will learn how to:
- Use the OpenCV function @ref cv::filter2D in order to perform some Laplacian filtering for image sharpening
- Use the OpenCV function @ref cv::distanceTransform in order to obtain the derived representation of a binary image, where the value of each pixel is replaced by its distance to the nearest background pixel
- Use the OpenCV function @ref cv::watershed in order to isolate objects in the image from the background
Theory
------
Code
----
The code of this tutorial is shown in the lines below. You can also download it from
[here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
@includelineno samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
Explanation / Result
--------------------
-# Load the source image and check if it is loaded without any problem, then show it:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp load_image
![](images/source.jpeg)
-# Then, if we have an image with a white background, it is good to transform it to black. This will help us to discriminate the foreground objects more easily when we apply the Distance Transform:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp black_bg
![](images/black_bg.jpeg)
-# Afterwards we will sharpen our image in order to accentuate the edges of the foreground objects. We will apply a Laplacian filter with a quite strong kernel (an approximation of the second derivative):
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp sharp
![](images/laplace.jpeg)
![](images/sharp.jpeg)
-# Now we transform our new sharpened source image into a grayscale and a binary one, respectively:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp bin
![](images/bin.jpeg)
-# We are now ready to apply the Distance Transform on the binary image. We also normalize the output image so that we can visualize and threshold the result:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp dist
![](images/dist_transf.jpeg)
-# We threshold the *dist* image and then perform some morphology operations (e.g. dilation) in order to extract the peaks from the above image:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp peaks
![](images/peaks.jpeg)
-# Then, from each blob, we create a seed/marker for the watershed algorithm with the help of the @ref cv::findContours function:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp seeds
![](images/markers.jpeg)
-# Finally, we can apply the watershed algorithm, and visualize the result:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp watershed
![](images/final.jpeg)
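For reference, below is a condensed sketch of the whole pipeline in one place. It is not the sample file itself (download that from the link above), and the input path is illustrative:
@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
    cv::Mat src = cv::imread("cards.png");               // illustrative input path
    if (src.empty()) return -1;

    // 1. White background -> black, so the objects dominate the distance map
    for (int i = 0; i < src.rows; i++)
        for (int j = 0; j < src.cols; j++)
            if (src.at<cv::Vec3b>(i, j) == cv::Vec3b(255, 255, 255))
                src.at<cv::Vec3b>(i, j) = cv::Vec3b(0, 0, 0);

    // 2. Sharpen with a strong Laplacian kernel (second-derivative approximation)
    cv::Mat kernel = (cv::Mat_<float>(3, 3) << 1, 1, 1, 1, -8, 1, 1, 1, 1);
    cv::Mat lap, sharp;
    cv::filter2D(src, lap, CV_32F, kernel);
    src.convertTo(sharp, CV_32F);
    sharp -= lap;
    sharp.convertTo(sharp, CV_8UC3);

    // 3. Grayscale and binary versions of the sharpened image
    cv::Mat gray, bw;
    cv::cvtColor(sharp, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, bw, 40, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    // 4. Distance transform, normalized to [0,1] for visualization/thresholding
    cv::Mat dist;
    cv::distanceTransform(bw, dist, cv::DIST_L2, 3);
    cv::normalize(dist, dist, 0, 1.0, cv::NORM_MINMAX);

    // 5. Threshold + dilate to isolate the peaks, one per foreground object
    cv::threshold(dist, dist, 0.4, 1.0, cv::THRESH_BINARY);
    cv::dilate(dist, dist, cv::Mat::ones(3, 3, CV_8U));

    // 6. One marker per peak, via the external contours of the peak blobs
    cv::Mat dist8u;
    dist.convertTo(dist8u, CV_8U);
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(dist8u, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::Mat markers = cv::Mat::zeros(dist.size(), CV_32S);
    for (size_t i = 0; i < contours.size(); i++)
        cv::drawContours(markers, contours, (int)i, cv::Scalar((int)i + 1), -1);

    // 7. Watershed: the final labels are written back into markers
    cv::watershed(sharp, markers);
    return 0;
}
@endcode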

Binary files not shown: nine new tutorial images added (32, 48, 20, 41, 47, 22, 22, 50 and 46 KiB).

View File

@ -202,3 +202,11 @@ In this section you will learn about the image processing (manipulation) functio
*Author:* Ana Huamán
Where we learn how to calculate distances from the image to contours
- @subpage tutorial_distance_transform
*Compatibility:* \> OpenCV 2.0
*Author:* Theodore Tsesmelis
Where we learn to segment objects using Laplacian filtering, the Distance Transformation and the Watershed algorithm.

View File

@ -1652,7 +1652,7 @@ namespace fisheye
InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
/** @overload */
CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
/** @brief Distorts 2D points using fisheye model.
@ -1664,7 +1664,7 @@ namespace fisheye
@param alpha The skew coefficient.
@param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
*/
CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
/** @brief Undistorts 2D points using fisheye model
@ -1677,7 +1677,7 @@ namespace fisheye
@param P New camera matrix (3x3) or new projection matrix (3x4)
@param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
*/
CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted,
CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted,
InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray());
/** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty zero
@ -1694,7 +1694,7 @@ namespace fisheye
@param map1 The first output map.
@param map2 The second output map.
*/
CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
/** @brief Transforms an image to compensate for fisheye lens distortion.
@ -1725,7 +1725,7 @@ namespace fisheye
![image](pics/fisheye_undistorted.jpg)
*/
CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted,
CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted,
InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
/** @brief Estimates new camera matrix for undistortion or rectification.
@ -1741,7 +1741,7 @@ namespace fisheye
@param new_size
@param fov_scale Divisor for new focal length.
*/
CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
/** @brief Performs camera calibration
@ -1775,7 +1775,7 @@ namespace fisheye
zero.
@param criteria Termination criteria for the iterative optimization algorithm.
*/
CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
@ -1809,7 +1809,7 @@ namespace fisheye
length. Balance is in range of [0, 1].
@param fov_scale Divisor for new focal length.
*/
CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
double balance = 0.0, double fov_scale = 1.0);
@ -1845,9 +1845,9 @@ namespace fisheye
zero.
@param criteria Termination criteria for the iterative optimization algorithm.
*/
CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC,
OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
//! @} calib3d_fisheye
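// A minimal usage sketch (not part of this header): the CV_EXPORTS_W markers
// above expose these functions to the binding generators, while the C++ call
// is unchanged. Assuming K (3x3) and D (4x1) come from a prior fisheye
// calibration:
//
//     cv::Mat undistorted;
//     cv::fisheye::undistortImage(frame, undistorted, K, D, /*Knew=*/K);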

View File

@ -794,8 +794,20 @@ double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArray
if (K.needed()) cv::Mat(_K).convertTo(K, K.empty() ? CV_64FC1 : K.type());
if (D.needed()) cv::Mat(finalParam.k).convertTo(D, D.empty() ? CV_64FC1 : D.type());
if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type());
if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type());
    if (rvecs.kind() == _InputArray::STD_VECTOR_MAT)
    {
        // rvecs/tvecs came in as std::vector<cv::Mat>: allocate one 3x1 CV_64F
        // matrix per view and copy the pose into it (a plain assignment to the
        // temporary returned by getMat() would not reach the output array)
        for (int i = 0; i < (int)objectPoints.total(); i++)
        {
            rvecs.create(3, 1, CV_64F, i, true);
            tvecs.create(3, 1, CV_64F, i, true);
            Mat(omc[i]).copyTo(rvecs.getMat(i));
            Mat(Tc[i]).copyTo(tvecs.getMat(i));
        }
    }
else
{
if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type());
if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type());
}
return rms;
}
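// A minimal caller-side sketch (not part of the patch; objectPoints,
// imagePoints and imageSize are assumed filled by the caller): with the
// branch above, per-view poses can be received directly as std::vector<cv::Mat>.
//
//     std::vector<cv::Mat> rvecs, tvecs;        // one 3x1 CV_64F Mat per view
//     cv::Mat K, D;
//     double rms = cv::fisheye::calibrate(objectPoints, imagePoints, imageSize,
//                      K, D, rvecs, tvecs, cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC);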

View File

@ -13,6 +13,7 @@
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -813,4 +814,6 @@ inline float32x2_t cv_vsqrt_f32(float32x2_t val)
} // cv
#include "sse_utils.hpp"
#endif //__OPENCV_CORE_BASE_HPP__

View File

@ -13,6 +13,7 @@
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -104,17 +105,32 @@
#endif
/* CPU features and intrinsics support */
#define CV_CPU_NONE 0
#define CV_CPU_MMX 1
#define CV_CPU_SSE 2
#define CV_CPU_SSE2 3
#define CV_CPU_SSE3 4
#define CV_CPU_SSSE3 5
#define CV_CPU_SSE4_1 6
#define CV_CPU_SSE4_2 7
#define CV_CPU_POPCNT 8
#define CV_CPU_AVX 10
#define CV_CPU_NEON 11
#define CV_CPU_NONE 0
#define CV_CPU_MMX 1
#define CV_CPU_SSE 2
#define CV_CPU_SSE2 3
#define CV_CPU_SSE3 4
#define CV_CPU_SSSE3 5
#define CV_CPU_SSE4_1 6
#define CV_CPU_SSE4_2 7
#define CV_CPU_POPCNT 8
#define CV_CPU_AVX 10
#define CV_CPU_AVX2 11
#define CV_CPU_FMA3 12
#define CV_CPU_AVX_512F 13
#define CV_CPU_AVX_512BW 14
#define CV_CPU_AVX_512CD 15
#define CV_CPU_AVX_512DQ 16
#define CV_CPU_AVX_512ER 17
#define CV_CPU_AVX_512IFMA512 18
#define CV_CPU_AVX_512PF 19
#define CV_CPU_AVX_512VBMI 20
#define CV_CPU_AVX_512VL 21
#define CV_CPU_NEON 100
// when adding to this list remember to update the enum in core/utility.cpp
#define CV_HARDWARE_MAX_FEATURE 255
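// A minimal runtime-query sketch (not part of this header): these CV_CPU_*
// ids are the values accepted by cv::checkHardwareSupport(), e.g.
//
//     if (cv::checkHardwareSupport(CV_CPU_AVX2))
//         ;  // take the AVX2 code path
//     if (cv::checkHardwareSupport(CV_CPU_FMA3))
//         ;  // FMA3 is available as well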
@ -123,6 +139,7 @@
#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
# include <emmintrin.h>
# define CV_MMX 1
# define CV_SSE 1
# define CV_SSE2 1
# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
@ -141,7 +158,15 @@
# include <nmmintrin.h>
# define CV_SSE4_2 1
# endif
# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219)
# if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500)
# ifdef _MSC_VER
# include <nmmintrin.h>
# else
# include <popcntintrin.h>
# endif
# define CV_POPCNT 1
# endif
# if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600 && 0)
// MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX
// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32
# include <immintrin.h>
@ -152,6 +177,13 @@
# define __xgetbv() 0
# endif
# endif
# if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800 && 0)
# include <immintrin.h>
# define CV_AVX2 1
# if defined __FMA__
# define CV_FMA3 1
# endif
# endif
#endif
#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
@ -166,6 +198,12 @@
#endif // __CUDACC__
#ifndef CV_POPCNT
#define CV_POPCNT 0
#endif
#ifndef CV_MMX
# define CV_MMX 0
#endif
#ifndef CV_SSE
# define CV_SSE 0
#endif
@ -187,6 +225,40 @@
#ifndef CV_AVX
# define CV_AVX 0
#endif
#ifndef CV_AVX2
# define CV_AVX2 0
#endif
#ifndef CV_FMA3
# define CV_FMA3 0
#endif
#ifndef CV_AVX_512F
# define CV_AVX_512F 0
#endif
#ifndef CV_AVX_512BW
# define CV_AVX_512BW 0
#endif
#ifndef CV_AVX_512CD
# define CV_AVX_512CD 0
#endif
#ifndef CV_AVX_512DQ
# define CV_AVX_512DQ 0
#endif
#ifndef CV_AVX_512ER
# define CV_AVX_512ER 0
#endif
#ifndef CV_AVX_512IFMA512
# define CV_AVX_512IFMA512 0
#endif
#ifndef CV_AVX_512PF
# define CV_AVX_512PF 0
#endif
#ifndef CV_AVX_512VBMI
# define CV_AVX_512VBMI 0
#endif
#ifndef CV_AVX_512VL
# define CV_AVX_512VL 0
#endif
#ifndef CV_NEON
# define CV_NEON 0
#endif
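// A minimal compile-time sketch (not part of this header; add8 is an
// illustrative name): because every feature macro above is guaranteed to be
// defined as 0 or 1, unconditional #if tests are safe.
//
//     static void add8(const float* a, const float* b, float* dst)
//     {
//     #if CV_AVX2
//         _mm256_storeu_ps(dst, _mm256_add_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b)));
//     #else
//         for (int i = 0; i < 8; i++) dst[i] = a[i] + b[i];
//     #endif
//     }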

View File

@ -376,9 +376,10 @@ enum UMatUsageFlags
{
USAGE_DEFAULT = 0,
// default allocation policy is platform and usage specific
// buffer allocation policy is platform and usage specific
USAGE_ALLOCATE_HOST_MEMORY = 1 << 0,
USAGE_ALLOCATE_DEVICE_MEMORY = 1 << 1,
USAGE_ALLOCATE_SHARED_MEMORY = 1 << 2, // It is not equal to: USAGE_ALLOCATE_HOST_MEMORY | USAGE_ALLOCATE_DEVICE_MEMORY
__UMAT_USAGE_FLAGS_32BIT = 0x7fffffff // Binary compatibility hint
};
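// A minimal usage sketch (not part of this header): request shared (zero-copy)
// memory for a UMat; the allocator may fall back to its default policy when
// shared/SVM allocation is unsupported by the platform.
//
//     cv::UMat um(480, 640, CV_8UC1, cv::USAGE_ALLOCATE_SHARED_MEMORY);
//     cv::Mat view = um.getMat(cv::ACCESS_READ);  // map for host access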
@ -414,7 +415,7 @@ public:
const size_t dstofs[], const size_t dststep[], bool sync) const;
// default implementation returns DummyBufferPoolController
virtual BufferPoolController* getBufferPoolController() const;
virtual BufferPoolController* getBufferPoolController(const char* id = NULL) const;
};
@ -480,7 +481,7 @@ struct CV_EXPORTS UMatData
int refcount;
uchar* data;
uchar* origdata;
size_t size, capacity;
size_t size;
int flags;
void* handle;

View File

@ -56,6 +56,8 @@ CV_EXPORTS_W bool haveAmdFft();
CV_EXPORTS_W void setUseOpenCL(bool flag);
CV_EXPORTS_W void finish();
CV_EXPORTS bool haveSVM();
class CV_EXPORTS Context;
class CV_EXPORTS Device;
class CV_EXPORTS Kernel;
@ -248,7 +250,10 @@ public:
void* ptr() const;
friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device);
protected:
bool useSVM() const;
void setUseSVM(bool enabled);
struct Impl;
Impl* p;
};
@ -666,8 +671,17 @@ protected:
CV_EXPORTS MatAllocator* getOpenCLAllocator();
CV_EXPORTS_W bool isPerformanceCheckBypassed();
#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::isPerformanceCheckBypassed() || (condition))
#ifdef __OPENCV_BUILD
namespace internal {
CV_EXPORTS bool isPerformanceCheckBypassed();
#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::internal::isPerformanceCheckBypassed() || (condition))
CV_EXPORTS bool isCLBuffer(UMat& u);
} // namespace internal
#endif
//! @}

View File

@ -0,0 +1,81 @@
/* See LICENSE file in the root OpenCV directory */
#ifndef __OPENCV_CORE_OPENCL_SVM_HPP__
#define __OPENCV_CORE_OPENCL_SVM_HPP__
//
// Internal usage only (binary compatibility is not guaranteed)
//
#ifndef __OPENCV_BUILD
#error Internal header file
#endif
#if defined(HAVE_OPENCL) && defined(HAVE_OPENCL_SVM)
#include "runtime/opencl_core.hpp"
#include "runtime/opencl_svm_20.hpp"
#include "runtime/opencl_svm_hsa_extension.hpp"
namespace cv { namespace ocl { namespace svm {
struct SVMCapabilities
{
enum Value
{
SVM_COARSE_GRAIN_BUFFER = (1 << 0),
SVM_FINE_GRAIN_BUFFER = (1 << 1),
SVM_FINE_GRAIN_SYSTEM = (1 << 2),
SVM_ATOMICS = (1 << 3),
};
int value_;
SVMCapabilities(int capabilities = 0) : value_(capabilities) { }
operator int() const { return value_; }
inline bool isNoSVMSupport() const { return value_ == 0; }
inline bool isSupportCoarseGrainBuffer() const { return (value_ & SVM_COARSE_GRAIN_BUFFER) != 0; }
inline bool isSupportFineGrainBuffer() const { return (value_ & SVM_FINE_GRAIN_BUFFER) != 0; }
inline bool isSupportFineGrainSystem() const { return (value_ & SVM_FINE_GRAIN_SYSTEM) != 0; }
inline bool isSupportAtomics() const { return (value_ & SVM_ATOMICS) != 0; }
};
CV_EXPORTS const SVMCapabilities getSVMCapabilitites(const ocl::Context& context);
struct SVMFunctions
{
clSVMAllocAMD_fn fn_clSVMAlloc;
clSVMFreeAMD_fn fn_clSVMFree;
clSetKernelArgSVMPointerAMD_fn fn_clSetKernelArgSVMPointer;
//clSetKernelExecInfoAMD_fn fn_clSetKernelExecInfo;
//clEnqueueSVMFreeAMD_fn fn_clEnqueueSVMFree;
clEnqueueSVMMemcpyAMD_fn fn_clEnqueueSVMMemcpy;
clEnqueueSVMMemFillAMD_fn fn_clEnqueueSVMMemFill;
clEnqueueSVMMapAMD_fn fn_clEnqueueSVMMap;
clEnqueueSVMUnmapAMD_fn fn_clEnqueueSVMUnmap;
inline SVMFunctions()
: fn_clSVMAlloc(NULL), fn_clSVMFree(NULL),
fn_clSetKernelArgSVMPointer(NULL), /*fn_clSetKernelExecInfo(NULL),*/
/*fn_clEnqueueSVMFree(NULL),*/ fn_clEnqueueSVMMemcpy(NULL), fn_clEnqueueSVMMemFill(NULL),
fn_clEnqueueSVMMap(NULL), fn_clEnqueueSVMUnmap(NULL)
{
// nothing
}
inline bool isValid() const
{
return fn_clSVMAlloc != NULL && fn_clSVMFree && fn_clSetKernelArgSVMPointer &&
/*fn_clSetKernelExecInfo && fn_clEnqueueSVMFree &&*/ fn_clEnqueueSVMMemcpy &&
fn_clEnqueueSVMMemFill && fn_clEnqueueSVMMap && fn_clEnqueueSVMUnmap;
}
};
// We must guarantee that the SVMFunctions lifetime is not shorter than the context's lifetime
CV_EXPORTS const SVMFunctions* getSVMFunctions(const ocl::Context& context);
CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags);
}}} //namespace cv::ocl::svm
#endif
#endif // __OPENCV_CORE_OPENCL_SVM_HPP__
/* End of file. */
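// A rough internal-usage sketch (build-internal only, since this header
// requires __OPENCV_BUILD; the surrounding statements are illustrative and
// the calls match the declarations above):
//
//     cv::ocl::Context& ctx = cv::ocl::Context::getDefault();
//     const cv::ocl::svm::SVMCapabilities caps = cv::ocl::svm::getSVMCapabilitites(ctx);
//     if (!caps.isNoSVMSupport())
//     {
//         const cv::ocl::svm::SVMFunctions* fn = cv::ocl::svm::getSVMFunctions(ctx);
//         if (fn && fn->isValid())
//         {
//             void* p = fn->fn_clSVMAlloc((cl_context)ctx.ptr(), CL_MEM_READ_WRITE, 4096, 0);
//             if (p) fn->fn_clSVMFree((cl_context)ctx.ptr(), p);
//         }
//     }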

View File

@ -62,6 +62,18 @@
#endif
#endif
#ifdef HAVE_OPENCL_SVM
#define clSVMAlloc clSVMAlloc_
#define clSVMFree clSVMFree_
#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_
#define clSetKernelExecInfo clSetKernelExecInfo_
#define clEnqueueSVMFree clEnqueueSVMFree_
#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_
#define clEnqueueSVMMemFill clEnqueueSVMMemFill_
#define clEnqueueSVMMap clEnqueueSVMMap_
#define clEnqueueSVMUnmap clEnqueueSVMUnmap_
#endif
#include "autogenerated/opencl_core.hpp"
#endif // HAVE_OPENCL_STATIC

View File

@ -0,0 +1,52 @@
/* See LICENSE file in the root OpenCV directory */
#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__
#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__
#if defined(HAVE_OPENCL_SVM)
#include "opencl_core.hpp"
#include "opencl_svm_definitions.hpp"
#ifndef HAVE_OPENCL_STATIC
#undef clSVMAlloc
#define clSVMAlloc clSVMAlloc_pfn
#undef clSVMFree
#define clSVMFree clSVMFree_pfn
#undef clSetKernelArgSVMPointer
#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_pfn
#undef clSetKernelExecInfo
//#define clSetKernelExecInfo clSetKernelExecInfo_pfn
#undef clEnqueueSVMFree
//#define clEnqueueSVMFree clEnqueueSVMFree_pfn
#undef clEnqueueSVMMemcpy
#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_pfn
#undef clEnqueueSVMMemFill
#define clEnqueueSVMMemFill clEnqueueSVMMemFill_pfn
#undef clEnqueueSVMMap
#define clEnqueueSVMMap clEnqueueSVMMap_pfn
#undef clEnqueueSVMUnmap
#define clEnqueueSVMUnmap clEnqueueSVMUnmap_pfn
extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment);
extern CL_RUNTIME_EXPORT void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer);
extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value);
//extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value);
//extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMFree)(cl_command_queue command_queue, cl_uint num_svm_pointers, void* svm_pointers[],
// void (CL_CALLBACK *pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), void* user_data,
// cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event);
extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size,
cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event);
extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size,
cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event);
extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size,
cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event);
extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr,
cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event);
#endif // HAVE_OPENCL_STATIC
#endif // HAVE_OPENCL_SVM
#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP__

View File

@ -0,0 +1,42 @@
/* See LICENSE file in the root OpenCV directory */
#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__
#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__
#if defined(HAVE_OPENCL_SVM)
#if defined(CL_VERSION_2_0)
// OpenCL 2.0 contains SVM definitions
#else
typedef cl_bitfield cl_device_svm_capabilities;
typedef cl_bitfield cl_svm_mem_flags;
typedef cl_uint cl_kernel_exec_info;
//
// TODO Add real values after OpenCL 2.0 release
//
#ifndef CL_DEVICE_SVM_CAPABILITIES
#define CL_DEVICE_SVM_CAPABILITIES 0x1053
#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER (1 << 0)
#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER (1 << 1)
#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM (1 << 2)
#define CL_DEVICE_SVM_ATOMICS (1 << 3)
#endif
#ifndef CL_MEM_SVM_FINE_GRAIN_BUFFER
#define CL_MEM_SVM_FINE_GRAIN_BUFFER (1 << 10)
#endif
#ifndef CL_MEM_SVM_ATOMICS
#define CL_MEM_SVM_ATOMICS (1 << 11)
#endif
#endif // CL_VERSION_2_0
#endif // HAVE_OPENCL_SVM
#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP__

View File

@ -0,0 +1,166 @@
/* See LICENSE file in the root OpenCV directory */
#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__
#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__
#if defined(HAVE_OPENCL_SVM)
#include "opencl_core.hpp"
#ifndef CL_DEVICE_SVM_CAPABILITIES_AMD
//
// Part of the file is an extract from the cl_ext.h file from AMD APP SDK package.
// Below is the original copyright.
//
/*******************************************************************************
* Copyright (c) 2008-2013 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
******************************************************************************/
/*******************************************
* Shared Virtual Memory (SVM) extension
*******************************************/
typedef cl_bitfield cl_device_svm_capabilities_amd;
typedef cl_bitfield cl_svm_mem_flags_amd;
typedef cl_uint cl_kernel_exec_info_amd;
/* cl_device_info */
#define CL_DEVICE_SVM_CAPABILITIES_AMD 0x1053
#define CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT_AMD 0x1054
/* cl_device_svm_capabilities_amd */
#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_AMD (1 << 0)
#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER_AMD (1 << 1)
#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM_AMD (1 << 2)
#define CL_DEVICE_SVM_ATOMICS_AMD (1 << 3)
/* cl_svm_mem_flags_amd */
#define CL_MEM_SVM_FINE_GRAIN_BUFFER_AMD (1 << 10)
#define CL_MEM_SVM_ATOMICS_AMD (1 << 11)
/* cl_mem_info */
#define CL_MEM_USES_SVM_POINTER_AMD 0x1109
/* cl_kernel_exec_info_amd */
#define CL_KERNEL_EXEC_INFO_SVM_PTRS_AMD 0x11B6
#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM_AMD 0x11B7
/* cl_command_type */
#define CL_COMMAND_SVM_FREE_AMD 0x1209
#define CL_COMMAND_SVM_MEMCPY_AMD 0x120A
#define CL_COMMAND_SVM_MEMFILL_AMD 0x120B
#define CL_COMMAND_SVM_MAP_AMD 0x120C
#define CL_COMMAND_SVM_UNMAP_AMD 0x120D
typedef CL_API_ENTRY void*
(CL_API_CALL * clSVMAllocAMD_fn)(
cl_context /* context */,
cl_svm_mem_flags_amd /* flags */,
size_t /* size */,
unsigned int /* alignment */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY void
(CL_API_CALL * clSVMFreeAMD_fn)(
cl_context /* context */,
void* /* svm_pointer */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clEnqueueSVMFreeAMD_fn)(
cl_command_queue /* command_queue */,
cl_uint /* num_svm_pointers */,
void** /* svm_pointers */,
void (CL_CALLBACK *)( /*pfn_free_func*/
cl_command_queue /* queue */,
cl_uint /* num_svm_pointers */,
void** /* svm_pointers */,
void* /* user_data */),
void* /* user_data */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clEnqueueSVMMemcpyAMD_fn)(
cl_command_queue /* command_queue */,
cl_bool /* blocking_copy */,
void* /* dst_ptr */,
const void* /* src_ptr */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clEnqueueSVMMemFillAMD_fn)(
cl_command_queue /* command_queue */,
void* /* svm_ptr */,
const void* /* pattern */,
size_t /* pattern_size */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clEnqueueSVMMapAMD_fn)(
cl_command_queue /* command_queue */,
cl_bool /* blocking_map */,
cl_map_flags /* map_flags */,
void* /* svm_ptr */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clEnqueueSVMUnmapAMD_fn)(
cl_command_queue /* command_queue */,
void* /* svm_ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event* /* event_wait_list */,
cl_event* /* event */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clSetKernelArgSVMPointerAMD_fn)(
cl_kernel /* kernel */,
cl_uint /* arg_index */,
const void * /* arg_value */
) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int
(CL_API_CALL * clSetKernelExecInfoAMD_fn)(
cl_kernel /* kernel */,
cl_kernel_exec_info_amd /* param_name */,
size_t /* param_value_size */,
const void * /* param_value */
) CL_EXT_SUFFIX__VERSION_1_2;
#endif
#endif // HAVE_OPENCL_SVM
#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP__

View File

@ -0,0 +1,645 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CORE_SSE_UTILS_HPP__
#define __OPENCV_CORE_SSE_UTILS_HPP__
#ifndef __cplusplus
# error sse_utils.hpp header must be compiled as C++
#endif
#if CV_SSE2
inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
{
__m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g0);
__m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g0);
__m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_g1);
__m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_g1);
__m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk2);
__m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk2);
__m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk3);
__m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk3);
__m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk2);
__m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk2);
__m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk3);
__m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk3);
__m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk2);
__m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk2);
__m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk3);
__m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk3);
v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk2);
v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk2);
v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk3);
v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk3);
}
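// A minimal usage sketch (not part of this header): splitting 64 bytes of
// interleaved two-channel pixels (r,g,r,g,...) into planar form with the
// overload above. The loads cover bytes 0..63 of src; after the call,
// v_r0/v_r1 hold the 32 r bytes and v_g0/v_g1 the 32 g bytes.
//
//     __m128i v_r0 = _mm_loadu_si128((const __m128i*)(src));
//     __m128i v_r1 = _mm_loadu_si128((const __m128i*)(src + 16));
//     __m128i v_g0 = _mm_loadu_si128((const __m128i*)(src + 32));
//     __m128i v_g1 = _mm_loadu_si128((const __m128i*)(src + 48));
//     _mm_deinterleave_epi8(v_r0, v_r1, v_g0, v_g1);
//     _mm_storeu_si128((__m128i*)(r),      v_r0);
//     _mm_storeu_si128((__m128i*)(r + 16), v_r1);
//     _mm_storeu_si128((__m128i*)(g),      v_g0);
//     _mm_storeu_si128((__m128i*)(g + 16), v_g1);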
inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
__m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
{
__m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g1);
__m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g1);
__m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b0);
__m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b0);
__m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_b1);
__m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_b1);
__m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3);
__m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3);
__m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4);
__m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4);
__m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5);
__m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5);
__m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3);
__m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3);
__m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4);
__m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4);
__m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5);
__m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5);
__m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3);
__m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3);
__m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4);
__m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4);
__m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5);
__m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5);
v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk3);
v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3);
v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4);
v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4);
v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5);
v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5);
}
inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
__m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
{
__m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0);
__m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0);
__m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1);
__m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b1);
__m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_a0);
__m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_a0);
__m128i layer1_chunk6 = _mm_unpacklo_epi8(v_g1, v_a1);
__m128i layer1_chunk7 = _mm_unpackhi_epi8(v_g1, v_a1);
__m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk4);
__m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk4);
__m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk5);
__m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk5);
__m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk6);
__m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk6);
__m128i layer2_chunk6 = _mm_unpacklo_epi8(layer1_chunk3, layer1_chunk7);
__m128i layer2_chunk7 = _mm_unpackhi_epi8(layer1_chunk3, layer1_chunk7);
__m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk4);
__m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk4);
__m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk5);
__m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk5);
__m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk6);
__m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk6);
__m128i layer3_chunk6 = _mm_unpacklo_epi8(layer2_chunk3, layer2_chunk7);
__m128i layer3_chunk7 = _mm_unpackhi_epi8(layer2_chunk3, layer2_chunk7);
__m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk4);
__m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk4);
__m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk5);
__m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk5);
__m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk6);
__m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk6);
__m128i layer4_chunk6 = _mm_unpacklo_epi8(layer3_chunk3, layer3_chunk7);
__m128i layer4_chunk7 = _mm_unpackhi_epi8(layer3_chunk3, layer3_chunk7);
v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk4);
v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk4);
v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk5);
v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk5);
v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk6);
v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk6);
v_a0 = _mm_unpacklo_epi8(layer4_chunk3, layer4_chunk7);
v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7);
}
inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
{
__m128i v_mask = _mm_set1_epi16(0x00ff);
__m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
__m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));
__m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
__m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8));
__m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask));
__m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));
__m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));
__m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));
__m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
__m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));
__m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
__m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));
__m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
__m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));
__m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
__m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));
v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));
v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));
}
inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
__m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
{
__m128i v_mask = _mm_set1_epi16(0x00ff);
__m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
__m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));
__m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
__m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8));
__m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));
__m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8));
__m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask));
__m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));
__m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));
__m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));
__m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask));
__m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8));
__m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
__m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));
__m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
__m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));
__m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));
__m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8));
__m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
__m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));
__m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
__m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));
__m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));
__m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8));
v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));
v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));
v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));
v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8));
}
inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
__m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
{
__m128i v_mask = _mm_set1_epi16(0x00ff);
__m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
__m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8));
__m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
__m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8));
__m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));
__m128i layer4_chunk6 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8));
__m128i layer4_chunk3 = _mm_packus_epi16(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask));
__m128i layer4_chunk7 = _mm_packus_epi16(_mm_srli_epi16(v_a0, 8), _mm_srli_epi16(v_a1, 8));
__m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask));
__m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8));
__m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask));
__m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8));
__m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask));
__m128i layer3_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8));
__m128i layer3_chunk3 = _mm_packus_epi16(_mm_and_si128(layer4_chunk6, v_mask), _mm_and_si128(layer4_chunk7, v_mask));
__m128i layer3_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk6, 8), _mm_srli_epi16(layer4_chunk7, 8));
__m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
__m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8));
__m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
__m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8));
__m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));
__m128i layer2_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8));
__m128i layer2_chunk3 = _mm_packus_epi16(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask));
__m128i layer2_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk6, 8), _mm_srli_epi16(layer3_chunk7, 8));
__m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
__m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8));
__m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
__m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8));
__m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));
__m128i layer1_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8));
__m128i layer1_chunk3 = _mm_packus_epi16(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask));
__m128i layer1_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk6, 8), _mm_srli_epi16(layer2_chunk7, 8));
v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8));
v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8));
v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));
v_a0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8));
v_g1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask));
v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8));
}
inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
{
__m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0);
__m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0);
__m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1);
__m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_g1);
__m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk2);
__m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk2);
__m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk3);
__m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk3);
__m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk2);
__m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk2);
__m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk3);
__m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk3);
v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk2);
v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk2);
v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk3);
v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk3);
}
inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
__m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
{
__m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1);
__m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1);
__m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0);
__m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b0);
__m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_b1);
__m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_b1);
__m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3);
__m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3);
__m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4);
__m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4);
__m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5);
__m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5);
__m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3);
__m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3);
__m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4);
__m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4);
__m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5);
__m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5);
v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3);
v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3);
v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4);
v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4);
v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5);
v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5);
}
inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
__m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
{
__m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0);
__m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0);
__m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b1);
__m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b1);
__m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_a0);
__m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_a0);
__m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1);
__m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1);
__m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4);
__m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4);
__m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5);
__m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk5);
__m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk6);
__m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk6);
__m128i layer2_chunk6 = _mm_unpacklo_epi16(layer1_chunk3, layer1_chunk7);
__m128i layer2_chunk7 = _mm_unpackhi_epi16(layer1_chunk3, layer1_chunk7);
__m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk4);
__m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk4);
__m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk5);
__m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk5);
__m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk6);
__m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk6);
__m128i layer3_chunk6 = _mm_unpacklo_epi16(layer2_chunk3, layer2_chunk7);
__m128i layer3_chunk7 = _mm_unpackhi_epi16(layer2_chunk3, layer2_chunk7);
v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk4);
v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk4);
v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk5);
v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk5);
v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk6);
v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk6);
v_a0 = _mm_unpacklo_epi16(layer3_chunk3, layer3_chunk7);
v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7);
}
#if CV_SSE4_1
inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1)
{
__m128i v_mask = _mm_set1_epi32(0x0000ffff);
__m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
__m128i layer3_chunk2 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16));
__m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
__m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));
__m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
__m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
__m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
__m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
__m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
__m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
__m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
__m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));
v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));
}
inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0,
__m128i & v_g1, __m128i & v_b0, __m128i & v_b1)
{
__m128i v_mask = _mm_set1_epi32(0x0000ffff);
__m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
__m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16));
__m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
__m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));
__m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));
__m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16));
__m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
__m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
__m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
__m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
__m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));
__m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));
__m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
__m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
__m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
__m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
__m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));
__m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));
v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));
v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));
v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));
v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16));
}
inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1,
__m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1)
{
__m128i v_mask = _mm_set1_epi32(0x0000ffff);
__m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask));
__m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16));
__m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask));
__m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16));
__m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask));
__m128i layer3_chunk6 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16));
__m128i layer3_chunk3 = _mm_packus_epi32(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask));
__m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16));
__m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask));
__m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16));
__m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask));
__m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16));
__m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask));
__m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16));
__m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask));
__m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16));
__m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask));
__m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16));
__m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask));
__m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16));
__m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask));
__m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16));
__m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask));
__m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16));
v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask));
v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16));
v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask));
v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16));
v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask));
v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16));
v_g1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask));
v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16));
}
#endif // CV_SSE4_1
inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1)
{
__m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0);
__m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g0);
__m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1);
__m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1);
__m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2);
__m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2);
__m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3);
__m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk3);
v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk2);
v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk2);
v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk3);
v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk3);
}
inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0,
__m128 & v_g1, __m128 & v_b0, __m128 & v_b1)
{
__m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1);
__m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1);
__m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0);
__m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0);
__m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1);
__m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1);
__m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3);
__m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3);
__m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4);
__m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4);
__m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5);
__m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5);
v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3);
v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk3);
v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4);
v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4);
v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5);
v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5);
}
inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1,
__m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1)
{
__m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0);
__m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0);
__m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1);
__m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b1);
__m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_a0);
__m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_a0);
__m128 layer1_chunk6 = _mm_unpacklo_ps(v_g1, v_a1);
__m128 layer1_chunk7 = _mm_unpackhi_ps(v_g1, v_a1);
__m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk4);
__m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk4);
__m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk5);
__m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk5);
__m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk6);
__m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk6);
__m128 layer2_chunk6 = _mm_unpacklo_ps(layer1_chunk3, layer1_chunk7);
__m128 layer2_chunk7 = _mm_unpackhi_ps(layer1_chunk3, layer1_chunk7);
v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk4);
v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk4);
v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk5);
v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk5);
v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk6);
v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk6);
v_a0 = _mm_unpacklo_ps(layer2_chunk3, layer2_chunk7);
v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7);
}
inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1)
{
const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);
__m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo);
__m128 layer2_chunk2 = _mm_shuffle_ps(v_r0, v_r1, mask_hi);
__m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo);
__m128 layer2_chunk3 = _mm_shuffle_ps(v_g0, v_g1, mask_hi);
__m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo);
__m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi);
__m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo);
__m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi);
v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo);
v_g0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi);
v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo);
v_g1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi);
}
inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0,
__m128 & v_g1, __m128 & v_b0, __m128 & v_b1)
{
const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);
__m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo);
__m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi);
__m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo);
__m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi);
__m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo);
__m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi);
__m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo);
__m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi);
__m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo);
__m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi);
__m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo);
__m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi);
v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo);
v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi);
v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo);
v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi);
v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo);
v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi);
}
inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1,
__m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1)
{
const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);
__m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo);
__m128 layer2_chunk4 = _mm_shuffle_ps(v_r0, v_r1, mask_hi);
__m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo);
__m128 layer2_chunk5 = _mm_shuffle_ps(v_g0, v_g1, mask_hi);
__m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo);
__m128 layer2_chunk6 = _mm_shuffle_ps(v_b0, v_b1, mask_hi);
__m128 layer2_chunk3 = _mm_shuffle_ps(v_a0, v_a1, mask_lo);
__m128 layer2_chunk7 = _mm_shuffle_ps(v_a0, v_a1, mask_hi);
__m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo);
__m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi);
__m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo);
__m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi);
__m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo);
__m128 layer1_chunk6 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi);
__m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_lo);
__m128 layer1_chunk7 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_hi);
v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo);
v_b0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi);
v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo);
v_b1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi);
v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo);
v_a0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi);
v_g1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_lo);
v_a1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_hi);
}
#endif // CV_SSE2
#endif //__OPENCV_CORE_SSE_UTILS_HPP__
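For orientation, a minimal usage sketch of the two-channel float variant above (the helper and intrinsics are real; the wrapper function and array names are illustrative, not part of this header). Eight interleaved (r,g) pairs are loaded, transposed to planar form in registers, and stored per channel:

#include <emmintrin.h>
// assumes CV_SSE2 and the _mm_deinterleave_ps overload defined above
static void split_rg8(const float* rg /* r0,g0,r1,g1,...,r7,g7 */, float* r, float* g)
{
    __m128 v_r0 = _mm_loadu_ps(rg);      // r0 g0 r1 g1
    __m128 v_r1 = _mm_loadu_ps(rg + 4);  // r2 g2 r3 g3
    __m128 v_g0 = _mm_loadu_ps(rg + 8);  // r4 g4 r5 g5
    __m128 v_g1 = _mm_loadu_ps(rg + 12); // r6 g6 r7 g7
    _mm_deinterleave_ps(v_r0, v_r1, v_g0, v_g1);
    _mm_storeu_ps(r,     v_r0);          // r0 r1 r2 r3
    _mm_storeu_ps(r + 4, v_r1);          // r4 r5 r6 r7
    _mm_storeu_ps(g,     v_g0);          // g0 g1 g2 g3
    _mm_storeu_ps(g + 4, v_g1);          // g4 g5 g6 g7
}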


@ -13,6 +13,7 @@
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -281,16 +282,30 @@ CV_EXPORTS_W int64 getCPUTickCount();
remember to keep this list identical to the one in cvdef.h
*/
enum CpuFeatures {
CPU_MMX = 1,
CPU_SSE = 2,
CPU_SSE2 = 3,
CPU_SSE3 = 4,
CPU_SSSE3 = 5,
CPU_SSE4_1 = 6,
CPU_SSE4_2 = 7,
CPU_POPCNT = 8,
CPU_AVX = 10,
CPU_NEON = 11
CPU_MMX = 1,
CPU_SSE = 2,
CPU_SSE2 = 3,
CPU_SSE3 = 4,
CPU_SSSE3 = 5,
CPU_SSE4_1 = 6,
CPU_SSE4_2 = 7,
CPU_POPCNT = 8,
CPU_AVX = 10,
CPU_AVX2 = 11,
CPU_FMA3 = 12,
CPU_AVX_512F = 13,
CPU_AVX_512BW = 14,
CPU_AVX_512CD = 15,
CPU_AVX_512DQ = 16,
CPU_AVX_512ER = 17,
CPU_AVX_512IFMA512 = 18,
CPU_AVX_512PF = 19,
CPU_AVX_512VBMI = 20,
CPU_AVX_512VL = 21,
CPU_NEON = 100
};
/** @brief Returns true if the specified feature is supported by the host hardware.
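A short usage sketch for the extended feature list (hedged: checkHardwareSupport is the accessor documented above; the dispatch body is illustrative):

#include <opencv2/core.hpp>
void run()
{
    if (cv::checkHardwareSupport(cv::CPU_AVX2))
    {
        // take an AVX2 code path (e.g. the vectorized SinCos_32f below)
    }
    else
    {
        // portable scalar fallback
    }
}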


@ -242,3 +242,31 @@ PERF_TEST_P(Size_MatType, multiplyScale, TYPICAL_MATS_CORE_ARITHM)
SANITY_CHECK(c, 1e-8);
}
PERF_TEST_P(Size_MatType, divide, TYPICAL_MATS_CORE_ARITHM)
{
Size sz = get<0>(GetParam());
int type = get<1>(GetParam());
cv::Mat a(sz, type), b(sz, type), c(sz, type);
double scale = 0.5;
declare.in(a, b, WARMUP_RNG).out(c);
TEST_CYCLE() divide(a, b, c, scale);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(Size_MatType, reciprocal, TYPICAL_MATS_CORE_ARITHM)
{
Size sz = get<0>(GetParam());
int type = get<1>(GetParam());
cv::Mat b(sz, type), c(sz, type);
double scale = 0.5;
declare.in(b, WARMUP_RNG).out(c);
TEST_CYCLE() divide(scale, b, c);
SANITY_CHECK_NOTHING();
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -11,6 +11,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,


@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -593,14 +594,46 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre
{
const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1];
double *angle = (double*)ptrs[2];
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
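// each _mm_cvtpd_ps fills only its low two lanes; _mm_movelh_ps merges
// two such half-filled registers into one 4-float store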
__m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)),
_mm_cvtpd_ps(_mm_loadu_pd(x + k + 2)));
__m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)),
_mm_cvtpd_ps(_mm_loadu_pd(y + k + 2)));
_mm_storeu_ps(buf[0] + k, v_dst0);
_mm_storeu_ps(buf[1] + k, v_dst1);
}
}
#endif
for( ; k < len; k++ )
{
buf[0][k] = (float)x[k];
buf[1][k] = (float)y[k];
}
FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_src = _mm_loadu_ps(buf[0] + k);
_mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src));
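// shift the upper two floats of v_src into the low lanes so that
// _mm_cvtps_pd can widen them to doubles as well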
_mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8))));
}
}
#endif
for( ; k < len; k++ )
angle[k] = buf[0][k];
}
ptrs[0] += len*esz1;
@ -698,14 +731,46 @@ void cartToPolar( InputArray src1, InputArray src2,
double *angle = (double*)ptrs[3];
Magnitude_64f(x, y, (double*)ptrs[2], len);
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_dst0 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(x + k)),
_mm_cvtpd_ps(_mm_loadu_pd(x + k + 2)));
__m128 v_dst1 = _mm_movelh_ps(_mm_cvtpd_ps(_mm_loadu_pd(y + k)),
_mm_cvtpd_ps(_mm_loadu_pd(y + k + 2)));
_mm_storeu_ps(buf[0] + k, v_dst0);
_mm_storeu_ps(buf[1] + k, v_dst1);
}
}
#endif
for( ; k < len; k++ )
{
buf[0][k] = (float)x[k];
buf[1][k] = (float)y[k];
}
FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
for( k = 0; k < len; k++ )
k = 0;
#if CV_SSE2
if (USE_SSE2)
{
for ( ; k <= len - 4; k += 4)
{
__m128 v_src = _mm_loadu_ps(buf[0] + k);
_mm_storeu_pd(angle + k, _mm_cvtps_pd(v_src));
_mm_storeu_pd(angle + k + 2, _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8))));
}
}
#endif
for( ; k < len; k++ )
angle[k] = buf[0][k];
}
ptrs[0] += len*esz1;
@ -771,14 +836,77 @@ static void SinCos_32f( const float *angle, float *sinval, float* cosval,
/*static const double cos_a2 = 1;*/
double k1;
int i;
int i = 0;
if( !angle_in_degrees )
k1 = N/(2*CV_PI);
else
k1 = N/360.;
for( i = 0; i < len; i++ )
#if CV_AVX2
if (USE_AVX2)
{
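// table-driven sin/cos: the integer part of the scaled angle indexes the
// precomputed sine table (cosine via the N/4 phase shift), and the fractional
// remainder t is folded in through the angle-addition identities using the
// small polynomials sin(b) ~= t*(sin_a0*t^2 + sin_a2), cos(b) ~= cos_a0*t^2 + 1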
__m128d v_k1 = _mm_set1_pd(k1);
__m128d v_1 = _mm_set1_pd(1);
__m128i v_N1 = _mm_set1_epi32(N - 1);
__m128i v_N4 = _mm_set1_epi32(N >> 2);
__m128d v_sin_a0 = _mm_set1_pd(sin_a0);
__m128d v_sin_a2 = _mm_set1_pd(sin_a2);
__m128d v_cos_a0 = _mm_set1_pd(cos_a0);
for ( ; i <= len - 4; i += 4)
{
__m128 v_angle = _mm_loadu_ps(angle + i);
// 0-1
__m128d v_t = _mm_mul_pd(_mm_cvtps_pd(v_angle), v_k1);
__m128i v_it = _mm_cvtpd_epi32(v_t);
v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it));
__m128i v_sin_idx = _mm_and_si128(v_it, v_N1);
__m128i v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1);
__m128d v_t2 = _mm_mul_pd(v_t, v_t);
__m128d v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t);
__m128d v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1);
__m128d v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8);
__m128d v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8);
__m128d v_sin_val_0 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b),
_mm_mul_pd(v_cos_a, v_sin_b));
__m128d v_cos_val_0 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b),
_mm_mul_pd(v_sin_a, v_sin_b));
// 2-3
v_t = _mm_mul_pd(_mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_angle), 8))), v_k1);
v_it = _mm_cvtpd_epi32(v_t);
v_t = _mm_sub_pd(v_t, _mm_cvtepi32_pd(v_it));
v_sin_idx = _mm_and_si128(v_it, v_N1);
v_cos_idx = _mm_and_si128(_mm_sub_epi32(v_N4, v_sin_idx), v_N1);
v_t2 = _mm_mul_pd(v_t, v_t);
v_sin_b = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(v_sin_a0, v_t2), v_sin_a2), v_t);
v_cos_b = _mm_add_pd(_mm_mul_pd(v_cos_a0, v_t2), v_1);
v_sin_a = _mm_i32gather_pd(sin_table, v_sin_idx, 8);
v_cos_a = _mm_i32gather_pd(sin_table, v_cos_idx, 8);
__m128d v_sin_val_1 = _mm_add_pd(_mm_mul_pd(v_sin_a, v_cos_b),
_mm_mul_pd(v_cos_a, v_sin_b));
__m128d v_cos_val_1 = _mm_sub_pd(_mm_mul_pd(v_cos_a, v_cos_b),
_mm_mul_pd(v_sin_a, v_sin_b));
_mm_storeu_ps(sinval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_sin_val_0),
_mm_cvtpd_ps(v_sin_val_1)));
_mm_storeu_ps(cosval + i, _mm_movelh_ps(_mm_cvtpd_ps(v_cos_val_0),
_mm_cvtpd_ps(v_cos_val_1)));
}
}
#endif
for( ; i < len; i++ )
{
double t = angle[i]*k1;
int it = cvRound(t);
@ -914,6 +1042,16 @@ void polarToCart( InputArray src1, InputArray src2,
vst1q_f32(x + k, vmulq_f32(vld1q_f32(x + k), v_m));
vst1q_f32(y + k, vmulq_f32(vld1q_f32(y + k), v_m));
}
#elif CV_SSE2
if (USE_SSE2)
{
for( ; k <= len - 4; k += 4 )
{
__m128 v_m = _mm_loadu_ps(mag + k);
_mm_storeu_ps(x + k, _mm_mul_ps(_mm_loadu_ps(x + k), v_m));
_mm_storeu_ps(y + k, _mm_mul_ps(_mm_loadu_ps(y + k), v_m));
}
}
#endif
for( ; k < len; k++ )
@ -939,10 +1077,10 @@ void polarToCart( InputArray src1, InputArray src2,
x[k] = buf[0][k]*m; y[k] = buf[1][k]*m;
}
else
for( k = 0; k < len; k++ )
{
x[k] = buf[0][k]; y[k] = buf[1][k];
}
{
std::memcpy(x, buf[0], sizeof(float) * len);
std::memcpy(y, buf[1], sizeof(float) * len);
}
}
if( ptrs[0] )


@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -720,6 +721,16 @@ static bool ocl_gemm_amdblas( InputArray matA, InputArray matB, double alpha,
return false;
UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat();
if (!ocl::internal::isCLBuffer(A) || !ocl::internal::isCLBuffer(B) || !ocl::internal::isCLBuffer(D))
{
return false;
}
if (haveC)
{
UMat C = matC.getUMat();
if (!ocl::internal::isCLBuffer(C))
return false;
}
if (haveC)
ctrans ? transpose(matC, D) : matC.copyTo(D);
else


@ -159,8 +159,9 @@ void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t s
memcpy(ptrs[1], ptrs[0], planesz);
}
BufferPoolController* MatAllocator::getBufferPoolController() const
BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const
{
(void)id;
static DummyBufferPoolController dummy;
return &dummy;
}
@ -2644,6 +2645,10 @@ void _OutputArray::assign(const UMat& u) const
{
u.copyTo(*(Mat*)obj); // TODO check u.getMat()
}
else if (k == MATX)
{
u.copyTo(getMat()); // TODO check u.getMat()
}
else
{
CV_Error(Error::StsNotImplemented, "");
@ -2662,6 +2667,10 @@ void _OutputArray::assign(const Mat& m) const
{
*(Mat*)obj = m;
}
else if (k == MATX)
{
m.copyTo(getMat());
}
else
{
CV_Error(Error::StsNotImplemented, "");

File diff suppressed because it is too large


@ -70,7 +70,8 @@ static void* AppleCLGetProcAddress(const char* name)
handle = dlopen(oclpath, RTLD_LAZY | RTLD_GLOBAL);
if (handle == NULL)
{
fprintf(stderr, ERROR_MSG_CANT_LOAD);
if (envPath)
fprintf(stderr, ERROR_MSG_CANT_LOAD);
}
else if (dlsym(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL)
{
@ -108,7 +109,8 @@ static void* WinGetProcAddress(const char* name)
handle = LoadLibraryA(path);
if (!handle)
{
fprintf(stderr, ERROR_MSG_CANT_LOAD);
if (envPath)
fprintf(stderr, ERROR_MSG_CANT_LOAD);
}
else if (GetProcAddress(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL)
{
@ -145,7 +147,8 @@ static void* GetProcAddress(const char* name)
handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL);
if (handle == NULL)
{
fprintf(stderr, ERROR_MSG_CANT_LOAD);
if (envPath)
fprintf(stderr, ERROR_MSG_CANT_LOAD);
}
else if (dlsym(handle, OPENCL_FUNC_TO_CHECK_1_1) == NULL)
{
@ -182,6 +185,65 @@ static void* opencl_check_fn(int ID);
#define CUSTOM_FUNCTION_ID 1000
#ifdef HAVE_OPENCL_SVM
#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp"
#define SVM_FUNCTION_ID_START CUSTOM_FUNCTION_ID
#define SVM_FUNCTION_ID_END CUSTOM_FUNCTION_ID + 100
enum OPENCL_FN_SVM_ID
{
OPENCL_FN_clSVMAlloc = SVM_FUNCTION_ID_START,
OPENCL_FN_clSVMFree,
OPENCL_FN_clSetKernelArgSVMPointer,
OPENCL_FN_clSetKernelExecInfo,
OPENCL_FN_clEnqueueSVMFree,
OPENCL_FN_clEnqueueSVMMemcpy,
OPENCL_FN_clEnqueueSVMMemFill,
OPENCL_FN_clEnqueueSVMMap,
OPENCL_FN_clEnqueueSVMUnmap,
};
void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment) =
opencl_fn4<OPENCL_FN_clSVMAlloc, void*, cl_context, cl_svm_mem_flags, size_t, unsigned int>::switch_fn;
static const struct DynamicFnEntry _clSVMAlloc_definition = { "clSVMAlloc", (void**)&clSVMAlloc};
void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer) =
opencl_fn2<OPENCL_FN_clSVMFree, void, cl_context, void*>::switch_fn;
static const struct DynamicFnEntry _clSVMFree_definition = { "clSVMFree", (void**)&clSVMFree};
cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value) =
opencl_fn3<OPENCL_FN_clSetKernelArgSVMPointer, cl_int, cl_kernel, cl_uint, const void*>::switch_fn;
static const struct DynamicFnEntry _clSetKernelArgSVMPointer_definition = { "clSetKernelArgSVMPointer", (void**)&clSetKernelArgSVMPointer};
//void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value) =
// opencl_fn4<OPENCL_FN_clSetKernelExecInfo, void*, cl_kernel, cl_kernel_exec_info, size_t, const void*>::switch_fn;
//static const struct DynamicFnEntry _clSetKernelExecInfo_definition = { "clSetKernelExecInfo", (void**)&clSetKernelExecInfo};
//cl_int (CL_API_CALL *clEnqueueSVMFree)(...) =
// opencl_fn8<OPENCL_FN_clEnqueueSVMFree, cl_int, ...>::switch_fn;
//static const struct DynamicFnEntry _clEnqueueSVMFree_definition = { "clEnqueueSVMFree", (void**)&clEnqueueSVMFree};
cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) =
opencl_fn8<OPENCL_FN_clEnqueueSVMMemcpy, cl_int, cl_command_queue, cl_bool, void*, const void*, size_t, cl_uint, const cl_event*, cl_event*>::switch_fn;
static const struct DynamicFnEntry _clEnqueueSVMMemcpy_definition = { "clEnqueueSVMMemcpy", (void**)&clEnqueueSVMMemcpy};
cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) =
opencl_fn8<OPENCL_FN_clEnqueueSVMMemFill, cl_int, cl_command_queue, void*, const void*, size_t, size_t, cl_uint, const cl_event*, cl_event*>::switch_fn;
static const struct DynamicFnEntry _clEnqueueSVMMemFill_definition = { "clEnqueueSVMMemFill", (void**)&clEnqueueSVMMemFill};
cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) =
opencl_fn8<OPENCL_FN_clEnqueueSVMMap, cl_int, cl_command_queue, cl_bool, cl_map_flags, void*, size_t, cl_uint, const cl_event*, cl_event*>::switch_fn;
static const struct DynamicFnEntry _clEnqueueSVMMap_definition = { "clEnqueueSVMMap", (void**)&clEnqueueSVMMap};
cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) =
opencl_fn5<OPENCL_FN_clEnqueueSVMUnmap, cl_int, cl_command_queue, void*, cl_uint, const cl_event*, cl_event*>::switch_fn;
static const struct DynamicFnEntry _clEnqueueSVMUnmap_definition = { "clEnqueueSVMUnmap", (void**)&clEnqueueSVMUnmap};
static const struct DynamicFnEntry* opencl_svm_fn_list[] = {
&_clSVMAlloc_definition,
&_clSVMFree_definition,
&_clSetKernelArgSVMPointer_definition,
NULL/*&_clSetKernelExecInfo_definition*/,
NULL/*&_clEnqueueSVMFree_definition*/,
&_clEnqueueSVMMemcpy_definition,
&_clEnqueueSVMMemFill_definition,
&_clEnqueueSVMMap_definition,
&_clEnqueueSVMUnmap_definition,
};
#endif // HAVE_OPENCL_SVM
//
// END OF CUSTOM FUNCTIONS HERE
//
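Context on the dispatch pattern (an inference from the opencl_fnN templates used above, not text from this patch): each exported pointer such as clSVMAlloc initially points at a generated switch_fn stub; the first call resolves the real symbol through opencl_check_fn and the DynamicFnEntry table, overwrites the pointer, and forwards the call, so later calls go straight to the driver. A self-contained sketch of the same idiom, all names hypothetical:

#include <cstdio>
typedef int (*fn_t)(int);
static int real_fn(int x) { return x + 1; }     // stands in for the driver symbol
static fn_t lookup_symbol() { return real_fn; } // stands in for dlsym()/GetProcAddress()
extern fn_t g_fn;
static int stub_fn(int x)
{
    g_fn = lookup_symbol(); // resolve once and replace the exported pointer
    return g_fn(x);         // forward the intercepted first call
}
fn_t g_fn = stub_fn;        // only the first call goes through the stub
int main() { std::printf("%d %d\n", g_fn(41), g_fn(41)); return 0; }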
@ -194,6 +256,14 @@ static void* opencl_check_fn(int ID)
assert(ID >= 0 && ID < (int)(sizeof(opencl_fn_list)/sizeof(opencl_fn_list[0])));
e = opencl_fn_list[ID];
}
#ifdef HAVE_OPENCL_SVM
else if (ID >= SVM_FUNCTION_ID_START && ID < SVM_FUNCTION_ID_END)
{
ID = ID - SVM_FUNCTION_ID_START;
assert(ID >= 0 && ID < (int)(sizeof(opencl_svm_fn_list)/sizeof(opencl_svm_fn_list[0])));
e = opencl_svm_fn_list[ID];
}
#endif
else
{
CV_ErrorNoReturn(cv::Error::StsBadArg, "Invalid function ID");


@ -192,6 +192,7 @@ struct NoVec
extern volatile bool USE_SSE2;
extern volatile bool USE_SSE4_2;
extern volatile bool USE_AVX;
extern volatile bool USE_AVX2;
enum { BLOCK_SIZE = 1024 };


@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -72,7 +73,114 @@ struct Sum_SIMD
}
};
#if CV_NEON
#if CV_SSE2
template <>
struct Sum_SIMD<schar, int>
{
int operator () (const schar * src0, const uchar * mask, int * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2)
return 0;
int x = 0;
__m128i v_zero = _mm_setzero_si128(), v_sum = v_zero;
for ( ; x <= len - 16; x += 16)
{
__m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
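// unpacking with v_zero in the low byte places each schar in the high byte
// of a 16-bit lane, so the arithmetic shift by 8 sign-extends it; the same
// zero-low/srai trick widens the 16-bit halves to 32-bit addends below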
__m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
}
for ( ; x <= len - 8; x += 8)
{
__m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
}
int CV_DECL_ALIGNED(16) ar[4];
_mm_store_si128((__m128i*)ar, v_sum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<int, double>
{
int operator () (const int * src0, const uchar * mask, double * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2)
return 0;
int x = 0;
__m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero;
for ( ; x <= len - 4; x += 4)
{
__m128i v_src = _mm_loadu_si128((__m128i const *)(src0 + x));
v_sum0 = _mm_add_pd(v_sum0, _mm_cvtepi32_pd(v_src));
v_sum1 = _mm_add_pd(v_sum1, _mm_cvtepi32_pd(_mm_srli_si128(v_src, 8)));
}
double CV_DECL_ALIGNED(16) ar[4];
_mm_store_pd(ar, v_sum0);
_mm_store_pd(ar + 2, v_sum1);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
template <>
struct Sum_SIMD<float, double>
{
int operator () (const float * src0, const uchar * mask, double * dst, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2 && cn != 4) || !USE_SSE2)
return 0;
int x = 0;
__m128d v_zero = _mm_setzero_pd(), v_sum0 = v_zero, v_sum1 = v_zero;
for ( ; x <= len - 4; x += 4)
{
__m128 v_src = _mm_loadu_ps(src0 + x);
v_sum0 = _mm_add_pd(v_sum0, _mm_cvtps_pd(v_src));
v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8));
v_sum1 = _mm_add_pd(v_sum1, _mm_cvtps_pd(v_src));
}
double CV_DECL_ALIGNED(16) ar[4];
_mm_store_pd(ar, v_sum0);
_mm_store_pd(ar + 2, v_sum1);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
dst[j] += ar[j + i];
return x / cn;
}
};
#elif CV_NEON
template <>
struct Sum_SIMD<uchar, int>
@ -396,6 +504,38 @@ static int countNonZero_(const T* src, int len )
return nz;
}
#if CV_SSE2
static const uchar * initPopcountTable()
{
static uchar tab[256];
static volatile bool initialized = false;
if( !initialized )
{
// we compute an inverse popcount table,
// since we pass the (img[x] == 0) mask as an index into the table.
unsigned int j = 0u;
#if CV_POPCNT
if (checkHardwareSupport(CV_CPU_POPCNT))
for( ; j < 256u; j++ )
tab[j] = (uchar)(8 - _mm_popcnt_u32(j));
#else
for( ; j < 256u; j++ )
{
int val = 0;
for( int mask = 1; mask < 256; mask += mask )
val += (j & mask) == 0;
tab[j] = (uchar)val;
}
#endif
initialized = true;
}
return tab;
}
#endif
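Worked example of the table (not from the patch): for the eight shorts {0, 7, 0, 0, 5, 5, 0, 9}, _mm_cmpeq_epi16 marks the zero lanes, packing plus _mm_movemask_epi8 produce the index 0b01001101 = 0x4D (a bit set where the element is zero), and tab[0x4D] = 8 - popcount(0x4D) = 8 - 4 = 4, the number of nonzero elements.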
static int countNonZero8u( const uchar* src, int len )
{
int i=0, nz = 0;
@ -403,21 +543,7 @@ static int countNonZero8u( const uchar* src, int len )
if(USE_SSE2)//5x-6x
{
__m128i pattern = _mm_setzero_si128 ();
static uchar tab[256];
static volatile bool initialized = false;
if( !initialized )
{
// we compute inverse popcount table,
// since we pass (img[x] == 0) mask as index in the table.
for( int j = 0; j < 256; j++ )
{
int val = 0;
for( int mask = 1; mask < 256; mask += mask )
val += (j & mask) == 0;
tab[j] = (uchar)val;
}
initialized = true;
}
static const uchar * tab = initPopcountTable();
for (; i<=len-16; i+=16)
{
@ -467,7 +593,22 @@ static int countNonZero8u( const uchar* src, int len )
static int countNonZero16u( const ushort* src, int len )
{
int i = 0, nz = 0;
#if CV_NEON
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero = _mm_setzero_si128 ();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_src = _mm_loadu_si128((const __m128i*)(src + i));
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_cmpeq_epi16(v_src, v_zero), v_zero));
nz += tab[val];
}
src += i;
}
#elif CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
uint16x8_t v_zero = vdupq_n_u16(0), v_1 = vdupq_n_u16(1);
@ -503,7 +644,27 @@ static int countNonZero16u( const ushort* src, int len )
static int countNonZero32s( const int* src, int len )
{
int i = 0, nz = 0;
#if CV_NEON
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero = _mm_setzero_si128 ();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_src = _mm_loadu_si128((const __m128i*)(src + i));
__m128i v_dst0 = _mm_cmpeq_epi32(v_src, v_zero);
v_src = _mm_loadu_si128((const __m128i*)(src + i + 4));
__m128i v_dst1 = _mm_cmpeq_epi32(v_src, v_zero);
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero));
nz += tab[val];
}
src += i;
}
#elif CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
int32x4_t v_zero = vdupq_n_s32(0);
@ -541,7 +702,25 @@ static int countNonZero32s( const int* src, int len )
static int countNonZero32f( const float* src, int len )
{
int i = 0, nz = 0;
#if CV_NEON
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero_i = _mm_setzero_si128();
__m128 v_zero_f = _mm_setzero_ps();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_dst0 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i), v_zero_f));
__m128i v_dst1 = _mm_castps_si128(_mm_cmpeq_ps(_mm_loadu_ps(src + i + 4), v_zero_f));
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i));
nz += tab[val];
}
src += i;
}
#elif CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
float32x4_t v_zero = vdupq_n_f32(0.0f);
@ -577,7 +756,34 @@ static int countNonZero32f( const float* src, int len )
}
static int countNonZero64f( const double* src, int len )
{ return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_SSE2
if (USE_SSE2)
{
__m128i v_zero_i = _mm_setzero_si128();
__m128d v_zero_d = _mm_setzero_pd();
static const uchar * tab = initPopcountTable();
for ( ; i <= len - 8; i += 8)
{
__m128i v_dst0 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i), v_zero_d));
__m128i v_dst1 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 2), v_zero_d));
__m128i v_dst2 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 4), v_zero_d));
__m128i v_dst3 = _mm_castpd_si128(_mm_cmpeq_pd(_mm_loadu_pd(src + i + 6), v_zero_d));
v_dst0 = _mm_packs_epi32(v_dst0, v_dst1);
v_dst1 = _mm_packs_epi32(v_dst2, v_dst3);
int val = _mm_movemask_epi8(_mm_packs_epi16(_mm_packs_epi32(v_dst0, v_dst1), v_zero_i));
nz += tab[val];
}
src += i;
}
#endif
return nz + countNonZero_(src, len - i);
}
typedef int (*CountNonZeroFunc)(const uchar*, int);
@ -594,6 +800,137 @@ static CountNonZeroFunc getCountNonZeroTab(int depth)
return countNonZeroTab[depth];
}
template <typename T, typename ST, typename SQT>
struct SumSqr_SIMD
{
int operator () (const T *, const uchar *, ST *, SQT *, int, int) const
{
return 0;
}
};
#if CV_SSE2
template <>
struct SumSqr_SIMD<uchar, int, int>
{
int operator () (const uchar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
return 0;
int x = 0;
__m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
for ( ; x <= len - 16; x += 16)
{
__m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
__m128i v_half = _mm_unpacklo_epi8(v_src, v_zero);
__m128i v_mullo = _mm_mullo_epi16(v_half, v_half);
__m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero));
v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
v_half = _mm_unpackhi_epi8(v_src, v_zero);
v_mullo = _mm_mullo_epi16(v_half, v_half);
v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_half, v_zero));
v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_half, v_zero));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
for ( ; x <= len - 8; x += 8)
{
__m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src0 + x)), v_zero);
__m128i v_mullo = _mm_mullo_epi16(v_src, v_src);
__m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src);
v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_src, v_zero));
v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_src, v_zero));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
int CV_DECL_ALIGNED(16) ar[8];
_mm_store_si128((__m128i*)ar, v_sum);
_mm_store_si128((__m128i*)(ar + 4), v_sqsum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
{
sum[j] += ar[j + i];
sqsum[j] += ar[4 + j + i];
}
return x / cn;
}
};
template <>
struct SumSqr_SIMD<schar, int, int>
{
int operator () (const schar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
{
if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
return 0;
int x = 0;
__m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
for ( ; x <= len - 16; x += 16)
{
__m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
__m128i v_half = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8);
__m128i v_mullo = _mm_mullo_epi16(v_half, v_half);
__m128i v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
v_half = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8);
v_mullo = _mm_mullo_epi16(v_half, v_half);
v_mulhi = _mm_mulhi_epi16(v_half, v_half);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_half), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_half), 16));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
for ( ; x <= len - 8; x += 8)
{
__m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8);
__m128i v_mullo = _mm_mullo_epi16(v_src, v_src);
__m128i v_mulhi = _mm_mulhi_epi16(v_src, v_src);
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16));
v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpacklo_epi16(v_mullo, v_mulhi));
v_sqsum = _mm_add_epi32(v_sqsum, _mm_unpackhi_epi16(v_mullo, v_mulhi));
}
int CV_DECL_ALIGNED(16) ar[8];
_mm_store_si128((__m128i*)ar, v_sum);
_mm_store_si128((__m128i*)(ar + 4), v_sqsum);
for (int i = 0; i < 4; i += cn)
for (int j = 0; j < cn; ++j)
{
sum[j] += ar[j + i];
sqsum[j] += ar[4 + j + i];
}
return x / cn;
}
};
#endif
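A note on the mullo/mulhi pairing in the specializations above (an illustration, not part of the patch): _mm_unpacklo_epi16/_mm_unpackhi_epi16 over (v_mullo, v_mulhi) reassemble full 32-bit squares from the low and high 16-bit product words. A scalar check of that recombination:

#include <assert.h>
#include <stdint.h>
int main(void)
{
    for (int32_t v = -128; v <= 300; v++)       // covers the schar/uchar lane ranges and more
    {
        uint16_t lo = (uint16_t)(v * v);        // _mm_mullo_epi16 lane
        int16_t  hi = (int16_t)((v * v) >> 16); // _mm_mulhi_epi16 lane (signed high word)
        int32_t  sq = ((int32_t)hi << 16) | lo; // what _mm_unpacklo_epi16(v_mullo, v_mulhi) forms
        assert(sq == v * v);
    }
    return 0;
}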
template<typename T, typename ST, typename SQT>
static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
{
@ -601,14 +938,15 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
if( !mask )
{
int i;
int k = cn % 4;
SumSqr_SIMD<T, ST, SQT> vop;
int i = vop(src0, mask, sum, sqsum, len, cn), k = cn % 4;
src += i * cn;
if( k == 1 )
{
ST s0 = sum[0];
SQT sq0 = sqsum[0];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v = src[0];
s0 += v; sq0 += (SQT)v*v;
@ -620,7 +958,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
{
ST s0 = sum[0], s1 = sum[1];
SQT sq0 = sqsum[0], sq1 = sqsum[1];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v0 = src[0], v1 = src[1];
s0 += v0; sq0 += (SQT)v0*v0;
@ -633,7 +971,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
{
ST s0 = sum[0], s1 = sum[1], s2 = sum[2];
SQT sq0 = sqsum[0], sq1 = sqsum[1], sq2 = sqsum[2];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v0 = src[0], v1 = src[1], v2 = src[2];
s0 += v0; sq0 += (SQT)v0*v0;
@ -649,7 +987,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
src = src0 + k;
ST s0 = sum[k], s1 = sum[k+1], s2 = sum[k+2], s3 = sum[k+3];
SQT sq0 = sqsum[k], sq1 = sqsum[k+1], sq2 = sqsum[k+2], sq3 = sqsum[k+3];
for( i = 0; i < len; i++, src += cn )
for( ; i < len; i++, src += cn )
{
T v0, v1;
v0 = src[0], v1 = src[1];
@ -924,7 +1262,6 @@ cv::Scalar cv::sum( InputArray _src )
}
}
#endif
SumFunc func = getSumFunc(depth);
CV_Assert( cn <= 4 && func != 0 );


@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -89,6 +90,22 @@
pop ebx
}
}
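// note: this inline-asm fallback ignores its leaf/subleaf arguments and always
// queries CPUID leaf 7, subleaf 0, matching its single call site below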
static void __cpuidex(int* cpuid_data, int, int)
{
__asm
{
push edi
mov edi, cpuid_data
mov eax, 7
mov ecx, 0
cpuid
mov [edi], eax
mov [edi + 4], ebx
mov [edi + 8], ecx
mov [edi + 12], edx
pop edi
}
}
#endif
#endif
@ -208,7 +225,7 @@ struct HWFeatures
enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE };
HWFeatures(void)
{
{
memset( have, 0, sizeof(have) );
x86_family = 0;
}
@ -252,10 +269,53 @@ struct HWFeatures
f.have[CV_CPU_SSE2] = (cpuid_data[3] & (1<<26)) != 0;
f.have[CV_CPU_SSE3] = (cpuid_data[2] & (1<<0)) != 0;
f.have[CV_CPU_SSSE3] = (cpuid_data[2] & (1<<9)) != 0;
f.have[CV_CPU_FMA3] = (cpuid_data[2] & (1<<12)) != 0;
f.have[CV_CPU_SSE4_1] = (cpuid_data[2] & (1<<19)) != 0;
f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0;
f.have[CV_CPU_POPCNT] = (cpuid_data[2] & (1<<23)) != 0;
f.have[CV_CPU_AVX] = (((cpuid_data[2] & (1<<28)) != 0)&&((cpuid_data[2] & (1<<27)) != 0));//OS uses XSAVE_XRSTORE and CPU support AVX
// make the second call to the cpuid command in order to get
// information about extended features like AVX2
#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
__cpuidex(cpuid_data, 7, 0);
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
#ifdef __x86_64__
asm __volatile__
(
"movl $7, %%eax\n\t"
"movl $0, %%ecx\n\t"
"cpuid\n\t"
:[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3])
:
: "cc"
);
#else
asm volatile
(
"pushl %%ebx\n\t"
"movl $7,%%eax\n\t"
"movl $0,%%ecx\n\t"
"cpuid\n\t"
"movl %%ebx, %0\n\t"
"popl %%ebx\n\t"
: "=r"(cpuid_data[1]), "=c"(cpuid_data[2])
:
: "cc"
);
#endif
#endif
f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0;
f.have[CV_CPU_AVX_512F] = (cpuid_data[1] & (1<<16)) != 0;
f.have[CV_CPU_AVX_512DQ] = (cpuid_data[1] & (1<<17)) != 0;
f.have[CV_CPU_AVX_512IFMA512] = (cpuid_data[1] & (1<<21)) != 0;
f.have[CV_CPU_AVX_512PF] = (cpuid_data[1] & (1<<26)) != 0;
f.have[CV_CPU_AVX_512ER] = (cpuid_data[1] & (1<<27)) != 0;
f.have[CV_CPU_AVX_512CD] = (cpuid_data[1] & (1<<28)) != 0;
f.have[CV_CPU_AVX_512BW] = (cpuid_data[1] & (1<<30)) != 0;
f.have[CV_CPU_AVX_512VL] = (cpuid_data[1] & (1<<31)) != 0;
f.have[CV_CPU_AVX_512VBMI] = (cpuid_data[2] & (1<<1)) != 0;
}
#if defined ANDROID || defined __linux__
@ -318,6 +378,7 @@ IPPInitializer ippInitializer;
volatile bool USE_SSE2 = featuresEnabled.have[CV_CPU_SSE2];
volatile bool USE_SSE4_2 = featuresEnabled.have[CV_CPU_SSE4_2];
volatile bool USE_AVX = featuresEnabled.have[CV_CPU_AVX];
volatile bool USE_AVX2 = featuresEnabled.have[CV_CPU_AVX2];
void setUseOptimized( bool flag )
{


@ -10,8 +10,7 @@
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -56,7 +55,7 @@ UMatData::UMatData(const MatAllocator* allocator)
prevAllocator = currAllocator = allocator;
urefcount = refcount = 0;
data = origdata = 0;
size = 0; capacity = 0;
size = 0;
flags = 0;
handle = 0;
userdata = 0;
@ -68,7 +67,7 @@ UMatData::~UMatData()
prevAllocator = currAllocator = 0;
urefcount = refcount = 0;
data = origdata = 0;
size = 0; capacity = 0;
size = 0;
flags = 0;
handle = 0;
userdata = 0;
@ -222,7 +221,7 @@ UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
temp_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags);
temp_u->refcount = 1;
}
UMat::getStdAllocator()->allocate(temp_u, accessFlags, usageFlags);
UMat::getStdAllocator()->allocate(temp_u, accessFlags, usageFlags); // TODO result is not checked
hdr.flags = flags;
setSize(hdr, dims, size.p, step.p);
finalizeHdr(hdr);
@ -576,7 +575,7 @@ Mat UMat::getMat(int accessFlags) const
{
if(!u)
return Mat();
u->currAllocator->map(u, accessFlags | ACCESS_READ);
u->currAllocator->map(u, accessFlags | ACCESS_READ); // TODO Support ACCESS_WRITE without unnecessary data transfers
CV_Assert(u->data != 0);
Mat hdr(dims, size.p, type(), u->data + offset, step.p);
hdr.flags = flags;


@ -1577,7 +1577,7 @@ PARAM_TEST_CASE(ConvertScaleAbs, MatDepth, Channels, bool)
Size roiSize = randomSize(1, MAX_VALUE);
Border srcBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
randomSubMat(src, src_roi, roiSize, srcBorder, stype, 2, 11); // FIXIT: Test with minV, maxV
randomSubMat(src, src_roi, roiSize, srcBorder, stype, -11, 11); // FIXIT: Test with minV, maxV
Border dstBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
randomSubMat(dst, dst_roi, roiSize, dstBorder, dtype, 5, 16);


@ -26,3 +26,106 @@ TEST(Core_SaturateCast, NegativeNotClipped)
ASSERT_EQ(0xffffffff, val);
}
template<typename T, typename U>
static double maxAbsDiff(const T &t, const U &u)
{
Mat_<double> d;
absdiff(t, u, d);
double ret;
minMaxLoc(d, NULL, &ret);
return ret;
}
TEST(Core_OutputArrayAssign, _Matxd_Matd)
{
Mat expected = (Mat_<double>(2,3) << 1, 2, 3, .1, .2, .3);
Matx23d actualx;
{
OutputArray oa(actualx);
oa.assign(expected);
}
Mat actual = (Mat) actualx;
EXPECT_LE(maxAbsDiff(expected, actual), 0.0);
}
TEST(Core_OutputArrayAssign, _Matxd_Matf)
{
Mat expected = (Mat_<float>(2,3) << 1, 2, 3, .1, .2, .3);
Matx23d actualx;
{
OutputArray oa(actualx);
oa.assign(expected);
}
Mat actual = (Mat) actualx;
EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON);
}
TEST(Core_OutputArrayAssign, _Matxf_Matd)
{
Mat expected = (Mat_<double>(2,3) << 1, 2, 3, .1, .2, .3);
Matx23f actualx;
{
OutputArray oa(actualx);
oa.assign(expected);
}
Mat actual = (Mat) actualx;
EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON);
}
TEST(Core_OutputArrayAssign, _Matxd_UMatd)
{
Mat expected = (Mat_<double>(2,3) << 1, 2, 3, .1, .2, .3);
UMat uexpected = expected.getUMat(ACCESS_READ);
Matx23d actualx;
{
OutputArray oa(actualx);
oa.assign(uexpected);
}
Mat actual = (Mat) actualx;
EXPECT_LE(maxAbsDiff(expected, actual), 0.0);
}
TEST(Core_OutputArrayAssign, _Matxd_UMatf)
{
Mat expected = (Mat_<float>(2,3) << 1, 2, 3, .1, .2, .3);
UMat uexpected = expected.getUMat(ACCESS_READ);
Matx23d actualx;
{
OutputArray oa(actualx);
oa.assign(uexpected);
}
Mat actual = (Mat) actualx;
EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON);
}
TEST(Core_OutputArrayAssign, _Matxf_UMatd)
{
Mat expected = (Mat_<double>(2,3) << 1, 2, 3, .1, .2, .3);
UMat uexpected = expected.getUMat(ACCESS_READ);
Matx23f actualx;
{
OutputArray oa(actualx);
oa.assign(uexpected);
}
Mat actual = (Mat) actualx;
EXPECT_LE(maxAbsDiff(expected, actual), FLT_EPSILON);
}
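The tests above pin down the new OutputArray::assign() conversions between Mat/UMat and fixed-size Matx; a condensed sketch of the same path (hypothetical standalone usage):

#include <opencv2/core.hpp>
#include <cfloat>
#include <cmath>

int main()
{
    cv::Mat src = (cv::Mat_<float>(2, 3) << 1, 2, 3, 4, 5, 6);
    cv::Matx23d dst;
    cv::OutputArray oa(dst);
    oa.assign(src);  // converts the CV_32F data into the double-precision Matx
    CV_Assert(std::abs(dst(1, 2) - 6.0) <= FLT_EPSILON * 6.0);
    return 0;
}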

View File

@ -1,9 +0,0 @@
if(IOS OR (NOT HAVE_CUDA AND NOT BUILD_CUDA_STUBS))
ocv_module_disable(cuda)
endif()
set(the_description "CUDA-accelerated Computer Vision")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 /wd4515 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)
ocv_define_module(cuda opencv_calib3d opencv_cudaarithm opencv_cudawarping OPTIONAL opencv_cudalegacy)

View File

@ -1,135 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CUDA_HPP__
#define __OPENCV_CUDA_HPP__
#ifndef __cplusplus
# error cuda.hpp header must be compiled as C++
#endif
#include "opencv2/core/cuda.hpp"
/**
@addtogroup cuda
@{
@defgroup cuda_calib3d Camera Calibration and 3D Reconstruction
@}
*/
namespace cv { namespace cuda {
//////////////////////////// Labeling ////////////////////////////
//! @addtogroup cuda
//! @{
//! performs labeling via graph cuts of a 2D regular 4-connected graph.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
//! performs labeling via graph cuts of a 2D regular 8-connected graph.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight,
GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
//! compute mask for Generalized Flood fill components labeling.
CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
//! performs connected components labeling.
CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
//! @}
//////////////////////////// Calib3d ////////////////////////////
//! @addtogroup cuda_calib3d
//! @{
CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
Stream& stream = Stream::Null());
/** @brief Finds the object pose from 3D-2D point correspondences.
@param object Single-row matrix of object points.
@param image Single-row matrix of image points.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param rvec Output 3D rotation vector.
@param tvec Output 3D translation vector.
@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
initial transformation guess (currently not supported).
@param num_iters Maximum number of RANSAC iterations.
@param max_dist Euclidean distance threshold to detect whether a point is an inlier or not.
@param min_inlier_count Number of inliers at which the function stops early if reached or
exceeded (currently not supported).
@param inliers Output vector of inlier indices.
*/
CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
std::vector<int>* inliers=NULL);
//! @}
//////////////////////////// VStab ////////////////////////////
//! @addtogroup cuda
//! @{
//! removes points (CV_32FC2, single row matrix) with zero mask value
CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask);
CV_EXPORTS void calcWobbleSuppressionMaps(
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
GpuMat &mapx, GpuMat &mapy);
//! @}
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_CUDA_HPP__ */

View File

@ -1,96 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
void cv::cuda::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
void cv::cuda::calcWobbleSuppressionMaps(
int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
#else
namespace cv { namespace cuda { namespace device { namespace globmotion {
int compactPoints(int N, float *points0, float *points1, const uchar *mask);
void calcWobbleSuppressionMaps(
int left, int idx, int right, int width, int height,
const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy);
}}}}
void cv::cuda::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
{
CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);
int npoints = points0.cols;
int remaining = cv::cuda::device::globmotion::compactPoints(
npoints, (float*)points0.data, (float*)points1.data, mask.data);
points0 = points0.colRange(0, remaining);
points1 = points1.colRange(0, remaining);
}
void cv::cuda::calcWobbleSuppressionMaps(
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
GpuMat &mapx, GpuMat &mapy)
{
CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous());
CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous());
mapx.create(size, CV_32F);
mapy.create(size, CV_32F);
cv::cuda::device::globmotion::calcWobbleSuppressionMaps(
left, idx, right, size.width, size.height,
ml.ptr<float>(), mr.ptr<float>(), mapx, mapy);
}
#endif

View File

@ -1,60 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/cuda.hpp"
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudawarping.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDALEGACY
# include "opencv2/cudalegacy/private.hpp"
#endif
#endif /* __OPENCV_PRECOMP_H__ */

View File

@ -1,90 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace std;
using namespace cv;
struct CompactPoints : testing::TestWithParam<cuda::DeviceInfo>
{
virtual void SetUp() { cuda::setDevice(GetParam().deviceID()); }
};
CUDA_TEST_P(CompactPoints, CanCompactizeSmallInput)
{
Mat src0(1, 3, CV_32FC2);
src0.at<Point2f>(0,0) = Point2f(0,0);
src0.at<Point2f>(0,1) = Point2f(0,1);
src0.at<Point2f>(0,2) = Point2f(0,2);
Mat src1(1, 3, CV_32FC2);
src1.at<Point2f>(0,0) = Point2f(1,0);
src1.at<Point2f>(0,1) = Point2f(1,1);
src1.at<Point2f>(0,2) = Point2f(1,2);
Mat mask(1, 3, CV_8U);
mask.at<uchar>(0,0) = 1;
mask.at<uchar>(0,1) = 0;
mask.at<uchar>(0,2) = 1;
cuda::GpuMat dsrc0(src0), dsrc1(src1), dmask(mask);
cuda::compactPoints(dsrc0, dsrc1, dmask);
dsrc0.download(src0);
dsrc1.download(src1);
ASSERT_EQ(2, src0.cols);
ASSERT_EQ(2, src1.cols);
ASSERT_TRUE(src0.at<Point2f>(0,0) == Point2f(0,0));
ASSERT_TRUE(src0.at<Point2f>(0,1) == Point2f(0,2));
ASSERT_TRUE(src1.at<Point2f>(0,0) == Point2f(1,0));
ASSERT_TRUE(src1.at<Point2f>(0,1) == Point2f(1,2));
}
INSTANTIATE_TEST_CASE_P(CUDA_GlobalMotion, CompactPoints, ALL_DEVICES);
#endif // HAVE_CUDA

View File

@ -1,45 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
CV_CUDA_TEST_MAIN("gpu")

View File

@ -1,66 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <fstream>
#include "opencv2/ts.hpp"
#include "opencv2/ts/cuda_test.hpp"
#include "opencv2/cuda.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/opengl.hpp"
#include "opencv2/calib3d.hpp"
#include "cvconfig.h"
#endif

View File

@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Background Segmentation")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)
ocv_define_module(cudabgsegm opencv_video OPTIONAL opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc)
ocv_define_module(cudabgsegm opencv_video)

View File

@ -147,115 +147,6 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);
////////////////////////////////////////////////////
// GMG
/** @brief Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements the algorithm described in @cite Gold2012.
*/
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual int getMaxFeatures() const = 0;
virtual void setMaxFeatures(int maxFeatures) = 0;
virtual double getDefaultLearningRate() const = 0;
virtual void setDefaultLearningRate(double lr) = 0;
virtual int getNumFrames() const = 0;
virtual void setNumFrames(int nframes) = 0;
virtual int getQuantizationLevels() const = 0;
virtual void setQuantizationLevels(int nlevels) = 0;
virtual double getBackgroundPrior() const = 0;
virtual void setBackgroundPrior(double bgprior) = 0;
virtual int getSmoothingRadius() const = 0;
virtual void setSmoothingRadius(int radius) = 0;
virtual double getDecisionThreshold() const = 0;
virtual void setDecisionThreshold(double thresh) = 0;
virtual bool getUpdateBackgroundModel() const = 0;
virtual void setUpdateBackgroundModel(bool update) = 0;
virtual double getMinVal() const = 0;
virtual void setMinVal(double val) = 0;
virtual double getMaxVal() const = 0;
virtual void setMaxVal(double val) = 0;
};
/** @brief Creates GMG Background Subtractor
@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which pixel is determined to be FG.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
////////////////////////////////////////////////////
// FGD
/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
of the background.
Any pixel which does not fit this model is then deemed to be foreground. The class implements
the algorithm described in @cite FGD2003.
@sa BackgroundSubtractor
*/
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
struct CV_EXPORTS FGDParams
{
int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
//!< Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
//!< Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
//!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; //!< Affects color and color co-occurrence quantization, typically set to 2.
float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.
//! default Params
FGDParams();
};
/** @brief Creates FGD Background Subtractor
@param params Algorithm's parameters. See @cite FGD2003 for explanation.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
//! @}
}} // namespace cv { namespace cuda {
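With GMG and FGD relocated to cudalegacy (see below), MOG and MOG2 are what remains in this header; a hedged usage sketch of the MOG2 interface declared above (hypothetical host code):

#include <opencv2/cudabgsegm.hpp>

void segment(const cv::Mat& frame)
{
    // Arguments mirror the factory defaults above: history, varThreshold, detectShadows.
    cv::Ptr<cv::cuda::BackgroundSubtractorMOG2> mog2 =
        cv::cuda::createBackgroundSubtractorMOG2(500, 16.0, true);
    cv::cuda::GpuMat d_frame(frame), d_fgmask;
    mog2->apply(d_frame, d_fgmask);
}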

View File

@ -42,10 +42,6 @@
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
using namespace std;
using namespace testing;
using namespace perf;
@ -63,83 +59,6 @@ using namespace perf;
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
const int numIters = 10;
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame), foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorFGD> d_fgd = cv::cuda::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_fgd->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
d_fgd->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
#ifdef HAVE_OPENCV_CUDAIMGPROC
cv::cuda::GpuMat background3, background;
d_fgd->getBackgroundImage(background3);
cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
#endif
}
else
{
FAIL_NO_CPU();
}
}
#endif
//////////////////////////////////////////////////////
// MOG
@ -484,118 +403,3 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
}
#endif
//////////////////////////////////////////////////////
// GMG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int);
PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
Combine(Values(string("gpu/video/768x576.avi")),
CUDA_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const int numIters = 150;
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame);
cv::cuda::GpuMat foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
d_gmg->setMaxFeatures(maxFeatures);
d_gmg->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_gmg->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
d_gmg->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground);
}
else
{
FAIL_NO_CPU();
}
}
#endif

View File

@ -51,16 +51,4 @@
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFILTERS
# include "opencv2/cudafilters.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
#endif /* __OPENCV_PRECOMP_H__ */

View File

@ -6,4 +6,5 @@ set(the_description "CUDA-accelerated Computer Vision (legacy)")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4130 /wd4324 /wd4512 /wd4310 -Wundef -Wmissing-declarations -Wuninitialized -Wshadow)
ocv_define_module(cudalegacy opencv_core OPTIONAL opencv_objdetect)
ocv_define_module(cudalegacy opencv_core opencv_video
OPTIONAL opencv_objdetect opencv_imgproc opencv_calib3d opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc)

View File

@ -49,6 +49,7 @@
#include "opencv2/cudalegacy/NCVPyramid.hpp"
#include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
#include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
#include "opencv2/video/background_segm.hpp"
/**
@addtogroup cuda
@ -59,6 +60,13 @@
namespace cv { namespace cuda {
//! @addtogroup cudalegacy
//! @{
//
// ImagePyramid
//
class CV_EXPORTS ImagePyramid : public Algorithm
{
public:
@ -67,6 +75,216 @@ public:
CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null());
//
// GMG
//
/** @brief Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements the algorithm described in @cite Gold2012.
*/
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual int getMaxFeatures() const = 0;
virtual void setMaxFeatures(int maxFeatures) = 0;
virtual double getDefaultLearningRate() const = 0;
virtual void setDefaultLearningRate(double lr) = 0;
virtual int getNumFrames() const = 0;
virtual void setNumFrames(int nframes) = 0;
virtual int getQuantizationLevels() const = 0;
virtual void setQuantizationLevels(int nlevels) = 0;
virtual double getBackgroundPrior() const = 0;
virtual void setBackgroundPrior(double bgprior) = 0;
virtual int getSmoothingRadius() const = 0;
virtual void setSmoothingRadius(int radius) = 0;
virtual double getDecisionThreshold() const = 0;
virtual void setDecisionThreshold(double thresh) = 0;
virtual bool getUpdateBackgroundModel() const = 0;
virtual void setUpdateBackgroundModel(bool update) = 0;
virtual double getMinVal() const = 0;
virtual void setMinVal(double val) = 0;
virtual double getMaxVal() const = 0;
virtual void setMaxVal(double val) = 0;
};
/** @brief Creates GMG Background Subtractor
@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which pixel is determined to be FG.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
//
// FGD
//
/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
of the background.
Any pixel which does not fit this model is then deemed to be foreground. The class implements
the algorithm described in @cite FGD2003.
@sa BackgroundSubtractor
*/
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
struct CV_EXPORTS FGDParams
{
int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
//!< Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
//!< Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
//!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; //!< Affects color and color co-occurrence quantization, typically set to 2.
float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.
//! default Params
FGDParams();
};
/** @brief Creates FGD Background Subtractor
@param params Algorithm's parameters. See @cite FGD2003 for explanation.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
//
// Optical flow
//
//! Calculates optical flow for two images using the block matching algorithm.
CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
Size block_size, Size shift_size, Size max_range, bool use_previous,
GpuMat& velx, GpuMat& vely, GpuMat& buf,
Stream& stream = Stream::Null());
class CV_EXPORTS FastOpticalFlowBM
{
public:
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());
private:
GpuMat buffer;
GpuMat extended_I0;
GpuMat extended_I1;
};
/** @brief Interpolates frames (images) using provided optical flow (displacement field).
@param frame0 First frame (32-bit floating point images, single channel).
@param frame1 Second frame. Must have the same type and size as frame0 .
@param fu Forward horizontal displacement.
@param fv Forward vertical displacement.
@param bu Backward horizontal displacement.
@param bv Backward vertical displacement.
@param pos New frame position.
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
const GpuMat& fu, const GpuMat& fv,
const GpuMat& bu, const GpuMat& bv,
float pos, GpuMat& newFrame, GpuMat& buf,
Stream& stream = Stream::Null());
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
//
// Labeling
//
//! performs labeling via graph cuts of a 2D regular 4-connected graph.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
//! performs labeling via graph cuts of a 2D regular 8-connected graph.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight,
GpuMat& labels,
GpuMat& buf, Stream& stream = Stream::Null());
//! compute mask for Generalized Flood fill components labeling.
CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
//! performs connected components labeling.
CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
//
// Calib3d
//
CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
Stream& stream = Stream::Null());
/** @brief Finds the object pose from 3D-2D point correspondences.
@param object Single-row matrix of object points.
@param image Single-row matrix of image points.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param rvec Output 3D rotation vector.
@param tvec Output 3D translation vector.
@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
initial transformation guess (currently not supported).
@param num_iters Maximum number of RANSAC iterations.
@param max_dist Euclidean distance threshold to detect whether a point is an inlier or not.
@param min_inlier_count Number of inliers at which the function stops early if reached or
exceeded (currently not supported).
@param inliers Output vector of inlier indices.
*/
CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
std::vector<int>* inliers=NULL);
//! @}
}}
#endif /* __OPENCV_CUDALEGACY_HPP__ */
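A hedged usage sketch for the relocated solvePnPRansac (hypothetical host code; per the docs above, object and image are single-row matrices of 3D and 2D points):

#include <opencv2/cudalegacy.hpp>
#include <vector>

void estimatePose(const cv::Mat& object, const cv::Mat& image, const cv::Mat& camera_mat)
{
    cv::Mat rvec, tvec;
    std::vector<int> inliers;
    cv::cuda::solvePnPRansac(object, image, camera_mat, cv::Mat(), rvec, tvec,
                             false /*use_extrinsic_guess*/, 200 /*num_iters*/,
                             8.0f /*max_dist*/, 100 /*min_inlier_count*/, &inliers);
}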

View File

@ -0,0 +1,249 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
using namespace std;
using namespace testing;
using namespace perf;
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
const int numIters = 10;
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame), foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorFGD> d_fgd = cv::cuda::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_fgd->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
d_fgd->apply(d_frame, foreground);
}
}
else
{
FAIL_NO_CPU();
}
SANITY_CHECK_NOTHING();
}
#endif
//////////////////////////////////////////////////////
// GMG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int);
PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
Combine(Values(string("gpu/video/768x576.avi")),
CUDA_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const int numIters = 150;
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame);
cv::cuda::GpuMat foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
d_gmg->setMaxFeatures(maxFeatures);
d_gmg->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_gmg->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
d_gmg->apply(d_frame, foreground);
}
}
else
{
FAIL_NO_CPU();
}
SANITY_CHECK_NOTHING();
}
#endif

View File

@ -42,6 +42,10 @@
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_CALIB3D
#include "opencv2/calib3d.hpp"
using namespace std;
using namespace testing;
using namespace perf;
@ -133,3 +137,5 @@ PERF_TEST_P(Count, Calib3D_SolvePnPRansac,
CPU_SANITY_CHECK(tvec, 1e-6);
}
}
#endif

View File

@ -44,4 +44,4 @@
using namespace perf;
CV_PERF_TEST_CUDA_MAIN(cuda)
CV_PERF_TEST_CUDA_MAIN(cudalegacy)

View File

@ -54,8 +54,10 @@
#include "opencv2/ts.hpp"
#include "opencv2/ts/cuda_perf.hpp"
#include "opencv2/cuda.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/video.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined

View File

@ -45,7 +45,7 @@
using namespace cv;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
#if !defined HAVE_CUDA || !defined HAVE_OPENCV_CALIB3D || defined(CUDA_DISABLER)
void cv::cuda::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }

View File

@ -56,6 +56,22 @@
# include "opencv2/objdetect.hpp"
#endif
#ifdef HAVE_OPENCV_CALIB3D
# include "opencv2/calib3d.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFILTERS
# include "opencv2/cudafilters.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/cudalegacy/private.hpp"

View File

@ -42,7 +42,9 @@
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
#if defined HAVE_CUDA && defined HAVE_OPENCV_CALIB3D
#include "opencv2/calib3d.hpp"
using namespace cvtest;

View File

@ -74,6 +74,8 @@
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/opencv_modules.hpp"
#include "cvconfig.h"
#include "NCVTest.hpp"

View File

@ -61,49 +61,94 @@ namespace cv { namespace cuda {
//! @addtogroup cudaoptflow
//! @{
/** @brief Class computing the optical flow for two images using the Brox et al. optical flow algorithm
(@cite Brox2004).
//
// Interface
//
/** @brief Base interface for dense optical flow algorithms.
*/
class CV_EXPORTS BroxOpticalFlow
class CV_EXPORTS DenseOpticalFlow : public Algorithm
{
public:
BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) :
alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_),
inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_)
{
}
/** @brief Calculates a dense optical flow.
//! Compute optical flow
//! frame0 - source frame (supports only CV_32FC1 type)
//! frame1 - frame to track (with the same size and type as frame0)
//! u - flow horizontal component (along x axis)
//! v - flow vertical component (along y axis)
void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null());
//! flow smoothness
float alpha;
//! gradient constancy importance
float gamma;
//! pyramid scale factor
float scale_factor;
//! number of lagged non-linearity iterations (inner loop)
int inner_iterations;
//! number of warping iterations (number of pyramid levels)
int outer_iterations;
//! number of linear system solver iterations
int solver_iterations;
GpuMat buf;
@param I0 first input image.
@param I1 second input image of the same size and the same type as I0.
@param flow computed flow image that has the same size as I0 and type CV_32FC2.
@param stream Stream for the asynchronous version.
*/
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream = Stream::Null()) = 0;
};
/** @brief Class used for calculating an optical flow.
/** @brief Base interface for sparse optical flow algorithms.
*/
class CV_EXPORTS SparseOpticalFlow : public Algorithm
{
public:
/** @brief Calculates a sparse optical flow.
The class can calculate an optical flow for a sparse feature set or dense optical flow using the
@param prevImg First input image.
@param nextImg Second input image of the same size and the same type as prevImg.
@param prevPts Vector of 2D points for which the flow needs to be found.
@param nextPts Output vector of 2D points containing the calculated new positions of input features in the second image.
@param status Output status vector. Each element of the vector is set to 1 if the
flow for the corresponding features has been found. Otherwise, it is set to 0.
@param err Optional output vector that contains error response for each point (inverse confidence).
@param stream Stream for the asynchronous version.
*/
virtual void calc(InputArray prevImg, InputArray nextImg,
InputArray prevPts, InputOutputArray nextPts,
OutputArray status,
OutputArray err = cv::noArray(),
Stream& stream = Stream::Null()) = 0;
};
//
// BroxOpticalFlow
//
/** @brief Class computing the optical flow for two images using the Brox et al. optical flow algorithm (@cite Brox2004).
*/
class CV_EXPORTS BroxOpticalFlow : public DenseOpticalFlow
{
public:
virtual double getFlowSmoothness() const = 0;
virtual void setFlowSmoothness(double alpha) = 0;
virtual double getGradientConstancyImportance() const = 0;
virtual void setGradientConstancyImportance(double gamma) = 0;
virtual double getPyramidScaleFactor() const = 0;
virtual void setPyramidScaleFactor(double scale_factor) = 0;
//! number of lagged non-linearity iterations (inner loop)
virtual int getInnerIterations() const = 0;
virtual void setInnerIterations(int inner_iterations) = 0;
//! number of warping iterations (number of pyramid levels)
virtual int getOuterIterations() const = 0;
virtual void setOuterIterations(int outer_iterations) = 0;
//! number of linear system solver iterations
virtual int getSolverIterations() const = 0;
virtual void setSolverIterations(int solver_iterations) = 0;
static Ptr<BroxOpticalFlow> create(
double alpha = 0.197,
double gamma = 50.0,
double scale_factor = 0.8,
int inner_iterations = 5,
int outer_iterations = 150,
int solver_iterations = 10);
};
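The refactor replaces the old constructor-plus-operator() entry points with Algorithm-style factories; a hedged sketch of the new calling convention (hypothetical host code; Brox requires CV_32FC1 frames and writes packed CV_32FC2 flow):

    cv::Ptr<cv::cuda::BroxOpticalFlow> brox =
        cv::cuda::BroxOpticalFlow::create(0.197, 50.0, 0.8, 5, 150, 10);
    cv::cuda::GpuMat flow(frame0.size(), CV_32FC2);
    brox->calc(frame0, frame1, flow);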
//
// PyrLKOpticalFlow
//
/** @brief Class used for calculating a sparse optical flow.
The class can calculate an optical flow for a sparse feature set using the
iterative Lucas-Kanade method with pyramids.
@sa calcOpticalFlowPyrLK
@ -112,158 +157,116 @@ iterative Lucas-Kanade method with pyramids.
- An example of the Lucas Kanade optical flow algorithm can be found at
opencv_source_code/samples/gpu/pyrlk_optical_flow.cpp
*/
class CV_EXPORTS PyrLKOpticalFlow
class CV_EXPORTS SparsePyrLKOpticalFlow : public SparseOpticalFlow
{
public:
PyrLKOpticalFlow();
virtual Size getWinSize() const = 0;
virtual void setWinSize(Size winSize) = 0;
/** @brief Calculate an optical flow for a sparse feature set.
virtual int getMaxLevel() const = 0;
virtual void setMaxLevel(int maxLevel) = 0;
@param prevImg First 8-bit input image (supports both grayscale and color images).
@param nextImg Second input image of the same size and the same type as prevImg .
@param prevPts Vector of 2D points for which the flow needs to be found. It must be one row matrix
with CV_32FC2 type.
@param nextPts Output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image. When useInitialFlow
is true, the vector must have the same size as in the input.
@param status Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
flow for the corresponding features has been found. Otherwise, it is set to 0.
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not
needed.
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
@sa calcOpticalFlowPyrLK
*/
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
virtual bool getUseInitialFlow() const = 0;
virtual void setUseInitialFlow(bool useInitialFlow) = 0;
/** @brief Calculate dense optical flow.
@param prevImg First 8-bit grayscale input image.
@param nextImg Second input image of the same size and the same type as prevImg .
@param u Horizontal component of the optical flow of the same size as input images, 32-bit
floating-point, single-channel
@param v Vertical component of the optical flow of the same size as input images, 32-bit
floating-point, single-channel
@param err Output vector (CV_32FC1 type) that contains the difference between patches around the
original and moved points or min eigen value if getMinEigenVals is checked. It can be NULL, if not
needed.
*/
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
/** @brief Releases inner buffers memory.
*/
void releaseMemory();
Size winSize;
int maxLevel;
int iters;
bool useInitialFlow;
private:
std::vector<GpuMat> prevPyr_;
std::vector<GpuMat> nextPyr_;
GpuMat buf_;
GpuMat uPyr_[2];
GpuMat vPyr_[2];
static Ptr<SparsePyrLKOpticalFlow> create(
Size winSize = Size(21, 21),
int maxLevel = 3,
int iters = 30,
bool useInitialFlow = false);
};
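A matching sketch for the sparse interface above (hypothetical; d_prevPts is a 1xN CV_32FC2 GpuMat of seed points):

    cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> lk =
        cv::cuda::SparsePyrLKOpticalFlow::create(cv::Size(21, 21), 3, 30, false);
    cv::cuda::GpuMat d_nextPts, d_status;
    lk->calc(d_prev, d_next, d_prevPts, d_nextPts, d_status);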
/** @brief Class computing a dense optical flow using Gunnar Farneback's algorithm.
/** @brief Class used for calculating a dense optical flow.
The class computes a dense optical flow using the
iterative Lucas-Kanade method with pyramids.
*/
class CV_EXPORTS FarnebackOpticalFlow
class CV_EXPORTS DensePyrLKOpticalFlow : public DenseOpticalFlow
{
public:
FarnebackOpticalFlow()
{
numLevels = 5;
pyrScale = 0.5;
fastPyramids = false;
winSize = 13;
numIters = 10;
polyN = 5;
polySigma = 1.1;
flags = 0;
}
virtual Size getWinSize() const = 0;
virtual void setWinSize(Size winSize) = 0;
int numLevels;
double pyrScale;
bool fastPyramids;
int winSize;
int numIters;
int polyN;
double polySigma;
int flags;
virtual int getMaxLevel() const = 0;
virtual void setMaxLevel(int maxLevel) = 0;
/** @brief Computes a dense optical flow using Gunnar Farneback's algorithm.
virtual int getNumIters() const = 0;
virtual void setNumIters(int iters) = 0;
@param frame0 First 8-bit gray-scale input image
@param frame1 Second 8-bit gray-scale input image
@param flowx Flow horizontal component
@param flowy Flow vertical component
@param s Stream
virtual bool getUseInitialFlow() const = 0;
virtual void setUseInitialFlow(bool useInitialFlow) = 0;
@sa calcOpticalFlowFarneback
*/
void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
/** @brief Releases unused auxiliary memory buffers.
*/
void releaseMemory()
{
frames_[0].release();
frames_[1].release();
pyrLevel_[0].release();
pyrLevel_[1].release();
M_.release();
bufM_.release();
R_[0].release();
R_[1].release();
blurredFrame_[0].release();
blurredFrame_[1].release();
pyramid0_.clear();
pyramid1_.clear();
}
private:
void prepareGaussian(
int n, double sigma, float *g, float *xg, float *xxg,
double &ig11, double &ig03, double &ig33, double &ig55);
void setPolynomialExpansionConsts(int n, double sigma);
void updateFlow_boxFilter(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
void updateFlow_gaussianBlur(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
GpuMat frames_[2];
GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
std::vector<GpuMat> pyramid0_, pyramid1_;
static Ptr<DensePyrLKOpticalFlow> create(
Size winSize = Size(13, 13),
int maxLevel = 3,
int iters = 30,
bool useInitialFlow = false);
};
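// The dense counterpart (DensePyrLKOpticalFlow above) is consumed the same way as
// the sparse one; a hedged sketch, assuming 8-bit grayscale GpuMats d_frame0 and
// d_frame1 (mirroring the performance tests later in this commit):
//
//     cv::Ptr<cv::cuda::DensePyrLKOpticalFlow> lk =
//         cv::cuda::DensePyrLKOpticalFlow::create(cv::Size(13, 13), 3, 30);
//     cv::cuda::GpuMat d_flow;                  // CV_32FC2: (u, v) packed per pixel
//     lk->calc(d_frame0, d_frame1, d_flow);
//
//     cv::cuda::GpuMat planes[2];
//     cv::cuda::split(d_flow, planes);          // planes[0] = u, planes[1] = v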
// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
//
// see reference:
//   [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
//   [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
class CV_EXPORTS OpticalFlowDual_TVL1_CUDA
{
public:
    OpticalFlowDual_TVL1_CUDA();

    void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy);

    void collectGarbage();

    double tau;
    double lambda;
    double gamma;
    double theta;
    int nscales;
    int warps;
    double epsilon;
    int iterations;
    double scaleStep;
    bool useInitialFlow;

private:
    void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3);

    std::vector<GpuMat> I0s;
    std::vector<GpuMat> I1s;
    std::vector<GpuMat> u1s;
    std::vector<GpuMat> u2s;
    std::vector<GpuMat> u3s;

    GpuMat I1x_buf;
    GpuMat I1y_buf;
    GpuMat I1w_buf;
    GpuMat I1wx_buf;
    GpuMat I1wy_buf;
    GpuMat grad_buf;
    GpuMat rho_c_buf;
    GpuMat p11_buf;
    GpuMat p12_buf;
    GpuMat p21_buf;
    GpuMat p22_buf;
    GpuMat p31_buf;
    GpuMat p32_buf;
    GpuMat diff_buf;
    GpuMat norm_buf;
};

//
// FarnebackOpticalFlow
//

/** @brief Class computing a dense optical flow using the Gunnar Farnebacks algorithm.
*/
class CV_EXPORTS FarnebackOpticalFlow : public DenseOpticalFlow
{
public:
    virtual int getNumLevels() const = 0;
    virtual void setNumLevels(int numLevels) = 0;

    virtual double getPyrScale() const = 0;
    virtual void setPyrScale(double pyrScale) = 0;

    virtual bool getFastPyramids() const = 0;
    virtual void setFastPyramids(bool fastPyramids) = 0;

    virtual int getWinSize() const = 0;
    virtual void setWinSize(int winSize) = 0;

    virtual int getNumIters() const = 0;
    virtual void setNumIters(int numIters) = 0;

    virtual int getPolyN() const = 0;
    virtual void setPolyN(int polyN) = 0;

    virtual double getPolySigma() const = 0;
    virtual void setPolySigma(double polySigma) = 0;

    virtual int getFlags() const = 0;
    virtual void setFlags(int flags) = 0;

    static Ptr<FarnebackOpticalFlow> create(
        int numLevels = 5,
        double pyrScale = 0.5,
        bool fastPyramids = false,
        int winSize = 13,
        int numIters = 10,
        int polyN = 5,
        double polySigma = 1.1,
        int flags = 0);
};

//
// OpticalFlowDual_TVL1
//

/** @brief Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method.
 *
 * @sa C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
 * @sa Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
 */
class CV_EXPORTS OpticalFlowDual_TVL1 : public DenseOpticalFlow
{
public:
    /**
     * Time step of the numerical scheme.
     */
    virtual double getTau() const = 0;
    virtual void setTau(double tau) = 0;

    /**
     * Weight parameter for the data term, attachment parameter.
     * The smaller this parameter is, the smoother the solutions we obtain.
     * It depends on the range of motions of the images, so its value should be adapted to each image sequence.
     */
    virtual double getLambda() const = 0;
    virtual void setLambda(double lambda) = 0;

    /**
     * Weight parameter for (u - v)^2, tightness parameter.
     * In theory, it should have a small value in order to maintain both parts in correspondence.
     * The method is stable for a large range of values of this parameter.
     */
    virtual double getGamma() const = 0;
    virtual void setGamma(double gamma) = 0;

    /**
     * parameter used for motion estimation. It adds a variable allowing for illumination variations
     * Set this parameter to 1. if you have varying illumination.
     * See: Chambolle et al, A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging
     * Journal of Mathematical imaging and vision, may 2011 Vol 40 issue 1, pp 120-145
     */
    virtual double getTheta() const = 0;
    virtual void setTheta(double theta) = 0;

    /**
     * Number of scales used to create the pyramid of images.
     */
    virtual int getNumScales() const = 0;
    virtual void setNumScales(int nscales) = 0;

    /**
     * Number of warpings per scale.
     * This is a parameter that assures the stability of the method.
     * It also affects the running time, so it is a compromise between speed and accuracy.
     */
    virtual int getNumWarps() const = 0;
    virtual void setNumWarps(int warps) = 0;

    /**
     * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
     * A small value will yield more accurate solutions at the expense of a slower convergence.
     */
    virtual double getEpsilon() const = 0;
    virtual void setEpsilon(double epsilon) = 0;

    /**
     * Stopping criterion iterations number used in the numerical scheme.
     */
    virtual int getNumIterations() const = 0;
    virtual void setNumIterations(int iterations) = 0;

    virtual double getScaleStep() const = 0;
    virtual void setScaleStep(double scaleStep) = 0;

    virtual bool getUseInitialFlow() const = 0;
    virtual void setUseInitialFlow(bool useInitialFlow) = 0;

    static Ptr<OpticalFlowDual_TVL1> create(
        double tau = 0.25,
        double lambda = 0.15,
        double theta = 0.3,
        int nscales = 5,
        int warps = 5,
        double epsilon = 0.01,
        int iterations = 300,
        double scaleStep = 0.8,
        double gamma = 0.0,
        bool useInitialFlow = false);
};
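// Usage sketch for the abstract TV-L1 interface above (the values shown are the
// documented create() defaults; d_frame0/d_frame1 are assumed single-channel
// GpuMats of the same size):
//
//     cv::Ptr<cv::cuda::OpticalFlowDual_TVL1> tvl1 = cv::cuda::OpticalFlowDual_TVL1::create();
//     tvl1->setNumScales(5);
//     tvl1->setNumWarps(5);
//     cv::cuda::GpuMat d_flow;                  // CV_32FC2 result
//     tvl1->calc(d_frame0, d_frame1, d_flow);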
//! Calculates optical flow for 2 images using block matching algorithm
CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
Size block_size, Size shift_size, Size max_range, bool use_previous,
GpuMat& velx, GpuMat& vely, GpuMat& buf,
Stream& stream = Stream::Null());
class CV_EXPORTS FastOpticalFlowBM
{
public:
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());
private:
GpuMat buffer;
GpuMat extended_I0;
GpuMat extended_I1;
};
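// Both block-matching entry points mirror the performance tests further below;
// a minimal sketch (the block/search sizes are the values used in those tests,
// not mandated by the API):
//
//     cv::cuda::GpuMat d_u, d_v, d_buf;
//     cv::cuda::calcOpticalFlowBM(d_frame0, d_frame1,
//                                 cv::Size(16, 16) /*block_size*/, cv::Size(1, 1) /*shift_size*/,
//                                 cv::Size(16, 16) /*max_range*/, false /*use_previous*/,
//                                 d_u, d_v, d_buf);
//
//     cv::cuda::FastOpticalFlowBM fastBM;
//     fastBM(d_frame0, d_frame1, d_u, d_v, 21 /*search_window*/, 7 /*block_window*/);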
/** @brief Interpolates frames (images) using provided optical flow (displacement field).
@param frame0 First frame (32-bit floating point images, single channel).
@param frame1 Second frame. Must have the same type and size as frame0 .
@param fu Forward horizontal displacement.
@param fv Forward vertical displacement.
@param bu Backward horizontal displacement.
@param bv Backward vertical displacement.
@param pos New frame position.
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
const GpuMat& fu, const GpuMat& fv,
const GpuMat& bu, const GpuMat& bv,
float pos, GpuMat& newFrame, GpuMat& buf,
Stream& stream = Stream::Null());
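// Frame interpolation sketch, mirroring the performance test removed by this
// commit: compute forward and backward Brox flow, then synthesize the half-way
// frame. d_frame0/d_frame1 are assumed CV_32FC1 images scaled to [0, 1].
//
//     cv::cuda::GpuMat d_fu, d_fv, d_bu, d_bv;
//     cv::cuda::BroxOpticalFlow d_flow(0.197f, 50.0f, 0.8f, 10, 77, 10);
//     d_flow(d_frame0, d_frame1, d_fu, d_fv);   // forward flow
//     d_flow(d_frame1, d_frame0, d_bu, d_bv);   // backward flow
//
//     cv::cuda::GpuMat d_mid, d_buf;
//     cv::cuda::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv,
//                                 0.5f /*pos: halfway*/, d_mid, d_buf);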
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
//! @}
}} // namespace cv { namespace cuda {


@ -46,91 +46,10 @@ using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////
// InterpolateFrames
typedef pair<string, string> pair_string;
DEF_PARAM_TEST_1(ImagePair, pair_string);
PERF_TEST_P(ImagePair, InterpolateFrames,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat d_fu, d_fv;
cv::cuda::GpuMat d_bu, d_bv;
cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
d_flow(d_frame0, d_frame1, d_fu, d_fv);
d_flow(d_frame1, d_frame0, d_bu, d_bv);
cv::cuda::GpuMat newFrame;
cv::cuda::GpuMat d_buf;
TEST_CYCLE() cv::cuda::interpolateFrames(d_frame0, d_frame1, d_fu, d_fv, d_bu, d_bv, 0.5f, newFrame, d_buf);
CUDA_SANITY_CHECK(newFrame, 1e-4);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////
// CreateOpticalFlowNeedleMap
PERF_TEST_P(ImagePair, CreateOpticalFlowNeedleMap,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
frame0.convertTo(frame0, CV_32FC1, 1.0 / 255.0);
frame1.convertTo(frame1, CV_32FC1, 1.0 / 255.0);
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u;
cv::cuda::GpuMat v;
cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
d_flow(d_frame0, d_frame1, u, v);
cv::cuda::GpuMat vertex, colors;
TEST_CYCLE() cv::cuda::createOpticalFlowNeedleMap(u, v, vertex, colors);
CUDA_SANITY_CHECK(vertex, 1e-6);
CUDA_SANITY_CHECK(colors);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////
// BroxOpticalFlow
@ -152,13 +71,19 @@ PERF_TEST_P(ImagePair, BroxOpticalFlow,
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u;
cv::cuda::GpuMat v;
cv::cuda::GpuMat flow;
cv::cuda::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
cv::Ptr<cv::cuda::BroxOpticalFlow> d_alg =
cv::cuda::BroxOpticalFlow::create(0.197 /*alpha*/, 50.0 /*gamma*/, 0.8 /*scale_factor*/,
10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
TEST_CYCLE() d_flow(d_frame0, d_frame1, u, v);
TEST_CYCLE() d_alg->calc(d_frame0, d_frame1, flow);
cv::cuda::GpuMat flows[2];
cv::cuda::split(flow, flows);
cv::cuda::GpuMat u = flows[0];
cv::cuda::GpuMat v = flows[1];
CUDA_SANITY_CHECK(u, 1e-1);
CUDA_SANITY_CHECK(v, 1e-1);
@ -210,17 +135,17 @@ PERF_TEST_P(ImagePair_Gray_NPts_WinSz_Levels_Iters, PyrLKOpticalFlowSparse,
{
const cv::cuda::GpuMat d_pts(pts.reshape(2, 1));
cv::cuda::PyrLKOpticalFlow d_pyrLK;
d_pyrLK.winSize = cv::Size(winSize, winSize);
d_pyrLK.maxLevel = levels - 1;
d_pyrLK.iters = iters;
cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> d_pyrLK =
cv::cuda::SparsePyrLKOpticalFlow::create(cv::Size(winSize, winSize),
levels - 1,
iters);
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat nextPts;
cv::cuda::GpuMat status;
TEST_CYCLE() d_pyrLK.sparse(d_frame0, d_frame1, d_pts, nextPts, status);
TEST_CYCLE() d_pyrLK->calc(d_frame0, d_frame1, d_pts, nextPts, status);
CUDA_SANITY_CHECK(nextPts);
CUDA_SANITY_CHECK(status);
@ -270,15 +195,20 @@ PERF_TEST_P(ImagePair_WinSz_Levels_Iters, PyrLKOpticalFlowDense,
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u;
cv::cuda::GpuMat v;
cv::cuda::GpuMat flow;
cv::cuda::PyrLKOpticalFlow d_pyrLK;
d_pyrLK.winSize = cv::Size(winSize, winSize);
d_pyrLK.maxLevel = levels - 1;
d_pyrLK.iters = iters;
cv::Ptr<cv::cuda::DensePyrLKOpticalFlow> d_pyrLK =
cv::cuda::DensePyrLKOpticalFlow::create(cv::Size(winSize, winSize),
levels - 1,
iters);
TEST_CYCLE() d_pyrLK.dense(d_frame0, d_frame1, u, v);
TEST_CYCLE() d_pyrLK->calc(d_frame0, d_frame1, flow);
cv::cuda::GpuMat flows[2];
cv::cuda::split(flow, flows);
cv::cuda::GpuMat u = flows[0];
cv::cuda::GpuMat v = flows[1];
CUDA_SANITY_CHECK(u);
CUDA_SANITY_CHECK(v);
@ -315,19 +245,19 @@ PERF_TEST_P(ImagePair, FarnebackOpticalFlow,
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u;
cv::cuda::GpuMat v;
cv::cuda::GpuMat flow;
cv::cuda::FarnebackOpticalFlow d_farneback;
d_farneback.numLevels = numLevels;
d_farneback.pyrScale = pyrScale;
d_farneback.winSize = winSize;
d_farneback.numIters = numIters;
d_farneback.polyN = polyN;
d_farneback.polySigma = polySigma;
d_farneback.flags = flags;
cv::Ptr<cv::cuda::FarnebackOpticalFlow> d_farneback =
cv::cuda::FarnebackOpticalFlow::create(numLevels, pyrScale, false, winSize,
numIters, polyN, polySigma, flags);
TEST_CYCLE() d_farneback(d_frame0, d_frame1, u, v);
TEST_CYCLE() d_farneback->calc(d_frame0, d_frame1, flow);
cv::cuda::GpuMat flows[2];
cv::cuda::split(flow, flows);
cv::cuda::GpuMat u = flows[0];
cv::cuda::GpuMat v = flows[1];
CUDA_SANITY_CHECK(u, 1e-4);
CUDA_SANITY_CHECK(v, 1e-4);
@ -360,12 +290,18 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u;
cv::cuda::GpuMat v;
cv::cuda::GpuMat flow;
cv::cuda::OpticalFlowDual_TVL1_CUDA d_alg;
cv::Ptr<cv::cuda::OpticalFlowDual_TVL1> d_alg =
cv::cuda::OpticalFlowDual_TVL1::create();
TEST_CYCLE() d_alg(d_frame0, d_frame1, u, v);
TEST_CYCLE() d_alg->calc(d_frame0, d_frame1, flow);
cv::cuda::GpuMat flows[2];
cv::cuda::split(flow, flows);
cv::cuda::GpuMat u = flows[0];
cv::cuda::GpuMat v = flows[1];
CUDA_SANITY_CHECK(u, 1e-1);
CUDA_SANITY_CHECK(v, 1e-1);
@ -383,72 +319,3 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
CPU_SANITY_CHECK(flow);
}
}
//////////////////////////////////////////////////////
// OpticalFlowBM
PERF_TEST_P(ImagePair, OpticalFlowBM,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
declare.time(400);
const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
const cv::Size block_size(16, 16);
const cv::Size shift_size(1, 1);
const cv::Size max_range(16, 16);
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u, v, buf;
TEST_CYCLE() cv::cuda::calcOpticalFlowBM(d_frame0, d_frame1, block_size, shift_size, max_range, false, u, v, buf);
CUDA_SANITY_CHECK(u);
CUDA_SANITY_CHECK(v);
}
else
{
FAIL_NO_CPU();
}
}
PERF_TEST_P(ImagePair, DISABLED_FastOpticalFlowBM,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
declare.time(400);
const cv::Mat frame0 = readImage(GetParam().first, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
const cv::Mat frame1 = readImage(GetParam().second, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
const cv::Size block_size(16, 16);
const cv::Size shift_size(1, 1);
const cv::Size max_range(16, 16);
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_frame0(frame0);
const cv::cuda::GpuMat d_frame1(frame1);
cv::cuda::GpuMat u, v;
cv::cuda::FastOpticalFlowBM fastBM;
TEST_CYCLE() fastBM(d_frame0, d_frame1, u, v, max_range.width, block_size.width);
CUDA_SANITY_CHECK(u, 2);
CUDA_SANITY_CHECK(v, 2);
}
else
{
FAIL_NO_CPU();
}
}


@ -55,6 +55,7 @@
#include "opencv2/ts/cuda_perf.hpp"
#include "opencv2/cudaoptflow.hpp"
#include "opencv2/cudaarithm.hpp"
#include "opencv2/video.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY


@ -47,84 +47,148 @@ using namespace cv::cuda;
#if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_CUDALEGACY) || defined (CUDA_DISABLER)
void cv::cuda::BroxOpticalFlow::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
Ptr<BroxOpticalFlow> cv::cuda::BroxOpticalFlow::create(double, double, double, int, int, int) { throw_no_cuda(); return Ptr<BroxOpticalFlow>(); }
#else
namespace
{
    size_t getBufSize(const NCVBroxOpticalFlowDescriptor& desc, const NCVMatrix<Ncv32f>& frame0, const NCVMatrix<Ncv32f>& frame1,
                      NCVMatrix<Ncv32f>& u, NCVMatrix<Ncv32f>& v, const cudaDeviceProp& devProp)
    {
        NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
        ncvSafeCall( NCVBroxOpticalFlow(desc, gpuCounter, frame0, frame1, u, v, 0) );
        return gpuCounter.maxSize();
    }
}

namespace {
class BroxOpticalFlowImpl : public BroxOpticalFlow
{
public:
BroxOpticalFlowImpl(double alpha, double gamma, double scale_factor,
int inner_iterations, int outer_iterations, int solver_iterations) :
alpha_(alpha), gamma_(gamma), scale_factor_(scale_factor),
inner_iterations_(inner_iterations), outer_iterations_(outer_iterations),
solver_iterations_(solver_iterations)
{
}
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream);
virtual double getFlowSmoothness() const { return alpha_; }
virtual void setFlowSmoothness(double alpha) { alpha_ = static_cast<float>(alpha); }
virtual double getGradientConstancyImportance() const { return gamma_; }
virtual void setGradientConstancyImportance(double gamma) { gamma_ = static_cast<float>(gamma); }
virtual double getPyramidScaleFactor() const { return scale_factor_; }
virtual void setPyramidScaleFactor(double scale_factor) { scale_factor_ = static_cast<float>(scale_factor); }
//! number of lagged non-linearity iterations (inner loop)
virtual int getInnerIterations() const { return inner_iterations_; }
virtual void setInnerIterations(int inner_iterations) { inner_iterations_ = inner_iterations; }
//! number of warping iterations (number of pyramid levels)
virtual int getOuterIterations() const { return outer_iterations_; }
virtual void setOuterIterations(int outer_iterations) { outer_iterations_ = outer_iterations; }
//! number of linear system solver iterations
virtual int getSolverIterations() const { return solver_iterations_; }
virtual void setSolverIterations(int solver_iterations) { solver_iterations_ = solver_iterations; }
private:
//! flow smoothness
float alpha_;
//! gradient constancy importance
float gamma_;
//! pyramid scale factor
float scale_factor_;
//! number of lagged non-linearity iterations (inner loop)
int inner_iterations_;
//! number of warping iterations (number of pyramid levels)
int outer_iterations_;
//! number of linear system solver iterations
int solver_iterations_;
};
static size_t getBufSize(const NCVBroxOpticalFlowDescriptor& desc,
const NCVMatrix<Ncv32f>& frame0, const NCVMatrix<Ncv32f>& frame1,
NCVMatrix<Ncv32f>& u, NCVMatrix<Ncv32f>& v,
size_t textureAlignment)
{
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(textureAlignment));
ncvSafeCall( NCVBroxOpticalFlow(desc, gpuCounter, frame0, frame1, u, v, 0) );
return gpuCounter.maxSize();
}
static void outputHandler(const String &msg)
{
CV_Error(cv::Error::GpuApiCallError, msg.c_str());
}
void BroxOpticalFlowImpl::calc(InputArray _I0, InputArray _I1, InputOutputArray _flow, Stream& stream)
{
const GpuMat frame0 = _I0.getGpuMat();
const GpuMat frame1 = _I1.getGpuMat();
CV_Assert( frame0.type() == CV_32FC1 );
CV_Assert( frame1.size() == frame0.size() && frame1.type() == frame0.type() );
ncvSetDebugOutputHandler(outputHandler);
BufferPool pool(stream);
GpuMat u = pool.getBuffer(frame0.size(), CV_32FC1);
GpuMat v = pool.getBuffer(frame0.size(), CV_32FC1);
NCVBroxOpticalFlowDescriptor desc;
desc.alpha = alpha_;
desc.gamma = gamma_;
desc.scale_factor = scale_factor_;
desc.number_of_inner_iterations = inner_iterations_;
desc.number_of_outer_iterations = outer_iterations_;
desc.number_of_solver_iterations = solver_iterations_;
NCVMemSegment frame0MemSeg;
frame0MemSeg.begin.memtype = NCVMemoryTypeDevice;
frame0MemSeg.begin.ptr = const_cast<uchar*>(frame0.data);
frame0MemSeg.size = frame0.step * frame0.rows;
NCVMemSegment frame1MemSeg;
frame1MemSeg.begin.memtype = NCVMemoryTypeDevice;
frame1MemSeg.begin.ptr = const_cast<uchar*>(frame1.data);
frame1MemSeg.size = frame1.step * frame1.rows;
NCVMemSegment uMemSeg;
uMemSeg.begin.memtype = NCVMemoryTypeDevice;
uMemSeg.begin.ptr = u.ptr();
uMemSeg.size = u.step * u.rows;
NCVMemSegment vMemSeg;
vMemSeg.begin.memtype = NCVMemoryTypeDevice;
vMemSeg.begin.ptr = v.ptr();
vMemSeg.size = v.step * v.rows;
DeviceInfo devInfo;
size_t textureAlignment = devInfo.textureAlignment();
NCVMatrixReuse<Ncv32f> frame0Mat(frame0MemSeg, static_cast<Ncv32u>(textureAlignment), frame0.cols, frame0.rows, static_cast<Ncv32u>(frame0.step));
NCVMatrixReuse<Ncv32f> frame1Mat(frame1MemSeg, static_cast<Ncv32u>(textureAlignment), frame1.cols, frame1.rows, static_cast<Ncv32u>(frame1.step));
NCVMatrixReuse<Ncv32f> uMat(uMemSeg, static_cast<Ncv32u>(textureAlignment), u.cols, u.rows, static_cast<Ncv32u>(u.step));
NCVMatrixReuse<Ncv32f> vMat(vMemSeg, static_cast<Ncv32u>(textureAlignment), v.cols, v.rows, static_cast<Ncv32u>(v.step));
size_t bufSize = getBufSize(desc, frame0Mat, frame1Mat, uMat, vMat, textureAlignment);
GpuMat buf = pool.getBuffer(1, static_cast<int>(bufSize), CV_8UC1);
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(textureAlignment), buf.ptr());
ncvSafeCall( NCVBroxOpticalFlow(desc, gpuAllocator, frame0Mat, frame1Mat, uMat, vMat, StreamAccessor::getStream(stream)) );
GpuMat flows[] = {u, v};
cuda::merge(flows, 2, _flow, stream);
}
}
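// Note: calc() above draws its u/v planes and the NCV scratch area from a
// stream-associated BufferPool instead of keeping persistent members. The same
// pattern in isolation (function and buffer names here are illustrative, not
// from this file):
//
//     void step(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::Stream& stream)
//     {
//         cv::cuda::BufferPool pool(stream);                            // pool tied to this stream
//         cv::cuda::GpuMat tmp = pool.getBuffer(src.size(), CV_32FC1);  // recycled allocation
//         src.convertTo(tmp, CV_32F, stream);
//         tmp.copyTo(dst, stream);
//     }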
namespace
{
    static void outputHandler(const String &msg) { CV_Error(cv::Error::GpuApiCallError, msg.c_str()); }
}
void cv::cuda::BroxOpticalFlow::operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& s)
{
ncvSetDebugOutputHandler(outputHandler);
CV_Assert(frame0.type() == CV_32FC1);
CV_Assert(frame1.size() == frame0.size() && frame1.type() == frame0.type());
u.create(frame0.size(), CV_32FC1);
v.create(frame0.size(), CV_32FC1);
cudaDeviceProp devProp;
cudaSafeCall( cudaGetDeviceProperties(&devProp, getDevice()) );
NCVBroxOpticalFlowDescriptor desc;
desc.alpha = alpha;
desc.gamma = gamma;
desc.scale_factor = scale_factor;
desc.number_of_inner_iterations = inner_iterations;
desc.number_of_outer_iterations = outer_iterations;
desc.number_of_solver_iterations = solver_iterations;
NCVMemSegment frame0MemSeg;
frame0MemSeg.begin.memtype = NCVMemoryTypeDevice;
frame0MemSeg.begin.ptr = const_cast<uchar*>(frame0.data);
frame0MemSeg.size = frame0.step * frame0.rows;
NCVMemSegment frame1MemSeg;
frame1MemSeg.begin.memtype = NCVMemoryTypeDevice;
frame1MemSeg.begin.ptr = const_cast<uchar*>(frame1.data);
frame1MemSeg.size = frame1.step * frame1.rows;
NCVMemSegment uMemSeg;
uMemSeg.begin.memtype = NCVMemoryTypeDevice;
uMemSeg.begin.ptr = u.ptr();
uMemSeg.size = u.step * u.rows;
NCVMemSegment vMemSeg;
vMemSeg.begin.memtype = NCVMemoryTypeDevice;
vMemSeg.begin.ptr = v.ptr();
vMemSeg.size = v.step * v.rows;
NCVMatrixReuse<Ncv32f> frame0Mat(frame0MemSeg, static_cast<Ncv32u>(devProp.textureAlignment), frame0.cols, frame0.rows, static_cast<Ncv32u>(frame0.step));
NCVMatrixReuse<Ncv32f> frame1Mat(frame1MemSeg, static_cast<Ncv32u>(devProp.textureAlignment), frame1.cols, frame1.rows, static_cast<Ncv32u>(frame1.step));
NCVMatrixReuse<Ncv32f> uMat(uMemSeg, static_cast<Ncv32u>(devProp.textureAlignment), u.cols, u.rows, static_cast<Ncv32u>(u.step));
NCVMatrixReuse<Ncv32f> vMat(vMemSeg, static_cast<Ncv32u>(devProp.textureAlignment), v.cols, v.rows, static_cast<Ncv32u>(v.step));
cudaStream_t stream = StreamAccessor::getStream(s);
size_t bufSize = getBufSize(desc, frame0Mat, frame1Mat, uMat, vMat, devProp);
ensureSizeIsEnough(1, static_cast<int>(bufSize), CV_8UC1, buf);
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), buf.ptr());
ncvSafeCall( NCVBroxOpticalFlow(desc, gpuAllocator, frame0Mat, frame1Mat, uMat, vMat, stream) );
}

Ptr<BroxOpticalFlow> cv::cuda::BroxOpticalFlow::create(double alpha, double gamma, double scale_factor, int inner_iterations, int outer_iterations, int solver_iterations)
{
    return makePtr<BroxOpticalFlowImpl>(alpha, gamma, scale_factor, inner_iterations, outer_iterations, solver_iterations);
}
#endif /* HAVE_CUDA */


@ -472,16 +472,16 @@ namespace pyrlk
}
}
void loadConstants(int2 winSize, int iters)
void loadConstants(int2 winSize, int iters, cudaStream_t stream)
{
cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_x, &winSize.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );
cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_y, &winSize.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_x, &halfWin.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );
cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_y, &halfWin.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );
cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbolAsync(c_iters, &iters, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );
}
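// loadConstants() now takes the stream and uses cudaMemcpyToSymbolAsync, so the
// constant uploads are ordered with the rest of the work on that stream instead
// of blocking the host. The same pattern in isolation (the symbol name below is
// illustrative):
//
//     __constant__ int c_param;
//
//     void uploadParam(int value, cudaStream_t stream)
//     {
//         // enqueued on 'stream'; with the legacy NULL stream this behaves like the old blocking copy
//         cudaSafeCall( cudaMemcpyToSymbolAsync(c_param, &value, sizeof(int), 0, cudaMemcpyHostToDevice, stream) );
//     }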
void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,


@ -66,15 +66,16 @@ namespace tvl1flow
dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x));
}
void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy)
void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
centeredGradientKernel<<<grid, block>>>(src, dx, dy);
centeredGradientKernel<<<grid, block, 0, stream>>>(src, dx, dy);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
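// The kernels in this file now launch on the caller's stream, and the device-wide
// synchronization is kept only for the legacy NULL-stream path, preserving the old
// blocking semantics for synchronous callers. A sketch of the pattern ('someKernel'
// is a placeholder, not a kernel from this file):
//
//     void launchOnStream(PtrStepSzf src, PtrStepSzf dst, cudaStream_t stream)
//     {
//         const dim3 block(32, 8);
//         const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
//
//         someKernel<<<grid, block, 0, stream>>>(src, dst);
//         cudaSafeCall( cudaGetLastError() );
//
//         if (!stream)                  // NULL stream: keep the old synchronous behaviour
//             cudaSafeCall( cudaDeviceSynchronize() );
//     }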
}
@ -164,7 +165,10 @@ namespace tvl1flow
rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val;
}
void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho)
void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y,
PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx,
PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho,
cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y));
@ -173,10 +177,11 @@ namespace tvl1flow
bindTexture(&tex_I1x, I1x);
bindTexture(&tex_I1y, I1y);
warpBackwardKernel<<<grid, block>>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho);
warpBackwardKernel<<<grid, block, 0, stream>>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
@ -292,15 +297,17 @@ namespace tvl1flow
PtrStepSzf grad, PtrStepSzf rho_c,
PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32,
PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error,
float l_t, float theta, float gamma, bool calcError)
float l_t, float theta, float gamma, bool calcError,
cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y));
estimateUKernel<<<grid, block>>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError);
estimateUKernel<<<grid, block, 0, stream>>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
@ -346,15 +353,19 @@ namespace tvl1flow
}
}
void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, float gamma)
void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3,
PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32,
float taut, float gamma,
cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y));
estimateDualVariablesKernel<<<grid, block>>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma);
estimateDualVariablesKernel<<<grid, block, 0, stream>>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
}


@ -42,23 +42,21 @@
#include "precomp.hpp"
#define MIN_SIZE 32
#define S(x) StreamAccessor::getStream(x)
// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag
// leads to an inefficient code. It's for debug purposes only.
#define ENABLE_CUDA_RESIZE 1
using namespace cv;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
void cv::cuda::FarnebackOpticalFlow::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
Ptr<FarnebackOpticalFlow> cv::cuda::FarnebackOpticalFlow::create(int, double, bool, int, int, int, double, int) { throw_no_cuda(); return Ptr<FarnebackOpticalFlow>(); }
#else
#define MIN_SIZE 32
// CUDA resize() is fast, but it differs from the CPU analog. Disabling this flag
// leads to an inefficient code. It's for debug purposes only.
#define ENABLE_CUDA_RESIZE 1
namespace cv { namespace cuda { namespace device { namespace optflow_farneback
{
void setPolynomialExpansionConsts(
@ -76,8 +74,6 @@ namespace cv { namespace cuda { namespace device { namespace optflow_farneback
void updateFlowGpu(
const PtrStepSzf M, PtrStepSzf flowx, PtrStepSzf flowy, cudaStream_t stream);
/*void boxFilterGpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream);*/
void boxFilter5Gpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream);
void boxFilter5Gpu_CC11(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream);
@ -93,10 +89,93 @@ namespace cv { namespace cuda { namespace device { namespace optflow_farneback
void gaussianBlur5Gpu_CC11(
const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderType, cudaStream_t stream);
}}}} // namespace cv { namespace cuda { namespace cudev { namespace optflow_farneback
}}}}
namespace
{
class FarnebackOpticalFlowImpl : public FarnebackOpticalFlow
{
public:
FarnebackOpticalFlowImpl(int numLevels, double pyrScale, bool fastPyramids, int winSize,
int numIters, int polyN, double polySigma, int flags) :
numLevels_(numLevels), pyrScale_(pyrScale), fastPyramids_(fastPyramids), winSize_(winSize),
numIters_(numIters), polyN_(polyN), polySigma_(polySigma), flags_(flags)
{
}
virtual int getNumLevels() const { return numLevels_; }
virtual void setNumLevels(int numLevels) { numLevels_ = numLevels; }
virtual double getPyrScale() const { return pyrScale_; }
virtual void setPyrScale(double pyrScale) { pyrScale_ = pyrScale; }
virtual bool getFastPyramids() const { return fastPyramids_; }
virtual void setFastPyramids(bool fastPyramids) { fastPyramids_ = fastPyramids; }
virtual int getWinSize() const { return winSize_; }
virtual void setWinSize(int winSize) { winSize_ = winSize; }
virtual int getNumIters() const { return numIters_; }
virtual void setNumIters(int numIters) { numIters_ = numIters; }
virtual int getPolyN() const { return polyN_; }
virtual void setPolyN(int polyN) { polyN_ = polyN; }
virtual double getPolySigma() const { return polySigma_; }
virtual void setPolySigma(double polySigma) { polySigma_ = polySigma; }
virtual int getFlags() const { return flags_; }
virtual void setFlags(int flags) { flags_ = flags; }
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream);
private:
int numLevels_;
double pyrScale_;
bool fastPyramids_;
int winSize_;
int numIters_;
int polyN_;
double polySigma_;
int flags_;
private:
void prepareGaussian(
int n, double sigma, float *g, float *xg, float *xxg,
double &ig11, double &ig03, double &ig33, double &ig55);
void setPolynomialExpansionConsts(int n, double sigma);
void updateFlow_boxFilter(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
void updateFlow_gaussianBlur(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
void calcImpl(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &stream);
GpuMat frames_[2];
GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
std::vector<GpuMat> pyramid0_, pyramid1_;
};
void FarnebackOpticalFlowImpl::calc(InputArray _frame0, InputArray _frame1, InputOutputArray _flow, Stream& stream)
{
const GpuMat frame0 = _frame0.getGpuMat();
const GpuMat frame1 = _frame1.getGpuMat();
BufferPool pool(stream);
GpuMat flowx = pool.getBuffer(frame0.size(), CV_32FC1);
GpuMat flowy = pool.getBuffer(frame0.size(), CV_32FC1);
calcImpl(frame0, frame1, flowx, flowy, stream);
GpuMat flows[] = {flowx, flowy};
cuda::merge(flows, 2, _flow, stream);
}
GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat& mat)
{
if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols)
@ -104,285 +183,287 @@ namespace
return mat = GpuMat(rows, cols, type);
}
}
void cv::cuda::FarnebackOpticalFlow::prepareGaussian(
    int n, double sigma, float *g, float *xg, float *xxg,
    double &ig11, double &ig03, double &ig33, double &ig55)
{
    double s = 0.;
    for (int x = -n; x <= n; x++)
    {
        g[x] = (float)std::exp(-x*x/(2*sigma*sigma));
        s += g[x];
    }
    s = 1./s;
    for (int x = -n; x <= n; x++)
    {
        g[x] = (float)(g[x]*s);
        xg[x] = (float)(x*g[x]);
        xxg[x] = (float)(x*x*g[x]);
    }
    Mat_<double> G(6, 6);
    G.setTo(0);
    for (int y = -n; y <= n; y++)
    {
        for (int x = -n; x <= n; x++)
        {
            G(0,0) += g[y]*g[x];
            G(1,1) += g[y]*g[x]*x*x;
            G(3,3) += g[y]*g[x]*x*x*x*x;
            G(5,5) += g[y]*g[x]*x*x*y*y;
        }
    }
    //G[0][0] = 1.;
    G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1);
    G(4,4) = G(3,3);
    G(3,4) = G(4,3) = G(5,5);
    // invG:
    // [ x e e ]
    // [ y ]
    // [ y ]
    // [ e z ]
    // [ e z ]
    // [ u ]
    Mat_<double> invG = G.inv(DECOMP_CHOLESKY);
    ig11 = invG(1,1);
    ig03 = invG(0,3);
    ig33 = invG(3,3);
    ig55 = invG(5,5);
}
void cv::cuda::FarnebackOpticalFlow::setPolynomialExpansionConsts(int n, double sigma)
{
std::vector<float> buf(n*6 + 3);
float* g = &buf[0] + n;
float* xg = g + n*2 + 1;
float* xxg = xg + n*2 + 1;
if (sigma < FLT_EPSILON)
sigma = n*0.3;
double ig11, ig03, ig33, ig55;
prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55);
device::optflow_farneback::setPolynomialExpansionConsts(n, g, xg, xxg, static_cast<float>(ig11), static_cast<float>(ig03), static_cast<float>(ig33), static_cast<float>(ig55));
}
void cv::cuda::FarnebackOpticalFlow::updateFlow_boxFilter(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[])
{
if (deviceSupports(FEATURE_SET_COMPUTE_12))
device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, S(streams[0]));
else
device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, S(streams[0]));
swap(M, bufM);
for (int i = 1; i < 5; ++i)
streams[i].waitForCompletion();
device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0]));
if (updateMatrices)
device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, S(streams[0]));
}
void cv::cuda::FarnebackOpticalFlow::updateFlow_gaussianBlur(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[])
{
if (deviceSupports(FEATURE_SET_COMPUTE_12))
device::optflow_farneback::gaussianBlur5Gpu(
M, blockSize/2, bufM, BORDER_REPLICATE, S(streams[0]));
else
device::optflow_farneback::gaussianBlur5Gpu_CC11(
M, blockSize/2, bufM, BORDER_REPLICATE, S(streams[0]));
swap(M, bufM);
device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0]));
if (updateMatrices)
device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, S(streams[0]));
}
void cv::cuda::FarnebackOpticalFlow::operator ()(
const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s)
{
CV_Assert(frame0.channels() == 1 && frame1.channels() == 1);
CV_Assert(frame0.size() == frame1.size());
CV_Assert(polyN == 5 || polyN == 7);
CV_Assert(!fastPyramids || std::abs(pyrScale - 0.5) < 1e-6);
Stream streams[5];
if (S(s))
streams[0] = s;
Size size = frame0.size();
GpuMat prevFlowX, prevFlowY, curFlowX, curFlowY;
flowx.create(size, CV_32F);
flowy.create(size, CV_32F);
GpuMat flowx0 = flowx;
GpuMat flowy0 = flowy;
// Crop unnecessary levels
double scale = 1;
int numLevelsCropped = 0;
for (; numLevelsCropped < numLevels; numLevelsCropped++)
{
scale *= pyrScale;
if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE)
break;
}
frame0.convertTo(frames_[0], CV_32F, streams[0]);
frame1.convertTo(frames_[1], CV_32F, streams[1]);
if (fastPyramids)
{
// Build Gaussian pyramids using pyrDown()
pyramid0_.resize(numLevelsCropped + 1);
pyramid1_.resize(numLevelsCropped + 1);
pyramid0_[0] = frames_[0];
pyramid1_[0] = frames_[1];
for (int i = 1; i <= numLevelsCropped; ++i)
{
cuda::pyrDown(pyramid0_[i - 1], pyramid0_[i], streams[0]);
cuda::pyrDown(pyramid1_[i - 1], pyramid1_[i], streams[1]);
}
}
setPolynomialExpansionConsts(polyN, polySigma);
device::optflow_farneback::setUpdateMatricesConsts();
for (int k = numLevelsCropped; k >= 0; k--)
{
streams[0].waitForCompletion();
scale = 1;
for (int i = 0; i < k; i++)
scale *= pyrScale;
double sigma = (1./scale - 1) * 0.5;
int smoothSize = cvRound(sigma*5) | 1;
smoothSize = std::max(smoothSize, 3);
int width = cvRound(size.width*scale);
int height = cvRound(size.height*scale);
if (fastPyramids)
{
width = pyramid0_[k].cols;
height = pyramid0_[k].rows;
        }

        if (k > 0)
        {
            curFlowX.create(height, width, CV_32F);
            curFlowY.create(height, width, CV_32F);
        }
        else
        {
            curFlowX = flowx0;
            curFlowY = flowy0;
        }

        if (!prevFlowX.data)
        {
            if (flags & OPTFLOW_USE_INITIAL_FLOW)
            {
                cuda::resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]);
                cuda::resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]);
                curFlowX.convertTo(curFlowX, curFlowX.depth(), scale, streams[0]);
                curFlowY.convertTo(curFlowY, curFlowY.depth(), scale, streams[1]);
            }
            else
            {
                curFlowX.setTo(0, streams[0]);
                curFlowY.setTo(0, streams[1]);
            }
        }
        else
        {
            cuda::resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]);
            cuda::resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]);
            curFlowX.convertTo(curFlowX, curFlowX.depth(), 1./pyrScale, streams[0]);
            curFlowY.convertTo(curFlowY, curFlowY.depth(), 1./pyrScale, streams[1]);
        }

        GpuMat M = allocMatFromBuf(5*height, width, CV_32F, M_);
        GpuMat bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_);
        GpuMat R[2] =
        {
            allocMatFromBuf(5*height, width, CV_32F, R_[0]),
            allocMatFromBuf(5*height, width, CV_32F, R_[1])
        };

        if (fastPyramids)
        {
            device::optflow_farneback::polynomialExpansionGpu(pyramid0_[k], polyN, R[0], S(streams[0]));
            device::optflow_farneback::polynomialExpansionGpu(pyramid1_[k], polyN, R[1], S(streams[1]));
        }
        else
        {
            GpuMat blurredFrame[2] =
            {
                allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]),
                allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1])
            };
            GpuMat pyrLevel[2] =
            {
                allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]),
                allocMatFromBuf(height, width, CV_32F, pyrLevel_[1])
            };

            Mat g = getGaussianKernel(smoothSize, sigma, CV_32F);
            device::optflow_farneback::setGaussianBlurKernel(g.ptr<float>(smoothSize/2), smoothSize/2);

            for (int i = 0; i < 2; i++)
            {
                device::optflow_farneback::gaussianBlurGpu(
                    frames_[i], smoothSize/2, blurredFrame[i], BORDER_REFLECT101, S(streams[i]));
                cuda::resize(blurredFrame[i], pyrLevel[i], Size(width, height), 0.0, 0.0, INTER_LINEAR, streams[i]);
                device::optflow_farneback::polynomialExpansionGpu(pyrLevel[i], polyN, R[i], S(streams[i]));
            }
        }

        streams[1].waitForCompletion();
        device::optflow_farneback::updateMatricesGpu(curFlowX, curFlowY, R[0], R[1], M, S(streams[0]));

        if (flags & OPTFLOW_FARNEBACK_GAUSSIAN)
        {
            Mat g = getGaussianKernel(winSize, winSize/2*0.3f, CV_32F);
            device::optflow_farneback::setGaussianBlurKernel(g.ptr<float>(winSize/2), winSize/2);
        }

        for (int i = 0; i < numIters; i++)
        {
            if (flags & OPTFLOW_FARNEBACK_GAUSSIAN)
                updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1, streams);
            else
                updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize, i < numIters-1, streams);
        }

        prevFlowX = curFlowX;
        prevFlowY = curFlowY;
    }

    flowx = curFlowX;
    flowy = curFlowY;

    if (!S(s))
        streams[0].waitForCompletion();
}

void FarnebackOpticalFlowImpl::prepareGaussian(
    int n, double sigma, float *g, float *xg, float *xxg,
    double &ig11, double &ig03, double &ig33, double &ig55)
{
    double s = 0.;
    for (int x = -n; x <= n; x++)
    {
        g[x] = (float)std::exp(-x*x/(2*sigma*sigma));
        s += g[x];
    }
    s = 1./s;
    for (int x = -n; x <= n; x++)
    {
        g[x] = (float)(g[x]*s);
        xg[x] = (float)(x*g[x]);
        xxg[x] = (float)(x*x*g[x]);
    }
    Mat_<double> G(6, 6);
    G.setTo(0);
    for (int y = -n; y <= n; y++)
    {
        for (int x = -n; x <= n; x++)
        {
            G(0,0) += g[y]*g[x];
            G(1,1) += g[y]*g[x]*x*x;
            G(3,3) += g[y]*g[x]*x*x*x*x;
            G(5,5) += g[y]*g[x]*x*x*y*y;
        }
    }
    //G[0][0] = 1.;
    G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1);
    G(4,4) = G(3,3);
    G(3,4) = G(4,3) = G(5,5);
    // invG:
    // [ x e e ]
    // [ y ]
    // [ y ]
    // [ e z ]
    // [ e z ]
    // [ u ]
    Mat_<double> invG = G.inv(DECOMP_CHOLESKY);
    ig11 = invG(1,1);
    ig03 = invG(0,3);
    ig33 = invG(3,3);
    ig55 = invG(5,5);
}
void FarnebackOpticalFlowImpl::setPolynomialExpansionConsts(int n, double sigma)
{
std::vector<float> buf(n*6 + 3);
float* g = &buf[0] + n;
float* xg = g + n*2 + 1;
float* xxg = xg + n*2 + 1;
if (sigma < FLT_EPSILON)
sigma = n*0.3;
double ig11, ig03, ig33, ig55;
prepareGaussian(n, sigma, g, xg, xxg, ig11, ig03, ig33, ig55);
device::optflow_farneback::setPolynomialExpansionConsts(n, g, xg, xxg, static_cast<float>(ig11), static_cast<float>(ig03), static_cast<float>(ig33), static_cast<float>(ig55));
}
void FarnebackOpticalFlowImpl::updateFlow_boxFilter(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[])
{
if (deviceSupports(FEATURE_SET_COMPUTE_12))
device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, StreamAccessor::getStream(streams[0]));
else
device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, StreamAccessor::getStream(streams[0]));
swap(M, bufM);
for (int i = 1; i < 5; ++i)
streams[i].waitForCompletion();
device::optflow_farneback::updateFlowGpu(M, flowx, flowy, StreamAccessor::getStream(streams[0]));
if (updateMatrices)
device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, StreamAccessor::getStream(streams[0]));
}
void FarnebackOpticalFlowImpl::updateFlow_gaussianBlur(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[])
{
if (deviceSupports(FEATURE_SET_COMPUTE_12))
device::optflow_farneback::gaussianBlur5Gpu(
M, blockSize/2, bufM, BORDER_REPLICATE, StreamAccessor::getStream(streams[0]));
else
device::optflow_farneback::gaussianBlur5Gpu_CC11(
M, blockSize/2, bufM, BORDER_REPLICATE, StreamAccessor::getStream(streams[0]));
swap(M, bufM);
device::optflow_farneback::updateFlowGpu(M, flowx, flowy, StreamAccessor::getStream(streams[0]));
if (updateMatrices)
device::optflow_farneback::updateMatricesGpu(flowx, flowy, R0, R1, M, StreamAccessor::getStream(streams[0]));
}
void FarnebackOpticalFlowImpl::calcImpl(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &stream)
{
CV_Assert(frame0.channels() == 1 && frame1.channels() == 1);
CV_Assert(frame0.size() == frame1.size());
CV_Assert(polyN_ == 5 || polyN_ == 7);
CV_Assert(!fastPyramids_ || std::abs(pyrScale_ - 0.5) < 1e-6);
Stream streams[5];
if (stream)
streams[0] = stream;
Size size = frame0.size();
GpuMat prevFlowX, prevFlowY, curFlowX, curFlowY;
flowx.create(size, CV_32F);
flowy.create(size, CV_32F);
GpuMat flowx0 = flowx;
GpuMat flowy0 = flowy;
// Crop unnecessary levels
double scale = 1;
int numLevelsCropped = 0;
for (; numLevelsCropped < numLevels_; numLevelsCropped++)
{
scale *= pyrScale_;
if (size.width*scale < MIN_SIZE || size.height*scale < MIN_SIZE)
break;
}
frame0.convertTo(frames_[0], CV_32F, streams[0]);
frame1.convertTo(frames_[1], CV_32F, streams[1]);
if (fastPyramids_)
{
// Build Gaussian pyramids using pyrDown()
pyramid0_.resize(numLevelsCropped + 1);
pyramid1_.resize(numLevelsCropped + 1);
pyramid0_[0] = frames_[0];
pyramid1_[0] = frames_[1];
for (int i = 1; i <= numLevelsCropped; ++i)
{
cuda::pyrDown(pyramid0_[i - 1], pyramid0_[i], streams[0]);
cuda::pyrDown(pyramid1_[i - 1], pyramid1_[i], streams[1]);
}
}
setPolynomialExpansionConsts(polyN_, polySigma_);
device::optflow_farneback::setUpdateMatricesConsts();
for (int k = numLevelsCropped; k >= 0; k--)
{
streams[0].waitForCompletion();
scale = 1;
for (int i = 0; i < k; i++)
scale *= pyrScale_;
double sigma = (1./scale - 1) * 0.5;
int smoothSize = cvRound(sigma*5) | 1;
smoothSize = std::max(smoothSize, 3);
int width = cvRound(size.width*scale);
int height = cvRound(size.height*scale);
if (fastPyramids_)
{
width = pyramid0_[k].cols;
height = pyramid0_[k].rows;
}
        }

        if (k > 0)
        {
            curFlowX.create(height, width, CV_32F);
            curFlowY.create(height, width, CV_32F);
        }
        else
        {
            curFlowX = flowx0;
            curFlowY = flowy0;
        }

        if (!prevFlowX.data)
        {
            if (flags_ & OPTFLOW_USE_INITIAL_FLOW)
            {
                cuda::resize(flowx0, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]);
                cuda::resize(flowy0, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]);
                curFlowX.convertTo(curFlowX, curFlowX.depth(), scale, streams[0]);
                curFlowY.convertTo(curFlowY, curFlowY.depth(), scale, streams[1]);
            }
            else
            {
                curFlowX.setTo(0, streams[0]);
                curFlowY.setTo(0, streams[1]);
            }
        }
        else
        {
            cuda::resize(prevFlowX, curFlowX, Size(width, height), 0, 0, INTER_LINEAR, streams[0]);
            cuda::resize(prevFlowY, curFlowY, Size(width, height), 0, 0, INTER_LINEAR, streams[1]);
            curFlowX.convertTo(curFlowX, curFlowX.depth(), 1./pyrScale_, streams[0]);
            curFlowY.convertTo(curFlowY, curFlowY.depth(), 1./pyrScale_, streams[1]);
        }
GpuMat M = allocMatFromBuf(5*height, width, CV_32F, M_);
GpuMat bufM = allocMatFromBuf(5*height, width, CV_32F, bufM_);
GpuMat R[2] =
{
allocMatFromBuf(5*height, width, CV_32F, R_[0]),
allocMatFromBuf(5*height, width, CV_32F, R_[1])
};
if (fastPyramids_)
{
device::optflow_farneback::polynomialExpansionGpu(pyramid0_[k], polyN_, R[0], StreamAccessor::getStream(streams[0]));
device::optflow_farneback::polynomialExpansionGpu(pyramid1_[k], polyN_, R[1], StreamAccessor::getStream(streams[1]));
}
else
{
GpuMat blurredFrame[2] =
{
allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[0]),
allocMatFromBuf(size.height, size.width, CV_32F, blurredFrame_[1])
};
GpuMat pyrLevel[2] =
{
allocMatFromBuf(height, width, CV_32F, pyrLevel_[0]),
allocMatFromBuf(height, width, CV_32F, pyrLevel_[1])
};
Mat g = getGaussianKernel(smoothSize, sigma, CV_32F);
device::optflow_farneback::setGaussianBlurKernel(g.ptr<float>(smoothSize/2), smoothSize/2);
for (int i = 0; i < 2; i++)
{
device::optflow_farneback::gaussianBlurGpu(
frames_[i], smoothSize/2, blurredFrame[i], BORDER_REFLECT101, StreamAccessor::getStream(streams[i]));
cuda::resize(blurredFrame[i], pyrLevel[i], Size(width, height), 0.0, 0.0, INTER_LINEAR, streams[i]);
device::optflow_farneback::polynomialExpansionGpu(pyrLevel[i], polyN_, R[i], StreamAccessor::getStream(streams[i]));
}
}
streams[1].waitForCompletion();
device::optflow_farneback::updateMatricesGpu(curFlowX, curFlowY, R[0], R[1], M, StreamAccessor::getStream(streams[0]));
if (flags_ & OPTFLOW_FARNEBACK_GAUSSIAN)
{
Mat g = getGaussianKernel(winSize_, winSize_/2*0.3f, CV_32F);
device::optflow_farneback::setGaussianBlurKernel(g.ptr<float>(winSize_/2), winSize_/2);
}
for (int i = 0; i < numIters_; i++)
{
if (flags_ & OPTFLOW_FARNEBACK_GAUSSIAN)
updateFlow_gaussianBlur(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize_, i < numIters_-1, streams);
else
updateFlow_boxFilter(R[0], R[1], curFlowX, curFlowY, M, bufM, winSize_, i < numIters_-1, streams);
}
        prevFlowX = curFlowX;
        prevFlowY = curFlowY;
    }

    flowx = curFlowX;
    flowy = curFlowY;

    if (!stream)
        streams[0].waitForCompletion();
}
}
Ptr<FarnebackOpticalFlow> cv::cuda::FarnebackOpticalFlow::create(int numLevels, double pyrScale, bool fastPyramids, int winSize,
int numIters, int polyN, double polySigma, int flags)
{
return makePtr<FarnebackOpticalFlowImpl>(numLevels, pyrScale, fastPyramids, winSize,
numIters, polyN, polySigma, flags);
}
#endif


@ -47,37 +47,54 @@ using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
cv::cuda::PyrLKOpticalFlow::PyrLKOpticalFlow() { throw_no_cuda(); }
void cv::cuda::PyrLKOpticalFlow::sparse(const GpuMat&, const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_no_cuda(); }
void cv::cuda::PyrLKOpticalFlow::dense(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_no_cuda(); }
void cv::cuda::PyrLKOpticalFlow::releaseMemory() {}
Ptr<SparsePyrLKOpticalFlow> cv::cuda::SparsePyrLKOpticalFlow::create(Size, int, int, bool) { throw_no_cuda(); return Ptr<SparsePyrLKOpticalFlow>(); }
Ptr<DensePyrLKOpticalFlow> cv::cuda::DensePyrLKOpticalFlow::create(Size, int, int, bool) { throw_no_cuda(); return Ptr<DensePyrLKOpticalFlow>(); }
#else /* !defined (HAVE_CUDA) */
namespace pyrlk
{
void loadConstants(int2 winSize, int iters);
void loadConstants(int2 winSize, int iters, cudaStream_t stream);
void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream = 0);
int level, dim3 block, dim3 patch, cudaStream_t stream);
void sparse4(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream = 0);
int level, dim3 block, dim3 patch, cudaStream_t stream);
void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV,
           PtrStepSzf err, int2 winSize, cudaStream_t stream = 0);
           PtrStepSzf err, int2 winSize, cudaStream_t stream);
}

cv::cuda::PyrLKOpticalFlow::PyrLKOpticalFlow()
{
    winSize = Size(21, 21);
    maxLevel = 3;
    iters = 30;
    useInitialFlow = false;
}
namespace
{
class PyrLKOpticalFlowBase
{
public:
    PyrLKOpticalFlowBase(Size winSize, int maxLevel, int iters, bool useInitialFlow);

    void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
                GpuMat& status, GpuMat* err, Stream& stream);

    void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, Stream& stream);

protected:
    Size winSize_;
    int maxLevel_;
    int iters_;
    bool useInitialFlow_;

private:
    std::vector<GpuMat> prevPyr_;
    std::vector<GpuMat> nextPyr_;
};

PyrLKOpticalFlowBase::PyrLKOpticalFlowBase(Size winSize, int maxLevel, int iters, bool useInitialFlow) :
    winSize_(winSize), maxLevel_(maxLevel), iters_(iters), useInitialFlow_(useInitialFlow)
{
}

void calcPatchSize(cv::Size winSize, dim3& block, dim3& patch)
void calcPatchSize(Size winSize, dim3& block, dim3& patch)
{
if (winSize.width > 32 && winSize.width > 2 * winSize.height)
{
@ -95,156 +112,239 @@ namespace
block.z = patch.z = 1;
}
}
void cv::cuda::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err)
{
if (prevPts.empty())
void PyrLKOpticalFlowBase::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err, Stream& stream)
{
nextPts.release();
status.release();
if (err) err->release();
return;
}
dim3 block, patch;
calcPatchSize(winSize, block, patch);
CV_Assert(prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4);
CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type());
CV_Assert(maxLevel >= 0);
CV_Assert(winSize.width > 2 && winSize.height > 2);
CV_Assert(patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6);
CV_Assert(prevPts.rows == 1 && prevPts.type() == CV_32FC2);
if (useInitialFlow)
CV_Assert(nextPts.size() == prevPts.size() && nextPts.type() == CV_32FC2);
else
ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts);
GpuMat temp1 = (useInitialFlow ? nextPts : prevPts).reshape(1);
GpuMat temp2 = nextPts.reshape(1);
cuda::multiply(temp1, Scalar::all(1.0 / (1 << maxLevel) / 2.0), temp2);
ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status);
status.setTo(Scalar::all(1));
if (err)
ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err);
// build the image pyramids.
prevPyr_.resize(maxLevel + 1);
nextPyr_.resize(maxLevel + 1);
int cn = prevImg.channels();
if (cn == 1 || cn == 4)
{
prevImg.convertTo(prevPyr_[0], CV_32F);
nextImg.convertTo(nextPyr_[0], CV_32F);
}
else
{
cuda::cvtColor(prevImg, buf_, COLOR_BGR2BGRA);
buf_.convertTo(prevPyr_[0], CV_32F);
cuda::cvtColor(nextImg, buf_, COLOR_BGR2BGRA);
buf_.convertTo(nextPyr_[0], CV_32F);
}
for (int level = 1; level <= maxLevel; ++level)
{
cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level]);
cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level]);
}
pyrlk::loadConstants(make_int2(winSize.width, winSize.height), iters);
for (int level = maxLevel; level >= 0; level--)
{
if (cn == 1)
if (prevPts.empty())
{
pyrlk::sparse1(prevPyr_[level], nextPyr_[level],
prevPts.ptr<float2>(), nextPts.ptr<float2>(), status.ptr(), level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
level, block, patch);
nextPts.release();
status.release();
if (err) err->release();
return;
}
dim3 block, patch;
calcPatchSize(winSize_, block, patch);
CV_Assert( prevImg.channels() == 1 || prevImg.channels() == 3 || prevImg.channels() == 4 );
CV_Assert( prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type() );
CV_Assert( maxLevel_ >= 0 );
CV_Assert( winSize_.width > 2 && winSize_.height > 2 );
CV_Assert( patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6 );
CV_Assert( prevPts.rows == 1 && prevPts.type() == CV_32FC2 );
if (useInitialFlow_)
CV_Assert( nextPts.size() == prevPts.size() && nextPts.type() == prevPts.type() );
else
ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts);
GpuMat temp1 = (useInitialFlow_ ? nextPts : prevPts).reshape(1);
GpuMat temp2 = nextPts.reshape(1);
cuda::multiply(temp1, Scalar::all(1.0 / (1 << maxLevel_) / 2.0), temp2, 1, -1, stream);
ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status);
status.setTo(Scalar::all(1), stream);
if (err)
ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err);
// build the image pyramids.
BufferPool pool(stream);
prevPyr_.resize(maxLevel_ + 1);
nextPyr_.resize(maxLevel_ + 1);
int cn = prevImg.channels();
if (cn == 1 || cn == 4)
{
prevImg.convertTo(prevPyr_[0], CV_32F, stream);
nextImg.convertTo(nextPyr_[0], CV_32F, stream);
}
else
{
pyrlk::sparse4(prevPyr_[level], nextPyr_[level],
prevPts.ptr<float2>(), nextPts.ptr<float2>(), status.ptr(), level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
level, block, patch);
GpuMat buf = pool.getBuffer(prevImg.size(), CV_MAKE_TYPE(prevImg.depth(), 4));
cuda::cvtColor(prevImg, buf, COLOR_BGR2BGRA, 0, stream);
buf.convertTo(prevPyr_[0], CV_32F, stream);
cuda::cvtColor(nextImg, buf, COLOR_BGR2BGRA, 0, stream);
buf.convertTo(nextPyr_[0], CV_32F, stream);
}
for (int level = 1; level <= maxLevel_; ++level)
{
cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level], stream);
cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level], stream);
}
pyrlk::loadConstants(make_int2(winSize_.width, winSize_.height), iters_, StreamAccessor::getStream(stream));
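// coarse-to-fine: start at the coarsest level and refine the tracked points at each finer one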
for (int level = maxLevel_; level >= 0; level--)
{
if (cn == 1)
{
pyrlk::sparse1(prevPyr_[level], nextPyr_[level],
prevPts.ptr<float2>(), nextPts.ptr<float2>(),
status.ptr(),
level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
level, block, patch,
StreamAccessor::getStream(stream));
}
else
{
pyrlk::sparse4(prevPyr_[level], nextPyr_[level],
prevPts.ptr<float2>(), nextPts.ptr<float2>(),
status.ptr(),
level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
level, block, patch,
StreamAccessor::getStream(stream));
}
}
}
void PyrLKOpticalFlowBase::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, Stream& stream)
{
CV_Assert( prevImg.type() == CV_8UC1 );
CV_Assert( prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type() );
CV_Assert( maxLevel_ >= 0 );
CV_Assert( winSize_.width > 2 && winSize_.height > 2 );
// build the image pyramids.
prevPyr_.resize(maxLevel_ + 1);
nextPyr_.resize(maxLevel_ + 1);
prevPyr_[0] = prevImg;
nextImg.convertTo(nextPyr_[0], CV_32F, stream);
for (int level = 1; level <= maxLevel_; ++level)
{
cuda::pyrDown(prevPyr_[level - 1], prevPyr_[level], stream);
cuda::pyrDown(nextPyr_[level - 1], nextPyr_[level], stream);
}
BufferPool pool(stream);
GpuMat uPyr[] = {
pool.getBuffer(prevImg.size(), CV_32FC1),
pool.getBuffer(prevImg.size(), CV_32FC1),
};
GpuMat vPyr[] = {
pool.getBuffer(prevImg.size(), CV_32FC1),
pool.getBuffer(prevImg.size(), CV_32FC1),
};
uPyr[0].setTo(Scalar::all(0), stream);
vPyr[0].setTo(Scalar::all(0), stream);
uPyr[1].setTo(Scalar::all(0), stream);
vPyr[1].setTo(Scalar::all(0), stream);
int2 winSize2i = make_int2(winSize_.width, winSize_.height);
pyrlk::loadConstants(winSize2i, iters_, StreamAccessor::getStream(stream));
int idx = 0;
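// uPyr/vPyr act as ping-pong buffers: idx selects the current estimate,
// idx2 = (idx + 1) & 1 the buffer that receives the refined flow.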
for (int level = maxLevel_; level >= 0; level--)
{
int idx2 = (idx + 1) & 1;
pyrlk::dense(prevPyr_[level], nextPyr_[level],
uPyr[idx], vPyr[idx], uPyr[idx2], vPyr[idx2],
PtrStepSzf(), winSize2i,
StreamAccessor::getStream(stream));
if (level > 0)
idx = idx2;
}
uPyr[idx].copyTo(u, stream);
vPyr[idx].copyTo(v, stream);
}
class SparsePyrLKOpticalFlowImpl : public SparsePyrLKOpticalFlow, private PyrLKOpticalFlowBase
{
public:
SparsePyrLKOpticalFlowImpl(Size winSize, int maxLevel, int iters, bool useInitialFlow) :
PyrLKOpticalFlowBase(winSize, maxLevel, iters, useInitialFlow)
{
}
virtual Size getWinSize() const { return winSize_; }
virtual void setWinSize(Size winSize) { winSize_ = winSize; }
virtual int getMaxLevel() const { return maxLevel_; }
virtual void setMaxLevel(int maxLevel) { maxLevel_ = maxLevel; }
virtual int getNumIters() const { return iters_; }
virtual void setNumIters(int iters) { iters_ = iters; }
virtual bool getUseInitialFlow() const { return useInitialFlow_; }
virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; }
virtual void calc(InputArray _prevImg, InputArray _nextImg,
InputArray _prevPts, InputOutputArray _nextPts,
OutputArray _status,
OutputArray _err,
Stream& stream)
{
const GpuMat prevImg = _prevImg.getGpuMat();
const GpuMat nextImg = _nextImg.getGpuMat();
const GpuMat prevPts = _prevPts.getGpuMat();
GpuMat& nextPts = _nextPts.getGpuMatRef();
GpuMat& status = _status.getGpuMatRef();
GpuMat* err = _err.needed() ? &(_err.getGpuMatRef()) : NULL;
sparse(prevImg, nextImg, prevPts, nextPts, status, err, stream);
}
};
class DensePyrLKOpticalFlowImpl : public DensePyrLKOpticalFlow, private PyrLKOpticalFlowBase
{
public:
DensePyrLKOpticalFlowImpl(Size winSize, int maxLevel, int iters, bool useInitialFlow) :
PyrLKOpticalFlowBase(winSize, maxLevel, iters, useInitialFlow)
{
}
virtual Size getWinSize() const { return winSize_; }
virtual void setWinSize(Size winSize) { winSize_ = winSize; }
virtual int getMaxLevel() const { return maxLevel_; }
virtual void setMaxLevel(int maxLevel) { maxLevel_ = maxLevel; }
virtual int getNumIters() const { return iters_; }
virtual void setNumIters(int iters) { iters_ = iters; }
virtual bool getUseInitialFlow() const { return useInitialFlow_; }
virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; }
virtual void calc(InputArray _prevImg, InputArray _nextImg, InputOutputArray _flow, Stream& stream)
{
const GpuMat prevImg = _prevImg.getGpuMat();
const GpuMat nextImg = _nextImg.getGpuMat();
BufferPool pool(stream);
GpuMat u = pool.getBuffer(prevImg.size(), CV_32FC1);
GpuMat v = pool.getBuffer(prevImg.size(), CV_32FC1);
dense(prevImg, nextImg, u, v, stream);
GpuMat flows[] = {u, v};
cuda::merge(flows, 2, _flow, stream);
}
};
}
Ptr<SparsePyrLKOpticalFlow> cv::cuda::SparsePyrLKOpticalFlow::create(Size winSize, int maxLevel, int iters, bool useInitialFlow)
{
return makePtr<SparsePyrLKOpticalFlowImpl>(winSize, maxLevel, iters, useInitialFlow);
}
Ptr<DensePyrLKOpticalFlow> cv::cuda::DensePyrLKOpticalFlow::create(Size winSize, int maxLevel, int iters, bool useInitialFlow)
{
return makePtr<DensePyrLKOpticalFlowImpl>(winSize, maxLevel, iters, useInitialFlow);
}
#endif /* !defined (HAVE_CUDA) */
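For reference, a minimal sketch of driving the refactored factory interface above from user code. This is an assumption-laden illustration, not part of the diff: frame0, frame1 and points are hypothetical, already-uploaded GpuMats (8-bit frames and a 1xN CV_32FC2 point row), and the default arguments are assumed to live in the public cudaoptflow header.
#include <opencv2/cudaoptflow.hpp>
using namespace cv;
void trackPointsSketch(const cuda::GpuMat& frame0, const cuda::GpuMat& frame1, const cuda::GpuMat& points)
{
// winSize, maxLevel, iters, useInitialFlow -- mirroring the factory parameters above
Ptr<cuda::SparsePyrLKOpticalFlow> lk = cuda::SparsePyrLKOpticalFlow::create(Size(21, 21), 3, 30, false);
cuda::GpuMat nextPoints, status;
// err is optional (noArray()); work is enqueued on the default stream
lk->calc(frame0, frame1, points, nextPoints, status, noArray(), cuda::Stream::Null());
}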

View File

@@ -44,257 +44,338 @@
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
Ptr<OpticalFlowDual_TVL1> cv::cuda::OpticalFlowDual_TVL1::create(double, double, double, int, int, double, int, double, double, bool) { throw_no_cuda(); return Ptr<OpticalFlowDual_TVL1>(); }
#else
using namespace cv;
using namespace cv::cuda;
namespace tvl1flow
{
void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream);
void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y,
PtrStepSzf u1, PtrStepSzf u2,
PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy,
PtrStepSzf grad, PtrStepSzf rho,
cudaStream_t stream);
void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy,
PtrStepSzf grad, PtrStepSzf rho_c,
PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32,
PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error,
float l_t, float theta, float gamma, bool calcError,
cudaStream_t stream);
void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3,
PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32,
float taut, float gamma,
cudaStream_t stream);
}
namespace
{
class OpticalFlowDual_TVL1_Impl : public OpticalFlowDual_TVL1
{
public:
OpticalFlowDual_TVL1_Impl(double tau, double lambda, double theta, int nscales, int warps, double epsilon,
int iterations, double scaleStep, double gamma, bool useInitialFlow) :
tau_(tau), lambda_(lambda), gamma_(gamma), theta_(theta), nscales_(nscales), warps_(warps),
epsilon_(epsilon), iterations_(iterations), scaleStep_(scaleStep), useInitialFlow_(useInitialFlow)
{
}
virtual double getTau() const { return tau_; }
virtual void setTau(double tau) { tau_ = tau; }
virtual double getLambda() const { return lambda_; }
virtual void setLambda(double lambda) { lambda_ = lambda; }
virtual double getGamma() const { return gamma_; }
virtual void setGamma(double gamma) { gamma_ = gamma; }
virtual double getTheta() const { return theta_; }
virtual void setTheta(double theta) { theta_ = theta; }
virtual int getNumScales() const { return nscales_; }
virtual void setNumScales(int nscales) { nscales_ = nscales; }
virtual int getNumWarps() const { return warps_; }
virtual void setNumWarps(int warps) { warps_ = warps; }
virtual double getEpsilon() const { return epsilon_; }
virtual void setEpsilon(double epsilon) { epsilon_ = epsilon; }
virtual int getNumIterations() const { return iterations_; }
virtual void setNumIterations(int iterations) { iterations_ = iterations; }
virtual double getScaleStep() const { return scaleStep_; }
virtual void setScaleStep(double scaleStep) { scaleStep_ = scaleStep; }
virtual bool getUseInitialFlow() const { return useInitialFlow_; }
virtual void setUseInitialFlow(bool useInitialFlow) { useInitialFlow_ = useInitialFlow; }
virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow, Stream& stream);
private:
double tau_;
double lambda_;
double gamma_;
double theta_;
int nscales_;
int warps_;
double epsilon_;
int iterations_;
double scaleStep_;
bool useInitialFlow_;
private:
void calcImpl(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, Stream& stream);
void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3, Stream& stream);
std::vector<GpuMat> I0s;
std::vector<GpuMat> I1s;
std::vector<GpuMat> u1s;
std::vector<GpuMat> u2s;
std::vector<GpuMat> u3s;
GpuMat I1x_buf;
GpuMat I1y_buf;
GpuMat I1w_buf;
GpuMat I1wx_buf;
GpuMat I1wy_buf;
GpuMat grad_buf;
GpuMat rho_c_buf;
GpuMat p11_buf;
GpuMat p12_buf;
GpuMat p21_buf;
GpuMat p22_buf;
GpuMat p31_buf;
GpuMat p32_buf;
GpuMat diff_buf;
GpuMat norm_buf;
};
void OpticalFlowDual_TVL1_Impl::calc(InputArray _frame0, InputArray _frame1, InputOutputArray _flow, Stream& stream)
{
const GpuMat frame0 = _frame0.getGpuMat();
const GpuMat frame1 = _frame1.getGpuMat();
BufferPool pool(stream);
GpuMat flowx = pool.getBuffer(frame0.size(), CV_32FC1);
GpuMat flowy = pool.getBuffer(frame0.size(), CV_32FC1);
calcImpl(frame0, frame1, flowx, flowy, stream);
GpuMat flows[] = {flowx, flowy};
cuda::merge(flows, 2, _flow, stream);
}
void OpticalFlowDual_TVL1_Impl::calcImpl(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, Stream& stream)
{
CV_Assert( I0.type() == CV_8UC1 || I0.type() == CV_32FC1 );
CV_Assert( I0.size() == I1.size() );
CV_Assert( I0.type() == I1.type() );
CV_Assert( !useInitialFlow_ || (flowx.size() == I0.size() && flowx.type() == CV_32FC1 && flowy.size() == flowx.size() && flowy.type() == flowx.type()) );
CV_Assert( nscales_ > 0 );
// allocate memory for the pyramid structure
I0s.resize(nscales_);
I1s.resize(nscales_);
u1s.resize(nscales_);
u2s.resize(nscales_);
u3s.resize(nscales_);
I0.convertTo(I0s[0], CV_32F, I0.depth() == CV_8U ? 1.0 : 255.0, stream);
I1.convertTo(I1s[0], CV_32F, I1.depth() == CV_8U ? 1.0 : 255.0, stream);
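// 8-bit input is used as-is; float input is assumed to lie in [0, 1] and is rescaled to the same [0, 255] range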
if (!useInitialFlow_)
{
flowx.create(I0.size(), CV_32FC1);
flowy.create(I0.size(), CV_32FC1);
}
u1s[0] = flowx;
u2s[0] = flowy;
if (gamma_)
{
u3s[0].create(I0.size(), CV_32FC1);
}
I1x_buf.create(I0.size(), CV_32FC1);
I1y_buf.create(I0.size(), CV_32FC1);
I1w_buf.create(I0.size(), CV_32FC1);
I1wx_buf.create(I0.size(), CV_32FC1);
I1wy_buf.create(I0.size(), CV_32FC1);
grad_buf.create(I0.size(), CV_32FC1);
rho_c_buf.create(I0.size(), CV_32FC1);
p11_buf.create(I0.size(), CV_32FC1);
p12_buf.create(I0.size(), CV_32FC1);
p21_buf.create(I0.size(), CV_32FC1);
p22_buf.create(I0.size(), CV_32FC1);
if (gamma_)
{
p31_buf.create(I0.size(), CV_32FC1);
p32_buf.create(I0.size(), CV_32FC1);
}
diff_buf.create(I0.size(), CV_32FC1);
// create the scales
for (int s = 1; s < nscales_; ++s)
{
cuda::resize(I0s[s-1], I0s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream);
cuda::resize(I1s[s-1], I1s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream);
if (I0s[s].cols < 16 || I0s[s].rows < 16)
{
nscales_ = s;
break;
}
if (useInitialFlow_)
{
cuda::resize(u1s[s-1], u1s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream);
cuda::resize(u2s[s-1], u2s[s], Size(), scaleStep_, scaleStep_, INTER_LINEAR, stream);
cuda::multiply(u1s[s], Scalar::all(scaleStep_), u1s[s], 1, -1, stream);
cuda::multiply(u2s[s], Scalar::all(scaleStep_), u2s[s], 1, -1, stream);
}
else
{
u1s[s].create(I0s[s].size(), CV_32FC1);
u2s[s].create(I0s[s].size(), CV_32FC1);
}
if (gamma_)
{
u3s[s].create(I0s[s].size(), CV_32FC1);
}
}
if (!useInitialFlow_)
{
u1s[nscales_-1].setTo(Scalar::all(0), stream);
u2s[nscales_-1].setTo(Scalar::all(0), stream);
}
if (gamma_)
{
u3s[nscales_ - 1].setTo(Scalar::all(0), stream);
}
// pyramidal structure for computing the optical flow
for (int s = nscales_ - 1; s >= 0; --s)
{
// compute the optical flow at the current scale
procOneScale(I0s[s], I1s[s], u1s[s], u2s[s], u3s[s], stream);
// if this was the last scale, finish now
if (s == 0)
break;
// otherwise, upsample the optical flow
// zoom the optical flow for the next finer scale
cuda::resize(u1s[s], u1s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream);
cuda::resize(u2s[s], u2s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream);
if (gamma_)
{
cuda::resize(u3s[s], u3s[s - 1], I0s[s - 1].size(), 0, 0, INTER_LINEAR, stream);
}
// scale the optical flow with the appropriate zoom factor
cuda::multiply(u1s[s - 1], Scalar::all(1/scaleStep_), u1s[s - 1], 1, -1, stream);
cuda::multiply(u2s[s - 1], Scalar::all(1/scaleStep_), u2s[s - 1], 1, -1, stream);
}
}
void OpticalFlowDual_TVL1_Impl::procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2, GpuMat& u3, Stream& _stream)
{
using namespace tvl1flow;
cudaStream_t stream = StreamAccessor::getStream(_stream);
const double scaledEpsilon = epsilon_ * epsilon_ * I0.size().area();
CV_DbgAssert( I1.size() == I0.size() );
CV_DbgAssert( I1.type() == I0.type() );
CV_DbgAssert( u1.size() == I0.size() );
CV_DbgAssert( u2.size() == u1.size() );
GpuMat I1x = I1x_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat I1y = I1y_buf(Rect(0, 0, I0.cols, I0.rows));
centeredGradient(I1, I1x, I1y, stream);
GpuMat I1w = I1w_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat I1wx = I1wx_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat I1wy = I1wy_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat grad = grad_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat rho_c = rho_c_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p11 = p11_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p12 = p12_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p21 = p21_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p22 = p22_buf(Rect(0, 0, I0.cols, I0.rows));
GpuMat p31, p32;
if (gamma_)
{
p31 = p31_buf(Rect(0, 0, I0.cols, I0.rows));
p32 = p32_buf(Rect(0, 0, I0.cols, I0.rows));
}
p11.setTo(Scalar::all(0), _stream);
p12.setTo(Scalar::all(0), _stream);
p21.setTo(Scalar::all(0), _stream);
p22.setTo(Scalar::all(0), _stream);
if (gamma_)
{
p31.setTo(Scalar::all(0), _stream);
p32.setTo(Scalar::all(0), _stream);
}
GpuMat diff = diff_buf(Rect(0, 0, I0.cols, I0.rows));
const float l_t = static_cast<float>(lambda_ * theta_);
const float taut = static_cast<float>(tau_ / theta_);
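// l_t scales the thresholding step on the data residual rho, taut the dual variable update, per the TV-L1 scheme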
for (int warpings = 0; warpings < warps_; ++warpings)
{
warpBackward(I0, I1, I1x, I1y, u1, u2, I1w, I1wx, I1wy, grad, rho_c, stream);
double error = std::numeric_limits<double>::max();
double prevError = 0.0;
for (int n = 0; error > scaledEpsilon && n < iterations_; ++n)
{
// evaluate the stopping criterion only on alternate iterations: cuda::sum is an expensive reduction
bool calcError = (epsilon_ > 0) && (n & 0x1) && (prevError < scaledEpsilon);
estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, diff, l_t, static_cast<float>(theta_), gamma_, calcError, stream);
if (calcError)
{
_stream.waitForCompletion();
error = cuda::sum(diff, norm_buf)[0];
prevError = error;
}
else
{
error = std::numeric_limits<double>::max();
prevError -= scaledEpsilon;
}
estimateDualVariables(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma_, stream);
}
}
}
}
Ptr<OpticalFlowDual_TVL1> cv::cuda::OpticalFlowDual_TVL1::create(
double tau, double lambda, double theta, int nscales, int warps,
double epsilon, int iterations, double scaleStep, double gamma, bool useInitialFlow)
{
return makePtr<OpticalFlowDual_TVL1_Impl>(tau, lambda, theta, nscales, warps,
epsilon, iterations, scaleStep, gamma, useInitialFlow);
}
#endif // !defined HAVE_CUDA || defined(CUDA_DISABLER)
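Similarly, a minimal sketch of the new TV-L1 interface, assuming two hypothetical, already-uploaded CV_8UC1 frames; the numeric arguments mirror the factory's parameter order (tau, lambda, theta, nscales, warps, epsilon, iterations, scaleStep, gamma, useInitialFlow).
#include <opencv2/cudaoptflow.hpp>
using namespace cv;
void tvl1Sketch(const cuda::GpuMat& frame0, const cuda::GpuMat& frame1)
{
Ptr<cuda::OpticalFlowDual_TVL1> tvl1 = cuda::OpticalFlowDual_TVL1::create(0.25, 0.15, 0.3, 5, 5, 0.01, 300, 0.8, 0.0, false);
cuda::GpuMat flow; // receives the merged CV_32FC2 (u, v) field
tvl1->calc(frame0, frame1, flow, cuda::Stream::Null());
}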
