diff --git a/3rdparty/libjpeg/CMakeLists.txt b/3rdparty/libjpeg/CMakeLists.txt index 028a583cff..65a9d1c8aa 100644 --- a/3rdparty/libjpeg/CMakeLists.txt +++ b/3rdparty/libjpeg/CMakeLists.txt @@ -9,7 +9,7 @@ ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR}) file(GLOB lib_srcs *.c) file(GLOB lib_hdrs *.h) -if(ANDROID OR IOS) +if(ANDROID OR IOS OR APPLE) ocv_list_filterout(lib_srcs jmemansi.c) else() ocv_list_filterout(lib_srcs jmemnobs.c) diff --git a/3rdparty/readme.txt b/3rdparty/readme.txt index ca46fbd576..64e2563a85 100644 --- a/3rdparty/readme.txt +++ b/3rdparty/readme.txt @@ -1,5 +1,5 @@ This folder contains libraries and headers of a few very popular still image codecs -used by highgui module. +used by imgcodecs module. The libraries and headers are preferably to build Win32 and Win64 versions of OpenCV. On UNIX systems all the libraries are automatically detected by configure script. In order to use these versions of libraries instead of system ones on UNIX systems you @@ -11,7 +11,7 @@ libjpeg 8d (8.4) - The Independent JPEG Group's JPEG software. See IGJ home page http://www.ijg.org for details and links to the source code - HAVE_JPEG preprocessor flag must be set to make highgui use libjpeg. + HAVE_JPEG preprocessor flag must be set to make imgcodecs use libjpeg. On UNIX systems configure script takes care of it. ------------------------------------------------------------------------------------ libpng 1.5.12 - Portable Network Graphics library. @@ -19,7 +19,7 @@ libpng 1.5.12 - Portable Network Graphics library. See libpng home page http://www.libpng.org for details and links to the source code - HAVE_PNG preprocessor flag must be set to make highgui use libpng. + HAVE_PNG preprocessor flag must be set to make imgcodecs use libpng. On UNIX systems configure script takes care of it. ------------------------------------------------------------------------------------ libtiff 4.0.2 - Tag Image File Format (TIFF) Software @@ -28,7 +28,7 @@ libtiff 4.0.2 - Tag Image File Format (TIFF) Software See libtiff home page http://www.remotesensing.org/libtiff/ for details and links to the source code - HAVE_TIFF preprocessor flag must be set to make highgui use libtiff. + HAVE_TIFF preprocessor flag must be set to make imgcodecs use libtiff. On UNIX systems configure script takes care of it. In this build support for ZIP (LZ77 compression) is turned on. ------------------------------------------------------------------------------------ @@ -37,7 +37,7 @@ zlib 1.2.7 - General purpose LZ77 compression library See zlib home page http://www.zlib.net for details and links to the source code - No preprocessor definition is needed to make highgui use this library - + No preprocessor definition is needed to make imgcodecs use this library - it is included automatically if either libpng or libtiff are used. 
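For context on how these codec libraries are reached after the rename, a minimal sketch (not part of this patch; file names are placeholders) of decoding and re-encoding an image through the new imgcodecs module:

    #include <opencv2/core.hpp>
    #include <opencv2/imgcodecs.hpp>   // imread/imwrite now live here instead of highgui

    int main()
    {
        // libjpeg/libpng/libtiff from this folder perform the actual decoding/encoding
        cv::Mat img = cv::imread("input.jpg", cv::IMREAD_COLOR);
        if (img.empty())
            return 1;
        return cv::imwrite("output.png", img) ? 0 : 1;
    }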
------------------------------------------------------------------------------------ jasper-1.900.1 - JasPer is a collection of software diff --git a/CMakeLists.txt b/CMakeLists.txt index 5abf449804..dd2054d977 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,6 +6,8 @@ # # ---------------------------------------------------------------------------- + + include(cmake/OpenCVMinDepVersions.cmake) if(CMAKE_GENERATOR MATCHES Xcode AND XCODE_VERSION VERSION_GREATER 4.3) @@ -135,6 +137,7 @@ OCV_OPTION(WITH_WEBP "Include WebP support" ON OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ON IF (NOT IOS) ) OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID) ) OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF IF (NOT ANDROID AND NOT IOS) ) +OCV_OPTION(WITH_OPENNI2 "Include OpenNI2 support" OFF IF (NOT ANDROID AND NOT IOS) ) OCV_OPTION(WITH_PNG "Include PNG support" ON) OCV_OPTION(WITH_PVAPI "Include Prosilica GigE support" ON IF (NOT ANDROID AND NOT IOS) ) OCV_OPTION(WITH_GIGEAPI "Include Smartek GigE support" ON IF (NOT ANDROID AND NOT IOS) ) @@ -148,8 +151,8 @@ OCV_OPTION(WITH_TIFF "Include TIFF support" ON OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) ) OCV_OPTION(WITH_LIBV4L "Use libv4l for Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) ) -OCV_OPTION(WITH_DSHOW "Build HighGUI with DirectShow support" ON IF (WIN32 AND NOT ARM) ) -OCV_OPTION(WITH_MSMF "Build HighGUI with Media Foundation support" OFF IF WIN32 ) +OCV_OPTION(WITH_DSHOW "Build VideoIO with DirectShow support" ON IF (WIN32 AND NOT ARM) ) +OCV_OPTION(WITH_MSMF "Build VideoIO with Media Foundation support" OFF IF WIN32 ) OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF IF (NOT ANDROID AND NOT APPLE) ) OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_CLP "Include Clp support (EPL)" OFF) @@ -865,6 +868,11 @@ if(DEFINED WITH_OPENNI) THEN "YES (${OPENNI_PRIME_SENSOR_MODULE})" ELSE NO) endif(DEFINED WITH_OPENNI) +if(DEFINED WITH_OPENNI2) + status(" OpenNI2:" HAVE_OPENNI2 THEN "YES (ver ${OPENNI2_VERSION_STRING}, build ${OPENNI2_VERSION_BUILD})" + ELSE NO) +endif(DEFINED WITH_OPENNI2) + if(DEFINED WITH_PVAPI) status(" PvAPI:" HAVE_PVAPI THEN YES ELSE NO) endif(DEFINED WITH_PVAPI) diff --git a/apps/traincascade/CMakeLists.txt b/apps/traincascade/CMakeLists.txt index e560ed815c..cca56361e3 100644 --- a/apps/traincascade/CMakeLists.txt +++ b/apps/traincascade/CMakeLists.txt @@ -1,4 +1,4 @@ -set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_photo opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d) +set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_photo opencv_objdetect opencv_imgcodecs opencv_videoio opencv_highgui opencv_calib3d opencv_video opencv_features2d) ocv_check_dependencies(${OPENCV_TRAINCASCADE_DEPS}) if(NOT OCV_DEPENDENCIES_FOUND) diff --git a/apps/traincascade/imagestorage.cpp b/apps/traincascade/imagestorage.cpp index e69a7df1ad..2f791240e4 100644 --- a/apps/traincascade/imagestorage.cpp +++ b/apps/traincascade/imagestorage.cpp @@ -1,6 +1,7 @@ #include "opencv2/core.hpp" +#include "opencv2/core/core_c.h" #include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "imagestorage.h" #include diff --git a/apps/traincascade/imagestorage.h b/apps/traincascade/imagestorage.h index 
fb68e25cb2..38ca52a8b5 100644 --- a/apps/traincascade/imagestorage.h +++ b/apps/traincascade/imagestorage.h @@ -1,9 +1,6 @@ #ifndef _OPENCV_IMAGESTORAGE_H_ #define _OPENCV_IMAGESTORAGE_H_ -#include "highgui.h" - - class CvCascadeImageReader { diff --git a/cmake/OpenCVConfig.cmake b/cmake/OpenCVConfig.cmake index 2d80f765b5..896a1901c1 100644 --- a/cmake/OpenCVConfig.cmake +++ b/cmake/OpenCVConfig.cmake @@ -11,7 +11,7 @@ # # Or you can search for specific OpenCV modules: # -# FIND_PACKAGE(OpenCV REQUIRED core highgui) +# FIND_PACKAGE(OpenCV REQUIRED core imgcodecs) # # If the module is found then OPENCV__FOUND is set to TRUE. # diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 5520d05521..fec7be4d84 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -131,7 +131,7 @@ if(WITH_1394) if(HAVE_DC1394_2) ocv_parse_pkg("libdc1394-2" "${DC1394_2_LIB_DIR}/pkgconfig" "") ocv_include_directories(${DC1394_2_INCLUDE_PATH}) - set(HIGHGUI_LIBRARIES ${HIGHGUI_LIBRARIES} + set(VIDEOIO_LIBRARIES ${VIDEOIO_LIBRARIES} "${DC1394_2_LIB_DIR}/libdc1394.a" "${CMU1394_LIB_DIR}/lib1394camera.a") endif(HAVE_DC1394_2) @@ -166,6 +166,11 @@ if(WITH_OPENNI) include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindOpenNI.cmake") endif(WITH_OPENNI) +ocv_clear_vars(HAVE_OPENNI2) +if(WITH_OPENNI2) + include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindOpenNI2.cmake") +endif(WITH_OPENNI2) + # --- XIMEA --- ocv_clear_vars(HAVE_XIMEA) if(WITH_XIMEA) @@ -234,7 +239,7 @@ if(WITH_FFMPEG) endif() endif(FFMPEG_INCLUDE_DIR) if(HAVE_FFMPEG) - set(HIGHGUI_LIBRARIES ${HIGHGUI_LIBRARIES} "${FFMPEG_LIB_DIR}/libavcodec.a" + set(VIDEOIO_LIBRARIES ${VIDEOIO_LIBRARIES} "${FFMPEG_LIB_DIR}/libavcodec.a" "${FFMPEG_LIB_DIR}/libavformat.a" "${FFMPEG_LIB_DIR}/libavutil.a" "${FFMPEG_LIB_DIR}/libswscale.a") ocv_include_directories(${FFMPEG_INCLUDE_DIR}) @@ -253,14 +258,15 @@ if(WITH_MSMF) check_include_file(Mfapi.h HAVE_MSMF) endif(WITH_MSMF) -# --- Extra HighGUI libs on Windows --- +# --- Extra HighGUI and VideoIO libs on Windows --- if(WIN32) - list(APPEND HIGHGUI_LIBRARIES comctl32 gdi32 ole32 setupapi ws2_32 vfw32) + list(APPEND HIGHGUI_LIBRARIES comctl32 gdi32 ole32 setupapi ws2_32) + list(APPEND VIDEOIO_LIBRARIES vfw32) if(MINGW64) - list(APPEND HIGHGUI_LIBRARIES avifil32 avicap32 winmm msvfw32) - list(REMOVE_ITEM HIGHGUI_LIBRARIES vfw32) + list(APPEND VIDEOIO_LIBRARIES avifil32 avicap32 winmm msvfw32) + list(REMOVE_ITEM VIDEOIO_LIBRARIES vfw32) elseif(MINGW) - list(APPEND HIGHGUI_LIBRARIES winmm) + list(APPEND VIDEOIO_LIBRARIES winmm) endif() endif(WIN32) diff --git a/cmake/OpenCVFindOpenNI2.cmake b/cmake/OpenCVFindOpenNI2.cmake new file mode 100644 index 0000000000..8a5f47ca7c --- /dev/null +++ b/cmake/OpenCVFindOpenNI2.cmake @@ -0,0 +1,61 @@ +# Main variables: +# OPENNI2_LIBRARY and OPENNI2_INCLUDES to link OpenCV modules with OpenNI2 +# HAVE_OPENNI2 for conditional compilation OpenCV with/without OpenNI2 + +if(NOT "${OPENNI2_LIB_DIR}" STREQUAL "${OPENNI2_LIB_DIR_INTERNAL}") + unset(OPENNI2_LIBRARY CACHE) + unset(OPENNI2_LIB_DIR CACHE) +endif() + +if(NOT "${OPENNI2_INCLUDE_DIR}" STREQUAL "${OPENNI2_INCLUDE_DIR_INTERNAL}") + unset(OPENNI2_INCLUDES CACHE) + unset(OPENNI2_INCLUDE_DIR CACHE) +endif() + +if(WIN32) + if(NOT (MSVC64 OR MINGW64)) + find_file(OPENNI2_INCLUDES "OpenNI.h" PATHS "$ENV{OPEN_NI_INSTALL_PATH}Include" DOC "OpenNI2 c++ interface header") + find_library(OPENNI2_LIBRARY "OpenNI2" PATHS $ENV{OPENNI2_LIB} DOC "OpenNI2 library") + else() + find_file(OPENNI2_INCLUDES "OpenNI.h" 
PATHS "$ENV{OPEN_NI_INSTALL_PATH64}Include" DOC "OpenNI2 c++ interface header") + find_library(OPENNI2_LIBRARY "OpenNI2" PATHS $ENV{OPENNI2_LIB64} DOC "OpenNI2 library") + endif() +elseif(UNIX OR APPLE) + find_file(OPENNI_INCLUDES "OpenNI.h" PATHS "/usr/include/ni2" "/usr/include/openni2" DOC "OpenNI2 c++ interface header") + find_library(OPENNI_LIBRARY "OpenNI2" PATHS "/usr/lib" DOC "OpenNI2 library") +endif() + +if(OPENNI2_LIBRARY AND OPENNI2_INCLUDES) + set(HAVE_OPENNI2 TRUE) +endif() #if(OPENNI_LIBRARY AND OPENNI_INCLUDES) + +get_filename_component(OPENNI2_LIB_DIR "${OPENNI2_LIBRARY}" PATH) +get_filename_component(OPENNI2_INCLUDE_DIR ${OPENNI2_INCLUDES} PATH) + +if(HAVE_OPENNI2) + set(OPENNI2_LIB_DIR "${OPENNI2_LIB_DIR}" CACHE PATH "Path to OpenNI2 libraries" FORCE) + set(OPENNI2_INCLUDE_DIR "${OPENNI2_INCLUDE_DIR}" CACHE PATH "Path to OpenNI2 headers" FORCE) +endif() + +if(OPENNI2_LIBRARY) + set(OPENNI2_LIB_DIR_INTERNAL "${OPENNI2_LIB_DIR}" CACHE INTERNAL "This is the value of the last time OPENNI_LIB_DIR was set successfully." FORCE) +else() + message( WARNING, " OpenNI2 library directory (set by OPENNI2_LIB_DIR variable) is not found or does not have OpenNI2 libraries." ) +endif() + +if(OPENNI2_INCLUDES) + set(OPENNI2_INCLUDE_DIR_INTERNAL "${OPENNI2_INCLUDE_DIR}" CACHE INTERNAL "This is the value of the last time OPENNI2_INCLUDE_DIR was set successfully." FORCE) +else() + message( WARNING, " OpenNI2 include directory (set by OPENNI2_INCLUDE_DIR variable) is not found or does not have OpenNI2 include files." ) +endif() + +mark_as_advanced(FORCE OPENNI2_LIBRARY) +mark_as_advanced(FORCE OPENNI2_INCLUDES) + +if(HAVE_OPENNI2) + ocv_parse_header("${OPENNI2_INCLUDE_DIR}/OniVersion.h" ONI_VERSION_LINE ONI_VERSION_MAJOR ONI_VERSION_MINOR ONI_VERSION_MAINTENANCE ONI_VERSION_BUILD) + if(ONI_VERSION_MAJOR) + set(OPENNI2_VERSION_STRING ${ONI_VERSION_MAJOR}.${ONI_VERSION_MINOR}.${ONI_VERSION_MAINTENANCE} CACHE INTERNAL "OpenNI2 version") + set(OPENNI2_VERSION_BUILD ${ONI_VERSION_BUILD} CACHE INTERNAL "OpenNI2 build version") + endif() +endif() diff --git a/cmake/OpenCVGenInfoPlist.cmake b/cmake/OpenCVGenInfoPlist.cmake index 97c674ceb7..db418d1253 100644 --- a/cmake/OpenCVGenInfoPlist.cmake +++ b/cmake/OpenCVGenInfoPlist.cmake @@ -1,4 +1,7 @@ if(IOS) configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.plist.in" "${CMAKE_BINARY_DIR}/ios/Info.plist") +elseif(APPLE) + configure_file("${OpenCV_SOURCE_DIR}/platforms/osx/Info.plist.in" + "${CMAKE_BINARY_DIR}/osx/Info.plist") endif() diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index e6fa199119..3f4da5f106 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -704,8 +704,8 @@ function(ocv_add_perf_tests) if(BUILD_PERF_TESTS AND EXISTS "${perf_path}") __ocv_parse_test_sources(PERF ${ARGN}) - # opencv_highgui is required for imread/imwrite - set(perf_deps ${the_module} opencv_ts opencv_highgui ${OPENCV_PERF_${the_module}_DEPS} ${OPENCV_MODULE_opencv_ts_DEPS}) + # opencv_imgcodecs is required for imread/imwrite + set(perf_deps ${the_module} opencv_ts opencv_imgcodecs ${OPENCV_PERF_${the_module}_DEPS} ${OPENCV_MODULE_opencv_ts_DEPS}) ocv_check_dependencies(${perf_deps}) if(OCV_DEPENDENCIES_FOUND) @@ -757,8 +757,8 @@ function(ocv_add_accuracy_tests) if(BUILD_TESTS AND EXISTS "${test_path}") __ocv_parse_test_sources(TEST ${ARGN}) - # opencv_highgui is required for imread/imwrite - set(test_deps ${the_module} opencv_ts opencv_highgui ${OPENCV_TEST_${the_module}_DEPS} ${OPENCV_MODULE_opencv_ts_DEPS}) + # 
opencv_imgcodecs is required for imread/imwrite + set(test_deps ${the_module} opencv_ts opencv_imgcodecs opencv_videoio ${OPENCV_TEST_${the_module}_DEPS} ${OPENCV_MODULE_opencv_ts_DEPS}) ocv_check_dependencies(${test_deps}) if(OCV_DEPENDENCIES_FOUND) @@ -811,7 +811,7 @@ function(ocv_add_samples) string(REGEX REPLACE "^opencv_" "" module_id ${the_module}) if(BUILD_EXAMPLES AND EXISTS "${samples_path}") - set(samples_deps ${the_module} ${OPENCV_MODULE_${the_module}_DEPS} opencv_highgui ${ARGN}) + set(samples_deps ${the_module} ${OPENCV_MODULE_${the_module}_DEPS} opencv_imgcodecs opencv_videoio opencv_highgui ${ARGN}) ocv_check_dependencies(${samples_deps}) if(OCV_DEPENDENCIES_FOUND) diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index f2a0197f82..d8171770de 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -265,16 +265,19 @@ macro(CHECK_MODULE module_name define) set(${define} 1) foreach(P "${ALIAS_INCLUDE_DIRS}") if(${P}) + list(APPEND VIDEOIO_INCLUDE_DIRS ${${P}}) list(APPEND HIGHGUI_INCLUDE_DIRS ${${P}}) endif() endforeach() foreach(P "${ALIAS_LIBRARY_DIRS}") if(${P}) + list(APPEND VIDEOIO_LIBRARY_DIRS ${${P}}) list(APPEND HIGHGUI_LIBRARY_DIRS ${${P}}) endif() endforeach() + list(APPEND VIDEOIO_LIBRARIES ${${ALIAS_LIBRARIES}}) list(APPEND HIGHGUI_LIBRARIES ${${ALIAS_LIBRARIES}}) endif() endif() diff --git a/cmake/templates/OpenCVConfig.cmake.in b/cmake/templates/OpenCVConfig.cmake.in index 24a9374ef8..e3bde4bbe3 100644 --- a/cmake/templates/OpenCVConfig.cmake.in +++ b/cmake/templates/OpenCVConfig.cmake.in @@ -12,7 +12,7 @@ # # Or you can search for specific OpenCV modules: # -# find_package(OpenCV REQUIRED core highgui) +# find_package(OpenCV REQUIRED core videoio) # # If the module is found then OPENCV__FOUND is set to TRUE. 
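To illustrate what a downstream project links against after this split, a minimal sketch (not part of this patch; the camera index is arbitrary) of a consumer configured with find_package(OpenCV REQUIRED core videoio):

    #include <opencv2/core.hpp>
    #include <opencv2/videoio.hpp>   // VideoCapture moved out of highgui into videoio

    int main()
    {
        cv::VideoCapture cap(0);     // default camera; a video file path works as well
        if (!cap.isOpened())
            return 1;
        cv::Mat frame;
        return (cap.read(frame) && !frame.empty()) ? 0 : 1;
    }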
# diff --git a/cmake/templates/cvconfig.h.in b/cmake/templates/cvconfig.h.in index f81049495a..3f77a1bbe7 100644 --- a/cmake/templates/cvconfig.h.in +++ b/cmake/templates/cvconfig.h.in @@ -129,6 +129,9 @@ /* OpenNI library */ #cmakedefine HAVE_OPENNI +/* OpenNI library */ +#cmakedefine HAVE_OPENNI2 + /* PNG codec */ #cmakedefine HAVE_PNG diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 1c284539ee..f796d474ca 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -33,7 +33,7 @@ if(BUILD_DOCS AND HAVE_SPHINX) endif() endforeach() - set(FIXED_ORDER_MODULES core imgproc highgui video calib3d features2d objdetect ml flann photo stitching nonfree contrib legacy) + set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching nonfree contrib legacy) list(REMOVE_ITEM BASE_MODULES ${FIXED_ORDER_MODULES}) diff --git a/doc/check_docs.py b/doc/check_docs.py index 0290fc70f4..96f62f068d 100755 --- a/doc/check_docs.py +++ b/doc/check_docs.py @@ -14,6 +14,8 @@ opencv_hdr_list = [ "../modules/video/include/opencv2/video/tracking.hpp", "../modules/video/include/opencv2/video/background_segm.hpp", "../modules/objdetect/include/opencv2/objdetect.hpp", +"../modules/imgcodecs/include/opencv2/imgcodecs.hpp", +"../modules/videoio/include/opencv2/videoio.hpp", "../modules/highgui/include/opencv2/highgui.hpp", ] @@ -24,6 +26,8 @@ opencv_module_list = [ "features2d", "video", "objdetect", +"imgcodecs", +"videoio", "highgui", "ml" ] diff --git a/doc/conf.py b/doc/conf.py index a0c231c3b5..a30ee8aea4 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -302,14 +302,16 @@ man_pages = [ extlinks = { 'basicstructures' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#%s', None), 'oldbasicstructures' : ('http://docs.opencv.org/modules/core/doc/old_basic_structures.html#%s', None), - 'readwriteimagevideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None), + 'readwriteimage' : ('http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html#%s', None), + 'readwritevideo' : ('http://docs.opencv.org/modules/videoio/doc/reading_and_writing_video.html#%s', None), 'operationsonarrays' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#%s', None), 'utilitysystemfunctions' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None), 'imgprocfilter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None), 'svms' : ('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None), 'drawingfunc' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None), 'xmlymlpers' : ('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None), - 'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None), + 'rwimg' : ('http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html#%s', None), + 'hgvideo' : ('http://docs.opencv.org/modules/videoio/doc/reading_and_writing_video.html#%s', None), 'gpuinit' : ('http://docs.opencv.org/modules/gpu/doc/initalization_and_information.html#%s', None), 'gpudatastructure' : ('http://docs.opencv.org/modules/gpu/doc/data_structures.html#%s', None), 'gpuopmatrices' : ('http://docs.opencv.org/modules/gpu/doc/operations_on_matrices.html#%s', None), @@ -329,8 +331,8 @@ extlinks = { 'how_to_contribute' : ('http://code.opencv.org/projects/opencv/wiki/How_to_contribute/%s', None), 
'cvt_color' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None), - 'imread' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None), - 'imwrite' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None), + 'imread' : ('http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html?highlight=imread#imread%s', None), + 'imwrite' : ('http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html?highlight=imwrite#imwrite%s', None), 'imshow' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None), 'named_window' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None), 'wait_key' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None), @@ -418,7 +420,7 @@ extlinks = { 'background_subtractor' : ('http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html?highlight=backgroundsubtractor#backgroundsubtractor%s', None), 'background_subtractor_mog' : ('http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html?highlight=backgroundsubtractorMOG#backgroundsubtractormog%s', None), 'background_subtractor_mog_two' : ('http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html?highlight=backgroundsubtractorMOG2#backgroundsubtractormog2%s', None), - 'video_capture' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=videocapture#videocapture%s', None), + 'video_capture' : ('http://docs.opencv.org/modules/videoio/doc/reading_and_writing_video.html?highlight=videocapture#videocapture%s', None), 'ippa_convert': ('http://docs.opencv.org/modules/core/doc/ipp_async_converters.html#%s', None), 'ptr':('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=Ptr#Ptr%s', None) } diff --git a/doc/opencv_cheatsheet.tex b/doc/opencv_cheatsheet.tex index d6c339916d..01d5c275d9 100644 --- a/doc/opencv_cheatsheet.tex +++ b/doc/opencv_cheatsheet.tex @@ -522,9 +522,9 @@ samples on what are the contours and how to use them. \begin{tabbing} \textbf{Wr}\=\textbf{iting and reading raster images}\\ -\texttt{\href{http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html\#imwrite}{imwrite}("myimage.jpg", image);}\\ -\texttt{Mat image\_color\_copy = \href{http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 1);}\\ -\texttt{Mat image\_grayscale\_copy = \href{http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html\#imread}{imread}("myimage.jpg", 0);}\\ +\texttt{\href{http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html\#imwrite}{imwrite}("myimage.jpg", image);}\\ +\texttt{Mat image\_color\_copy = \href{http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html\#imread}{imread}("myimage.jpg", 1);}\\ +\texttt{Mat image\_grayscale\_copy = \href{http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html\#imread}{imread}("myimage.jpg", 0);}\\ \end{tabbing} \emph{The functions can read/write images in the following formats: \textbf{BMP (.bmp), JPEG (.jpg, .jpeg), TIFF (.tif, .tiff), PNG (.png), PBM/PGM/PPM (.p?m), Sun Raster (.sr), JPEG 2000 (.jp2)}. 
Every format supports 8-bit, 1- or 3-channel images. Some formats (PNG, JPEG 2000) support 16 bits per channel.} diff --git a/doc/py_tutorials/py_gui/py_video_display/py_video_display.rst b/doc/py_tutorials/py_gui/py_video_display/py_video_display.rst index 56946bc5db..5bdf4fcb87 100644 --- a/doc/py_tutorials/py_gui/py_video_display/py_video_display.rst +++ b/doc/py_tutorials/py_gui/py_video_display/py_video_display.rst @@ -46,7 +46,7 @@ To capture a video, you need to create a **VideoCapture** object. Its argument c Sometimes, ``cap`` may not have initialized the capture. In that case, this code shows error. You can check whether it is initialized or not by the method **cap.isOpened()**. If it is True, OK. Otherwise open it using **cap.open()**. -You can also access some of the features of this video using **cap.get(propId)** method where propId is a number from 0 to 18. Each number denotes a property of the video (if it is applicable to that video) and full details can be seen here: `Property Identifier `_. Some of these values can be modified using **cap.set(propId, value)**. Value is the new value you want. +You can also access some of the features of this video using **cap.get(propId)** method where propId is a number from 0 to 18. Each number denotes a property of the video (if it is applicable to that video) and full details can be seen here: `Property Identifier `_. Some of these values can be modified using **cap.set(propId, value)**. Value is the new value you want. For example, I can check the frame width and height by ``cap.get(3)`` and ``cap.get(4)``. It gives me 640x480 by default. But I want to modify it to 320x240. Just use ``ret = cap.set(3,320)`` and ``ret = cap.set(4,240)``. diff --git a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst index b7cf446687..ca3d75dca3 100644 --- a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst +++ b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst @@ -25,7 +25,7 @@ Here's a sample usage of :operationsonarrays:`dft() ` : :language: cpp :linenos: :tab-width: 4 - :lines: 1-3, 5, 19-20, 23-78 + :lines: 1-4, 6, 20-21, 24-79 Explanation =========== diff --git a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst index b6a18fee88..6162985ddb 100644 --- a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst +++ b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst @@ -45,7 +45,7 @@ The final argument is optional. If given the image will be loaded in gray scale .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 48-60 + :lines: 49-61 Here we first use the C++ *stringstream* class to convert the third command line argument from text to an integer format. Then we use a simple look and the upper formula to calculate the lookup table. No OpenCV specific stuff here. @@ -99,7 +99,7 @@ When it comes to performance you cannot beat the classic C style operator[] (poi .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 125-152 + :lines: 126-153 Here we basically just acquire a pointer to the start of each row and go through it until it ends. 
In the special case that the matrix is stored in a continues manner we only need to request the pointer a single time and go all the way to the end. We need to look out for color images: we have three channels so we need to pass through three times more items in each row. @@ -122,7 +122,7 @@ In case of the efficient way making sure that you pass through the right amount .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 154-182 + :lines: 155-183 In case of color images we have three uchar items per column. This may be considered a short vector of uchar items, that has been baptized in OpenCV with the *Vec3b* name. To access the n-th sub column we use simple operator[] access. It's important to remember that OpenCV iterators go through the columns and automatically skip to the next row. Therefore in case of color images if you use a simple *uchar* iterator you'll be able to access only the blue channel values. @@ -134,7 +134,7 @@ The final method isn't recommended for scanning. It was made to acquire or modif .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 184-216 + :lines: 185-217 The functions takes your input type and coordinates and calculates on the fly the address of the queried item. Then returns a reference to that. This may be a constant when you *get* the value and non-constant when you *set* the value. As a safety step in **debug mode only*** there is performed a check that your input coordinates are valid and does exist. If this isn't the case you'll get a nice output message of this on the standard error output stream. Compared to the efficient way in release mode the only difference in using this is that for every element of the image you'll get a new row pointer for what we use the C operator[] to acquire the column element. @@ -148,14 +148,14 @@ This is a bonus method of achieving lookup table modification in an image. Becau .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 107-110 + :lines: 108-111 Finally call the function (I is our input image and J the output one): .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 115 + :lines: 116 Performance Difference ====================== diff --git a/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst b/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst index 9285509b07..9d4189363f 100644 --- a/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst +++ b/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst @@ -77,7 +77,7 @@ Now that you have the basics done :download:`here's <../../../../samples/cpp/tut :language: cpp :linenos: :tab-width: 4 - :lines: 1-9, 22-25, 27-44 + :lines: 1-10, 23-26, 29-46 Here you can observe that with the new structure we have no pointer problems, although it is possible to use the old functions and in the end just transform the result to a *Mat* object. 
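For readers following the shifted line ranges above, a compressed sketch (assuming 8-bit input and a caller-supplied 256-entry table; not the tutorial's verbatim code) of the pointer-based scan that how_to_scan_images.cpp benchmarks:

    #include <opencv2/core.hpp>

    cv::Mat& scanImageAndReduce(cv::Mat& I, const uchar* const table)
    {
        CV_Assert(I.depth() == CV_8U);
        int nRows = I.rows;
        int nCols = I.cols * I.channels();
        if (I.isContinuous())   // continuous storage: walk the whole image as one long row
        {
            nCols *= nRows;
            nRows = 1;
        }
        for (int i = 0; i < nRows; ++i)
        {
            uchar* p = I.ptr<uchar>(i);
            for (int j = 0; j < nCols; ++j)
                p[j] = table[p[j]];   // apply the lookup table in place
        }
        return I;
    }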
@@ -85,7 +85,7 @@ Here you can observe that with the new structure we have no pointer problems, al :language: cpp :linenos: :tab-width: 4 - :lines: 46-51 + :lines: 48-53 Because, we want to mess around with the images luma component we first convert from the default RGB to the YUV color space and then split the result up into separate planes. Here the program splits: in the first example it processes each plane using one of the three major image scanning algorithms in OpenCV (C [] operator, iterator, individual element access). In a second variant we add to the image some Gaussian noise and then mix together the channels according to some formula. @@ -95,7 +95,7 @@ The scanning version looks like: :language: cpp :linenos: :tab-width: 4 - :lines: 55-75 + :lines: 57-77 Here you can observe that we may go through all the pixels of an image in three fashions: an iterator, a C pointer and an individual element access style. You can read a more in-depth description of these in the :ref:`howToScanImagesOpenCV` tutorial. Converting from the old function names is easy. Just remove the cv prefix and use the new *Mat* data structure. Here's an example of this by using the weighted addition function: @@ -103,7 +103,7 @@ Here you can observe that we may go through all the pixels of an image in three :language: cpp :linenos: :tab-width: 4 - :lines: 79-112 + :lines: 81-113 As you may observe the *planes* variable is of type *Mat*. However, converting from *Mat* to *IplImage* is easy and made automatically with a simple assignment operator. @@ -111,7 +111,7 @@ As you may observe the *planes* variable is of type *Mat*. However, converting f :language: cpp :linenos: :tab-width: 4 - :lines: 115-127 + :lines: 117-129 The new *imshow* highgui function accepts both the *Mat* and *IplImage* data structures. Compile and run the program and if the first image below is your input you may get either the first or second as output: diff --git a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst index 736aceb02d..67517d32f1 100644 --- a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst +++ b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst @@ -86,7 +86,7 @@ Each of the building components has their own valid domains. This leads to the d Creating a *Mat* object explicitly ================================== -In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readwriteimagevideo:`imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices. +In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readwriteimage:`imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices. Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. 
You can create a Mat object in multiple ways: diff --git a/doc/tutorials/core/table_of_content_core/table_of_content_core.rst b/doc/tutorials/core/table_of_content_core/table_of_content_core.rst index 1fcf4ee2b1..ea5756da23 100644 --- a/doc/tutorials/core/table_of_content_core/table_of_content_core.rst +++ b/doc/tutorials/core/table_of_content_core/table_of_content_core.rst @@ -200,7 +200,6 @@ Here you will learn the about the basic building blocks of the library. A must r :height: 90pt :width: 90pt - =============== ====================================================== + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -221,8 +220,6 @@ Here you will learn the about the basic building blocks of the library. A must r :width: 90pt .. |Author_ElenaG| unicode:: Elena U+0020 Gvozdeva - =============== ====================================================== - .. raw:: latex \pagebreak diff --git a/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst b/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst index 6f5476cf05..8f63bf1a2e 100644 --- a/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst +++ b/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst @@ -22,7 +22,7 @@ As a test case where to show off these using OpenCV I've created a small program :language: cpp :linenos: :tab-width: 4 - :lines: 1-14, 28-29, 31-205 + :lines: 1-15, 29-31, 33-208 How to read a video stream (online-camera or offline-file)? =========================================================== diff --git a/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.rst b/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.rst index 248abdf6d2..0ad47863eb 100644 --- a/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.rst +++ b/doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.rst @@ -656,7 +656,7 @@ classes we're going to use: Results: Stored in vars *1, *2, *3, an exception in *e user=> (import '[org.opencv.core Mat Size CvType] - '[org.opencv.highgui Highgui] + '[org.opencv.imgcodecs Imgcodecs] '[org.opencv.imgproc Imgproc]) org.opencv.imgproc.Imgproc diff --git a/doc/tutorials/introduction/desktop_java/java_dev_intro.rst b/doc/tutorials/introduction/desktop_java/java_dev_intro.rst index d5cb31f894..513f39d106 100644 --- a/doc/tutorials/introduction/desktop_java/java_dev_intro.rst +++ b/doc/tutorials/introduction/desktop_java/java_dev_intro.rst @@ -373,7 +373,7 @@ Now modify src/main/java/HelloOpenCV.java so it contains the following Java code import org.opencv.core.Point; import org.opencv.core.Rect; import org.opencv.core.Scalar; - import org.opencv.highgui.Highgui; + import org.opencv.imgcodecs.Imgcodecs; import org.opencv.objdetect.CascadeClassifier; // @@ -387,7 +387,7 @@ Now modify src/main/java/HelloOpenCV.java so it contains the following Java code // Create a face detector from the cascade file in the resources // directory. CascadeClassifier faceDetector = new CascadeClassifier(getClass().getResource("/lbpcascade_frontalface.xml").getPath()); - Mat image = Highgui.imread(getClass().getResource("/lena.png").getPath()); + Mat image = Imgcodecs.imread(getClass().getResource("/lena.png").getPath()); // Detect faces in the image. // MatOfRect is a special container class for Rect. @@ -404,7 +404,7 @@ Now modify src/main/java/HelloOpenCV.java so it contains the following Java code // Save the visualized detection. 
String filename = "faceDetection.png"; System.out.println(String.format("Writing %s", filename)); - Highgui.imwrite(filename, image); + Imgcodecs.imwrite(filename, image); } } diff --git a/doc/tutorials/introduction/display_image/display_image.rst b/doc/tutorials/introduction/display_image/display_image.rst index 6b30b7c295..fc6e6ca5cc 100644 --- a/doc/tutorials/introduction/display_image/display_image.rst +++ b/doc/tutorials/introduction/display_image/display_image.rst @@ -39,28 +39,28 @@ You'll almost always end up using the: .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp :tab-width: 4 - :lines: 1-3 + :lines: 1-4 We also include the *iostream* to facilitate console line output and input. To avoid data structure and function name conflicts with other libraries, OpenCV has its own namespace: *cv*. To avoid the need appending prior each of these the *cv::* keyword you can import the namespace in the whole file by using the lines: .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp :tab-width: 4 - :lines: 5-6 + :lines: 6-7 This is true for the STL library too (used for console I/O). Now, let's analyze the *main* function. We start up assuring that we acquire a valid image name argument from the command line. .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp :tab-width: 4 - :lines: 10-14 + :lines: 11-15 Then create a *Mat* object that will store the data of the loaded image. .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp :tab-width: 4 - :lines: 16 + :lines: 17 Now we call the :imread:`imread <>` function which loads the image name specified by the first argument (*argv[1]*). The second argument specifies the format in what we want the image. This may be: @@ -73,7 +73,7 @@ Now we call the :imread:`imread <>` function which loads the image name specifie .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp :tab-width: 4 - :lines: 17 + :lines: 18 .. note:: @@ -88,21 +88,21 @@ After checking that the image data was loaded correctly, we want to display our .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp - :lines: 25 + :lines: 26 :tab-width: 4 Finally, to update the content of the OpenCV window with a new image use the :imshow:`imshow <>` function. Specify the OpenCV window name to update and the image to use during this operation: .. literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp - :lines: 26 + :lines: 27 :tab-width: 4 Because we want our window to be displayed until the user presses a key (otherwise the program would end far too quickly), we use the :wait_key:`waitKey <>` function whose only parameter is just how long should it wait for a user input (measured in milliseconds). Zero means to wait forever. .. 
literalinclude:: ../../../../samples/cpp/tutorial_code/introduction/display_image/display_image.cpp :language: cpp - :lines: 28 + :lines: 29 :tab-width: 4 Result diff --git a/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst b/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst index 5ae5062492..7696be4a13 100644 --- a/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst +++ b/doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst @@ -349,7 +349,7 @@ Now here's our recommendation for the structure of the tutorial (although, remem :language: cpp :linenos: :tab-width: 4 - :lines: 1-8, 21-22, 24- + :lines: 1-8, 21-23, 25- After the directive you specify a relative path to the file from what to import. It has four options: the language to use, if you add the ``:linenos:`` the line numbers will be shown, you can specify the tab size with the ``:tab-width:`` and you do not need to load the whole file, you can show just the important lines. Use the *lines* option to do not show redundant information (such as the *help* function). Here basically you specify ranges, if the second range line number is missing than that means that until the end of the file. The ranges specified here do no need to be in an ascending order, you may even reorganize the structure of how you want to show your sample inside the tutorial. @@ -361,16 +361,16 @@ Now here's our recommendation for the structure of the tutorial (although, remem # ---- External links for tutorials ----------------- extlinks = { - 'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None) + 'rwimg' : ('http://docs.opencv.org/modules/imgcodecs/doc/reading_and_writing_images.html#%s', None) } - In short here we defined a new **hgvideo** directive that refers to an external webpage link. Its usage is: + In short here we defined a new **rwimg** directive that refers to an external webpage link. Its usage is: .. code-block:: rst - A sample function of the highgui modules image write and read page is the :hgvideo:`imread() function `. + A sample function of the highgui modules image write and read page is the :rwimg:`imread() function `. - Which turns to: A sample function of the highgui modules image write and read page is the :hgvideo:`imread() function `. The argument you give between the <> will be put in place of the ``%s`` in the upper definition, and as the link will anchor to the correct function. To find out the anchor of a given function just open up a web page, search for the function and click on it. In the address bar it should appear like: ``http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#imread`` . Look here for the name of the directives for each page of the OpenCV reference manual. If none present for one of them feel free to add one for it. + Which turns to: A sample function of the highgui modules image write and read page is the :rwimg:`imread() function `. The argument you give between the <> will be put in place of the ``%s`` in the upper definition, and as the link will anchor to the correct function. To find out the anchor of a given function just open up a web page, search for the function and click on it. In the address bar it should appear like: ``http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images.html#imread`` . Look here for the name of the directives for each page of the OpenCV reference manual. 
If none present for one of them feel free to add one for it. For formulas you can add LATEX code that will translate in the web pages into images. You do this by using the *math* directive. A usage tip: diff --git a/doc/tutorials/introduction/load_save_image/load_save_image.rst b/doc/tutorials/introduction/load_save_image/load_save_image.rst index dec75c1958..cde81e12b5 100644 --- a/doc/tutorials/introduction/load_save_image/load_save_image.rst +++ b/doc/tutorials/introduction/load_save_image/load_save_image.rst @@ -5,7 +5,7 @@ Load, Modify, and Save an Image .. note:: - We assume that by now you know how to load an image using :readwriteimagevideo:`imread ` and to display it in a window (using :user_interface:`imshow `). Read the :ref:`Display_Image` tutorial otherwise. + We assume that by now you know how to load an image using :readwriteimage:`imread ` and to display it in a window (using :user_interface:`imshow `). Read the :ref:`Display_Image` tutorial otherwise. Goals ====== @@ -14,9 +14,9 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare - * Load an image using :readwriteimagevideo:`imread ` + * Load an image using :readwriteimage:`imread ` * Transform an image from BGR to Grayscale format by using :miscellaneous_transformations:`cvtColor ` - * Save your transformed image in a file on disk (using :readwriteimagevideo:`imwrite `) + * Save your transformed image in a file on disk (using :readwriteimage:`imwrite `) Code ====== @@ -62,7 +62,7 @@ Here it is: Explanation ============ -#. We begin by loading an image using :readwriteimagevideo:`imread `, located in the path given by *imageName*. For this example, assume you are loading a RGB image. +#. We begin by loading an image using :readwriteimage:`imread `, located in the path given by *imageName*. For this example, assume you are loading a RGB image. #. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations: @@ -76,9 +76,9 @@ Explanation * a source image (*image*) * a destination image (*gray_image*), in which we will save the converted image. - * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :readwriteimagevideo:`imread ` has BGR default channel order in case of color images). + * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :readwriteimage:`imread ` has BGR default channel order in case of color images). -#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :readwriteimagevideo:`imread `: :readwriteimagevideo:`imwrite ` +#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :readwriteimage:`imread `: :readwriteimage:`imwrite ` .. 
code-block:: cpp diff --git a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst index 5a68dad5c9..21f679c971 100644 --- a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst +++ b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst @@ -32,7 +32,7 @@ Image Watch works with any existing project that uses OpenCV image objects (for #include // std::cout #include // cv::Mat - #include // cv::imread() + #include // cv::imread() #include // cv::Canny() using namespace std; diff --git a/doc/tutorials/ios/video_processing/video_processing.rst b/doc/tutorials/ios/video_processing/video_processing.rst index 84ccfcf971..eb5da5b111 100644 --- a/doc/tutorials/ios/video_processing/video_processing.rst +++ b/doc/tutorials/ios/video_processing/video_processing.rst @@ -80,7 +80,7 @@ We add a camera controller to the view controller and initialize it when the vie .. code-block:: objc :linenos: - #import + #import using namespace cv; diff --git a/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst b/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst index 57e0b1b6ea..bd7fde8775 100644 --- a/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst +++ b/doc/tutorials/ml/non_linear_svms/non_linear_svms.rst @@ -73,7 +73,7 @@ You may also find the source code and these video file in the :file:`samples/cpp :language: cpp :linenos: :tab-width: 4 - :lines: 1-11, 22-23, 26- + :lines: 1-12, 23-24, 27- Explanation =========== diff --git a/include/opencv2/opencv.hpp b/include/opencv2/opencv.hpp index b7c290a49c..fd9ca5898e 100644 --- a/include/opencv2/opencv.hpp +++ b/include/opencv2/opencv.hpp @@ -50,6 +50,8 @@ #include "opencv2/features2d.hpp" #include "opencv2/objdetect.hpp" #include "opencv2/calib3d.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/ml.hpp" diff --git a/modules/calib3d/perf/perf_precomp.hpp b/modules/calib3d/perf/perf_precomp.hpp index 86312de1b8..9c5ab997d1 100644 --- a/modules/calib3d/perf/perf_precomp.hpp +++ b/modules/calib3d/perf/perf_precomp.hpp @@ -11,7 +11,7 @@ #include "opencv2/ts.hpp" #include "opencv2/calib3d.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/imgproc.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY diff --git a/modules/calib3d/test/test_precomp.hpp b/modules/calib3d/test/test_precomp.hpp index d670a4c220..ac3371cef3 100644 --- a/modules/calib3d/test/test_precomp.hpp +++ b/modules/calib3d/test/test_precomp.hpp @@ -13,7 +13,7 @@ #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/calib3d.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" namespace cvtest { diff --git a/modules/core/doc/basic_structures.rst b/modules/core/doc/basic_structures.rst index 459a6a9c98..a94fa1731e 100644 --- a/modules/core/doc/basic_structures.rst +++ b/modules/core/doc/basic_structures.rst @@ -2981,7 +2981,7 @@ The class provides the following features for all derived classes: * so called "virtual constructor". That is, each Algorithm derivative is registered at program start and you can get the list of registered algorithms and create instance of a particular algorithm by its name (see ``Algorithm::create``). 
If you plan to add your own algorithms, it is good practice to add a unique prefix to your algorithms to distinguish them from other algorithms. - * setting/retrieving algorithm parameters by name. If you used video capturing functionality from OpenCV highgui module, you are probably familar with ``cvSetCaptureProperty()``, ``cvGetCaptureProperty()``, ``VideoCapture::set()`` and ``VideoCapture::get()``. ``Algorithm`` provides similar method where instead of integer id's you specify the parameter names as text strings. See ``Algorithm::set`` and ``Algorithm::get`` for details. + * setting/retrieving algorithm parameters by name. If you used video capturing functionality from OpenCV videoio module, you are probably familar with ``cvSetCaptureProperty()``, ``cvGetCaptureProperty()``, ``VideoCapture::set()`` and ``VideoCapture::get()``. ``Algorithm`` provides similar method where instead of integer id's you specify the parameter names as text strings. See ``Algorithm::set`` and ``Algorithm::get`` for details. * reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store all its parameters and then read them back. There is no need to re-implement it each time. diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index 60b744f6df..e4f6d2f230 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -361,6 +361,37 @@ The function ``line`` draws the line segment between ``pt1`` and ``pt2`` points Antialiased lines are drawn using Gaussian filtering. +arrowedLine +---------------- +Draws a arrow segment pointing from the first point to the second one. + +.. ocv:function:: void arrowedLine(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0, double tipLength=0.1) + + :param img: Image. + + :param pt1: The point the arrow starts from. + + :param pt2: The point the arrow points to. + + :param color: Line color. + + :param thickness: Line thickness. + + :param lineType: Type of the line: + + * **8** (or omitted) - 8-connected line. + + * **4** - 4-connected line. + + * **CV_AA** - antialiased line. + + :param shift: Number of fractional bits in the point coordinates. + + :param tipLength: The length of the arrow tip in relation to the arrow length + +The function ``arrowedLine`` draws an arrow between ``pt1`` and ``pt2`` points in the image. See also :ocv:func:`line`. + + LineIterator ------------ .. ocv:class:: LineIterator diff --git a/modules/core/doc/intro.rst b/modules/core/doc/intro.rst index 6d9fdfca5e..032e7453a5 100644 --- a/modules/core/doc/intro.rst +++ b/modules/core/doc/intro.rst @@ -14,7 +14,8 @@ OpenCV has a modular structure, which means that the package includes several sh * **calib3d** - basic multiple-view geometry algorithms, single and stereo camera calibration, object pose estimation, stereo correspondence algorithms, and elements of 3D reconstruction. * **features2d** - salient feature detectors, descriptors, and descriptor matchers. * **objdetect** - detection of objects and instances of the predefined classes (for example, faces, eyes, mugs, people, cars, and so on). - * **highgui** - an easy-to-use interface to video capturing, image and video codecs, as well as simple UI capabilities. + * **highgui** - an easy-to-use interface to simple UI capabilities. + * **videoio** - an easy-to-use interface to video capturing and video codecs. * **gpu** - GPU-accelerated algorithms from different OpenCV modules. 
* ... some other helper modules, such as FLANN and Google test wrappers, Python bindings, and others. diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index a5b9235152..b5249c9f50 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -510,6 +510,10 @@ CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* CV_EXPORTS_W void line(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, int thickness = 1, int lineType = LINE_8, int shift = 0); +//! draws an arrow from pt1 to pt2 in the image +CV_EXPORTS_W void arrowedLine(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int line_type=8, int shift=0, double tipLength=0.1); + //! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image CV_EXPORTS_W void rectangle(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, int thickness = 1, diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index d399265b0b..945b450303 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -360,7 +360,7 @@ struct CV_EXPORTS UMatData { enum { COPY_ON_MAP=1, HOST_COPY_OBSOLETE=2, DEVICE_COPY_OBSOLETE=4, TEMP_UMAT=8, TEMP_COPIED_UMAT=24, - USER_ALLOCATED=32 }; + USER_ALLOCATED=32, DEVICE_MEM_MAPPED=64}; UMatData(const MatAllocator* allocator); ~UMatData(); @@ -370,11 +370,13 @@ struct CV_EXPORTS UMatData bool hostCopyObsolete() const; bool deviceCopyObsolete() const; + bool deviceMemMapped() const; bool copyOnMap() const; bool tempUMat() const; bool tempCopiedUMat() const; void markHostCopyObsolete(bool flag); void markDeviceCopyObsolete(bool flag); + void markDeviceMemMapped(bool flag); const MatAllocator* prevAllocator; const MatAllocator* currAllocator; diff --git a/modules/core/include/opencv2/core/mat.inl.hpp b/modules/core/include/opencv2/core/mat.inl.hpp index d463eec671..dae0e137a8 100644 --- a/modules/core/include/opencv2/core/mat.inl.hpp +++ b/modules/core/include/opencv2/core/mat.inl.hpp @@ -3350,10 +3350,19 @@ size_t UMat::total() const inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; } inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; } +inline bool UMatData::deviceMemMapped() const { return (flags & DEVICE_MEM_MAPPED) != 0; } inline bool UMatData::copyOnMap() const { return (flags & COPY_ON_MAP) != 0; } inline bool UMatData::tempUMat() const { return (flags & TEMP_UMAT) != 0; } inline bool UMatData::tempCopiedUMat() const { return (flags & TEMP_COPIED_UMAT) == TEMP_COPIED_UMAT; } +inline void UMatData::markDeviceMemMapped(bool flag) +{ + if(flag) + flags |= DEVICE_MEM_MAPPED; + else + flags &= ~DEVICE_MEM_MAPPED; +} + inline void UMatData::markHostCopyObsolete(bool flag) { if(flag) diff --git a/modules/core/perf/opencl/perf_matop.cpp b/modules/core/perf/opencl/perf_matop.cpp index 9bb375587f..67d382239c 100644 --- a/modules/core/perf/opencl/perf_matop.cpp +++ b/modules/core/perf/opencl/perf_matop.cpp @@ -139,6 +139,7 @@ OCL_PERF_TEST_P(CopyToFixture, CopyToWithMaskUninit, dst.release(); startTimer(); src.copyTo(dst, mask); + cv::ocl::finish(); stopTimer(); } diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 751659684c..b98bf830e4 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -2980,8 +2980,187 @@ void 
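// Illustrative usage (not part of this patch): the public entry point these
// vectorized kernels serve is cv::inRange(), which fills a CV_8UC1 mask with 255
// wherever every channel of the source lies inside [lowerb, upperb].
// The thresholds below are arbitrary example values.
#include <opencv2/core.hpp>

cv::Mat maskRange(const cv::Mat& bgr)
{
    cv::Mat mask;
    cv::inRange(bgr, cv::Scalar(0, 100, 0), cv::Scalar(80, 255, 80), mask);
    return mask;
}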
cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op) namespace cv { -template static void -inRange_(const T* src1, size_t step1, const T* src2, size_t step2, +template +struct InRange_SSE +{ + int operator () (const T *, const T *, const T *, uchar *, int) const + { + return 0; + } +}; + +#if CV_SSE2 + +template <> +struct InRange_SSE +{ + int operator () (const uchar * src1, const uchar * src2, const uchar * src3, + uchar * dst, int len) const + { + int x = 0; + + if (USE_SSE2) + { + __m128i v_full = _mm_set1_epi8(-1), v_128 = _mm_set1_epi8(-128); + + for ( ; x <= len - 16; x += 16 ) + { + __m128i v_src = _mm_add_epi8(_mm_loadu_si128((const __m128i *)(src1 + x)), v_128); + __m128i v_mask1 = _mm_cmpgt_epi8(_mm_add_epi8(_mm_loadu_si128((const __m128i *)(src2 + x)), v_128), v_src); + __m128i v_mask2 = _mm_cmpgt_epi8(v_src, _mm_add_epi8(_mm_loadu_si128((const __m128i *)(src3 + x)), v_128)); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(_mm_or_si128(v_mask1, v_mask2), v_full)); + } + } + + return x; + } +}; + +template <> +struct InRange_SSE +{ + int operator () (const schar * src1, const schar * src2, const schar * src3, + uchar * dst, int len) const + { + int x = 0; + + if (USE_SSE2) + { + __m128i v_full = _mm_set1_epi8(-1); + + for ( ; x <= len - 16; x += 16 ) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_mask1 = _mm_cmpgt_epi8(_mm_loadu_si128((const __m128i *)(src2 + x)), v_src); + __m128i v_mask2 = _mm_cmpgt_epi8(v_src, _mm_loadu_si128((const __m128i *)(src3 + x))); + _mm_storeu_si128((__m128i *)(dst + x), _mm_andnot_si128(_mm_or_si128(v_mask1, v_mask2), v_full)); + } + } + + return x; + } +}; + +template <> +struct InRange_SSE +{ + int operator () (const ushort * src1, const ushort * src2, const ushort * src3, + uchar * dst, int len) const + { + int x = 0; + + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128(), v_full = _mm_set1_epi16(-1), v_32768 = _mm_set1_epi16(-32768); + + for ( ; x <= len - 8; x += 8 ) + { + __m128i v_src = _mm_add_epi16(_mm_loadu_si128((const __m128i *)(src1 + x)), v_32768); + __m128i v_mask1 = _mm_cmpgt_epi16(_mm_add_epi16(_mm_loadu_si128((const __m128i *)(src2 + x)), v_32768), v_src); + __m128i v_mask2 = _mm_cmpgt_epi16(v_src, _mm_add_epi16(_mm_loadu_si128((const __m128i *)(src3 + x)), v_32768)); + __m128i v_res = _mm_andnot_si128(_mm_or_si128(v_mask1, v_mask2), v_full); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(_mm_srli_epi16(v_res, 8), v_zero)); + } + } + + return x; + } +}; + +template <> +struct InRange_SSE +{ + int operator () (const short * src1, const short * src2, const short * src3, + uchar * dst, int len) const + { + int x = 0; + + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128(), v_full = _mm_set1_epi16(-1); + + for ( ; x <= len - 8; x += 8 ) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_mask1 = _mm_cmpgt_epi16(_mm_loadu_si128((const __m128i *)(src2 + x)), v_src); + __m128i v_mask2 = _mm_cmpgt_epi16(v_src, _mm_loadu_si128((const __m128i *)(src3 + x))); + __m128i v_res = _mm_andnot_si128(_mm_or_si128(v_mask1, v_mask2), v_full); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(_mm_srli_epi16(v_res, 8), v_zero)); + } + } + + return x; + } +}; + +template <> +struct InRange_SSE +{ + int operator () (const int * src1, const int * src2, const int * src3, + uchar * dst, int len) const + { + int x = 0; + + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128(), v_full = _mm_set1_epi32(-1); + + for ( ; x <= len - 8; x += 
8 ) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(src1 + x)); + __m128i v_res1 = _mm_or_si128(_mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src2 + x)), v_src), + _mm_cmpgt_epi32(v_src, _mm_loadu_si128((const __m128i *)(src3 + x)))); + + v_src = _mm_loadu_si128((const __m128i *)(src1 + x + 4)); + __m128i v_res2 = _mm_or_si128(_mm_cmpgt_epi32(_mm_loadu_si128((const __m128i *)(src2 + x + 4)), v_src), + _mm_cmpgt_epi32(v_src, _mm_loadu_si128((const __m128i *)(src3 + x + 4)))); + + __m128i v_res = _mm_packs_epi32(_mm_srli_epi32(_mm_andnot_si128(v_res1, v_full), 16), + _mm_srli_epi32(_mm_andnot_si128(v_res2, v_full), 16)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_res, v_zero)); + } + } + + return x; + } +}; + +template <> +struct InRange_SSE +{ + int operator () (const float * src1, const float * src2, const float * src3, + uchar * dst, int len) const + { + int x = 0; + + if (USE_SSE2) + { + __m128i v_zero = _mm_setzero_si128(); + + for ( ; x <= len - 8; x += 8 ) + { + __m128 v_src = _mm_loadu_ps(src1 + x); + __m128 v_res1 = _mm_and_ps(_mm_cmple_ps(_mm_loadu_ps(src2 + x), v_src), + _mm_cmple_ps(v_src, _mm_loadu_ps(src3 + x))); + + v_src = _mm_loadu_ps(src1 + x + 4); + __m128 v_res2 = _mm_and_ps(_mm_cmple_ps(_mm_loadu_ps(src2 + x + 4), v_src), + _mm_cmple_ps(v_src, _mm_loadu_ps(src3 + x + 4))); + + __m128i v_res1i = _mm_cvtps_epi32(v_res1), v_res2i = _mm_cvtps_epi32(v_res2); + __m128i v_res = _mm_packs_epi32(_mm_srli_epi32(v_res1i, 16), _mm_srli_epi32(v_res2i, 16)); + _mm_storel_epi64((__m128i *)(dst + x), _mm_packus_epi16(v_res, v_zero)); + } + } + + return x; + } +}; + +#endif + +template +static void inRange_(const T* src1, size_t step1, const T* src2, size_t step2, const T* src3, size_t step3, uchar* dst, size_t step, Size size) { @@ -2989,9 +3168,11 @@ inRange_(const T* src1, size_t step1, const T* src2, size_t step2, step2 /= sizeof(src2[0]); step3 /= sizeof(src3[0]); + InRange_SSE vop; + for( ; size.height--; src1 += step1, src2 += step2, src3 += step3, dst += step ) { - int x = 0; + int x = vop(src1, src2, src3, dst, size.width); #if CV_ENABLE_UNROLLED for( ; x <= size.width - 4; x += 4 ) { diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 7b2684c85e..21d5bdaca7 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -1729,22 +1729,18 @@ static bool ocl_LUT(InputArray _src, InputArray _lut, OutputArray _dst) UMat src = _src.getUMat(), lut = _lut.getUMat(); _dst.create(src.size(), CV_MAKETYPE(ddepth, dcn)); UMat dst = _dst.getUMat(); - bool bAligned = (1 == lcn) && (0 == (src.offset % 4)) && (0 == ((dcn * src.cols) % 4)); - // dst.cols == src.cols by params of dst.create + int kercn = lcn == 1 ? std::min(4, ocl::predictOptimalVectorWidth(_dst)) : dcn; ocl::Kernel k("LUT", ocl::core::lut_oclsrc, - format("-D dcn=%d -D lcn=%d -D srcT=%s -D dstT=%s", bAligned ? 4 : dcn, lcn, - ocl::typeToStr(src.depth()), ocl::memopTypeToStr(ddepth) - )); + format("-D dcn=%d -D lcn=%d -D srcT=%s -D dstT=%s", kercn, lcn, + ocl::typeToStr(src.depth()), ocl::memopTypeToStr(ddepth))); if (k.empty()) return false; - int cols = bAligned ? 
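The InRange_SSE specializations above return how many elements they handled, so the unrolled scalar loop in inRange_ only finishes the tail; the uchar/ushort variants bias both operands by 128/32768 so the signed SSE2 compares preserve unsigned ordering. Below is a minimal scalar reference for the predicate those kernels compute (an illustrative sketch, not OpenCV code):

#include <cstdint>
#include <cstdio>

// Scalar reference for the predicate the InRange_SSE kernels vectorize:
// dst[i] = 255 when lower[i] <= src[i] <= upper[i], otherwise 0.
static void inRangeScalar(const uint8_t* src, const uint8_t* lower,
                          const uint8_t* upper, uint8_t* dst, int len)
{
    for (int i = 0; i < len; ++i)
        dst[i] = (lower[i] <= src[i] && src[i] <= upper[i]) ? 255 : 0;
}

int main()
{
    const uint8_t src[4]   = { 10, 100, 200, 255 };
    const uint8_t lower[4] = {  0, 120, 150, 255 };
    const uint8_t upper[4] = { 50, 130, 250, 255 };
    uint8_t dst[4];

    inRangeScalar(src, lower, upper, dst, 4);
    for (int i = 0; i < 4; ++i)
        std::printf("%d ", dst[i]);   // expected: 255 0 255 255
    std::printf("\n");
    return 0;
}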
dcn * dst.cols / 4 : dst.cols; - k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::ReadOnlyNoSize(lut), - ocl::KernelArg::WriteOnlyNoSize(dst), dst.rows, cols); + ocl::KernelArg::WriteOnly(dst, dcn, kercn)); - size_t globalSize[2] = { cols, (dst.rows + 3) / 4 }; + size_t globalSize[2] = { dst.cols * dcn / kercn, (dst.rows + 3) / 4 }; return k.run(2, globalSize, NULL, false); } diff --git a/modules/core/src/drawing.cpp b/modules/core/src/drawing.cpp index 0ba932163d..0b11aea7ea 100644 --- a/modules/core/src/drawing.cpp +++ b/modules/core/src/drawing.cpp @@ -1584,6 +1584,24 @@ void line( InputOutputArray _img, Point pt1, Point pt2, const Scalar& color, ThickLine( img, pt1, pt2, buf, thickness, line_type, 3, shift ); } +void arrowedLine(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, + int thickness, int line_type, int shift, double tipLength) +{ + const double tipSize = norm(pt1-pt2)*tipLength; // Factor to normalize the size of the tip depending on the length of the arrow + + line(img, pt1, pt2, color, thickness, line_type, shift); + + const double angle = atan2( (double) pt1.y - pt2.y, (double) pt1.x - pt2.x ); + + Point p(cvRound(pt2.x + tipSize * cos(angle + CV_PI / 4)), + cvRound(pt2.y + tipSize * sin(angle + CV_PI / 4))); + line(img, p, pt2, color, thickness, line_type, shift); + + p.x = cvRound(pt2.x + tipSize * cos(angle - CV_PI / 4)); + p.y = cvRound(pt2.y + tipSize * sin(angle - CV_PI / 4)); + line(img, p, pt2, color, thickness, line_type, shift); +} + void rectangle( InputOutputArray _img, Point pt1, Point pt2, const Scalar& color, int thickness, int lineType, int shift ) diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp index 2a08899167..d3327940db 100644 --- a/modules/core/src/dxt.cpp +++ b/modules/core/src/dxt.cpp @@ -2080,32 +2080,32 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) { if ((flags & DFT_ROWS) == 0) { - if (!real_transform) + if (src.channels() == 2 && !(inv && (flags & DFT_REAL_OUTPUT))) { - if (ippi_DFT_C_32F(src,dst, inv, ipp_norm_flag)) + if (ippi_DFT_C_32F(src, dst, inv, ipp_norm_flag)) return; setIppErrorStatus(); } - else if (inv || !(flags & DFT_COMPLEX_OUTPUT)) + if (src.channels() == 1 && (inv || !(flags & DFT_COMPLEX_OUTPUT))) { - if (ippi_DFT_R_32F(src,dst, inv, ipp_norm_flag)) + if (ippi_DFT_R_32F(src, dst, inv, ipp_norm_flag)) return; setIppErrorStatus(); } } else { - if (!real_transform) + if (src.channels() == 2 && !(inv && (flags & DFT_REAL_OUTPUT))) { ippiDFT_C_Func ippiFunc = inv ? (ippiDFT_C_Func)ippiDFTInv_CToC_32fc_C1R : (ippiDFT_C_Func)ippiDFTFwd_CToC_32fc_C1R; - if (Dft_C_IPPLoop(src,dst, IPPDFT_C_Functor(ippiFunc),ipp_norm_flag)) + if (Dft_C_IPPLoop(src, dst, IPPDFT_C_Functor(ippiFunc),ipp_norm_flag)) return; setIppErrorStatus(); } - else if (inv || !(flags & DFT_COMPLEX_OUTPUT)) + if (src.channels() == 1 && (inv || !(flags & DFT_COMPLEX_OUTPUT))) { ippiDFT_R_Func ippiFunc = inv ? 
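A possible usage sketch for the cv::arrowedLine added above. The hunk does not show the declaration or the default tipLength, so the header set is an assumption and tipLength is passed explicitly; the output file name is arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>     // drawing declarations (placement assumed)
#include <opencv2/imgcodecs.hpp>   // imwrite, per this patch

int main()
{
    cv::Mat canvas = cv::Mat::zeros(300, 400, CV_8UC3);

    // Shaft from (50,250) to (350,50); the two tip strokes are drawn at
    // +/-45 degrees from the shaft and are 10% of its length here.
    cv::arrowedLine(canvas, cv::Point(50, 250), cv::Point(350, 50),
                    cv::Scalar(0, 255, 0), /*thickness*/ 2, /*line_type*/ 8,
                    /*shift*/ 0, /*tipLength*/ 0.1);

    cv::imwrite("arrow.png", canvas);   // "arrow.png" is an arbitrary name
    return 0;
}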
(ippiDFT_R_Func)ippiDFTInv_PackToR_32f_C1R : (ippiDFT_R_Func)ippiDFTFwd_RToPack_32f_C1R; - if (Dft_R_IPPLoop(src,dst, IPPDFT_R_Functor(ippiFunc),ipp_norm_flag)) + if (Dft_R_IPPLoop(src, dst, IPPDFT_R_Functor(ippiFunc),ipp_norm_flag)) return; setIppErrorStatus(); } diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index 7a02bd6d72..f36e268d0d 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -348,7 +348,18 @@ static void InvSqrt_32f(const float* src, float* dst, int len) static void InvSqrt_64f(const double* src, double* dst, int len) { - for( int i = 0; i < len; i++ ) + int i = 0; + +#if CV_SSE2 + if (USE_SSE2) + { + __m128d v_1 = _mm_set1_pd(1.0); + for ( ; i <= len - 2; i += 2) + _mm_storeu_pd(dst + i, _mm_div_pd(v_1, _mm_sqrt_pd(_mm_loadu_pd(src + i)))); + } +#endif + + for( ; i < len; i++ ) dst[i] = 1/std::sqrt(src[i]); } @@ -2543,12 +2554,33 @@ void patchNaNs( InputOutputArray _a, double _val ) NAryMatIterator it(arrays, (uchar**)ptrs); size_t len = it.size*a.channels(); Cv32suf val; - val.f = (float)_val; + float fval = (float)_val; + val.f = fval; + +#if CV_SSE2 + __m128i v_mask1 = _mm_set1_epi32(0x7fffffff), v_mask2 = _mm_set1_epi32(0x7f800000); + __m128i v_val = _mm_set1_epi32(val.i); +#endif for( size_t i = 0; i < it.nplanes; i++, ++it ) { int* tptr = ptrs[0]; - for( size_t j = 0; j < len; j++ ) + size_t j = 0; + +#if CV_SSE2 + if (USE_SSE2) + { + for ( ; j < len; j += 4) + { + __m128i v_src = _mm_loadu_si128((__m128i const *)(tptr + j)); + __m128i v_cmp_mask = _mm_cmplt_epi32(v_mask2, _mm_and_si128(v_src, v_mask1)); + __m128i v_res = _mm_or_si128(_mm_andnot_si128(v_cmp_mask, v_src), _mm_and_si128(v_cmp_mask, v_val)); + _mm_storeu_si128((__m128i *)(tptr + j), v_res); + } + } +#endif + + for( ; j < len; j++ ) if( (tptr[j] & 0x7fffffff) > 0x7f800000 ) tptr[j] = val.i; } diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index f199cb2534..ba6df7261a 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -2758,21 +2758,30 @@ namespace cv { static bool ocl_setIdentity( InputOutputArray _m, const Scalar& s ) { - int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), - sctype = CV_MAKE_TYPE(depth, cn == 3 ? 4 : cn), + int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), kercn = cn; + if (cn == 1) + { + kercn = std::min(ocl::predictOptimalVectorWidth(_m), 4); + if (kercn != 4) + kercn = 1; + } + int sctype = CV_MAKE_TYPE(depth, cn == 3 ? 4 : cn), rowsPerWI = ocl::Device::getDefault().isIntel() ? 
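The vectorized patchNaNs path above hinges on the bit test (bits & 0x7fffffff) > 0x7f800000, which is true exactly for 32-bit IEEE-754 NaNs (sign cleared, exponent all ones, non-zero mantissa); the andnot/and blend then substitutes the replacement value only where the mask is set. A small standalone check of that predicate (illustrative, independent of OpenCV):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// True exactly when f is a 32-bit NaN: clear the sign bit, then any value
// strictly above the +inf pattern 0x7f800000 has an all-ones exponent and
// a non-zero mantissa.
static bool isNaNBits(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));       // safe type pun
    return (bits & 0x7fffffffu) > 0x7f800000u;
}

int main()
{
    const float qnan = std::numeric_limits<float>::quiet_NaN();
    assert(isNaNBits(qnan) && std::isnan(qnan));             // NaN is caught
    assert(!isNaNBits(std::numeric_limits<float>::infinity()));
    assert(!isNaNBits(-0.0f) && !isNaNBits(1.0f));           // ordinary values pass
    return 0;
}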
4 : 1; ocl::Kernel k("setIdentity", ocl::core::set_identity_oclsrc, - format("-D T=%s -D T1=%s -D cn=%d -D ST=%s", ocl::memopTypeToStr(type), - ocl::memopTypeToStr(depth), cn, ocl::memopTypeToStr(sctype))); + format("-D T=%s -D T1=%s -D cn=%d -D ST=%s -D kercn=%d -D rowsPerWI=%d", + ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)), + ocl::memopTypeToStr(depth), cn, + ocl::memopTypeToStr(sctype), + kercn, rowsPerWI)); if (k.empty()) return false; UMat m = _m.getUMat(); - k.args(ocl::KernelArg::WriteOnly(m), ocl::KernelArg::Constant(Mat(1, 1, sctype, s)), - rowsPerWI); + k.args(ocl::KernelArg::WriteOnly(m, cn, kercn), + ocl::KernelArg::Constant(Mat(1, 1, sctype, s))); - size_t globalsize[2] = { m.cols, (m.rows + rowsPerWI - 1) / rowsPerWI }; + size_t globalsize[2] = { m.cols * cn / kercn, (m.rows + rowsPerWI - 1) / rowsPerWI }; return k.run(2, globalsize, NULL, false); } @@ -3441,8 +3450,11 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst, const int min_opt_cols = 128, buf_cols = 32; int sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype), ddepth = CV_MAT_DEPTH(dtype), ddepth0 = ddepth; - bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0, - useOptimized = 1 == dim && _src.cols() > min_opt_cols; + const ocl::Device &defDev = ocl::Device::getDefault(); + bool doubleSupport = defDev.doubleFPConfig() > 0; + + size_t wgs = defDev.maxWorkGroupSize(); + bool useOptimized = 1 == dim && _src.cols() > min_opt_cols && (wgs >= buf_cols); if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) return false; @@ -3455,78 +3467,80 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst, const char * const ops[4] = { "OCL_CV_REDUCE_SUM", "OCL_CV_REDUCE_AVG", "OCL_CV_REDUCE_MAX", "OCL_CV_REDUCE_MIN" }; - char cvt[2][40]; - int wdepth = std::max(ddepth, CV_32F); - cv::String build_opt = format("-D %s -D dim=%d -D cn=%d -D ddepth=%d" - " -D srcT=%s -D dstT=%s -D dstT0=%s -D convertToWT=%s" - " -D convertToDT=%s -D convertToDT0=%s%s", - ops[op], dim, cn, ddepth, ocl::typeToStr(useOptimized ? ddepth : sdepth), - ocl::typeToStr(ddepth), ocl::typeToStr(ddepth0), - ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]), - ocl::convertTypeStr(sdepth, ddepth, 1, cvt[0]), - ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[1]), - doubleSupport ? " -D DOUBLE_SUPPORT" : ""); - if (useOptimized) { - cv::String build_opt_pre = format("-D OP_REDUCE_PRE -D BUF_COLS=%d -D %s -D dim=1" - " -D cn=%d -D ddepth=%d -D srcT=%s -D dstT=%s -D convertToDT=%s%s", - buf_cols, ops[op], cn, ddepth, ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), - ocl::convertTypeStr(sdepth, ddepth, 1, cvt[0]), - doubleSupport ? 
" -D DOUBLE_SUPPORT" : ""); - ocl::Kernel kpre("reduce_horz_pre", ocl::core::reduce2_oclsrc, build_opt_pre); - if (kpre.empty()) + size_t tileHeight = (size_t)(wgs / buf_cols); + if (defDev.isIntel()) + { + static const size_t maxItemInGroupCount = 16; + tileHeight = min(tileHeight, defDev.localMemSize() / buf_cols / CV_ELEM_SIZE(CV_MAKETYPE(wdepth, cn)) / maxItemInGroupCount); + } + char cvt[3][40]; + cv::String build_opt = format("-D OP_REDUCE_PRE -D BUF_COLS=%d -D TILE_HEIGHT=%d -D %s -D dim=1" + " -D cn=%d -D ddepth=%d" + " -D srcT=%s -D bufT=%s -D dstT=%s" + " -D convertToWT=%s -D convertToBufT=%s -D convertToDT=%s%s", + buf_cols, tileHeight, ops[op], cn, ddepth, + ocl::typeToStr(sdepth), + ocl::typeToStr(ddepth), + ocl::typeToStr(ddepth0), + ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]), + ocl::convertTypeStr(sdepth, ddepth, 1, cvt[1]), + ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[2]), + doubleSupport ? " -D DOUBLE_SUPPORT" : ""); + ocl::Kernel k("reduce_horz_opt", ocl::core::reduce2_oclsrc, build_opt); + if (k.empty()) return false; - - ocl::Kernel kmain("reduce", ocl::core::reduce2_oclsrc, build_opt); - if (kmain.empty()) - return false; - UMat src = _src.getUMat(); Size dsize(1, src.rows); _dst.create(dsize, dtype); UMat dst = _dst.getUMat(); - UMat buf(src.rows, buf_cols, dst.type()); - - kpre.args(ocl::KernelArg::ReadOnly(src), - ocl::KernelArg::WriteOnlyNoSize(buf)); + if (op0 == CV_REDUCE_AVG) + k.args(ocl::KernelArg::ReadOnly(src), + ocl::KernelArg::WriteOnlyNoSize(dst), 1.0f / src.cols); + else + k.args(ocl::KernelArg::ReadOnly(src), + ocl::KernelArg::WriteOnlyNoSize(dst)); + size_t localSize[2] = { buf_cols, tileHeight}; size_t globalSize[2] = { buf_cols, src.rows }; - if (!kpre.run(2, globalSize, NULL, false)) + return k.run(2, globalSize, localSize, false); + } + else + { + char cvt[2][40]; + cv::String build_opt = format("-D %s -D dim=%d -D cn=%d -D ddepth=%d" + " -D srcT=%s -D dstT=%s -D dstT0=%s -D convertToWT=%s" + " -D convertToDT=%s -D convertToDT0=%s%s", + ops[op], dim, cn, ddepth, ocl::typeToStr(useOptimized ? ddepth : sdepth), + ocl::typeToStr(ddepth), ocl::typeToStr(ddepth0), + ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]), + ocl::convertTypeStr(sdepth, ddepth, 1, cvt[0]), + ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[1]), + doubleSupport ? " -D DOUBLE_SUPPORT" : ""); + + ocl::Kernel k("reduce", ocl::core::reduce2_oclsrc, build_opt); + if (k.empty()) return false; + UMat src = _src.getUMat(); + Size dsize(dim == 0 ? src.cols : 1, dim == 0 ? 1 : src.rows); + _dst.create(dsize, dtype); + UMat dst = _dst.getUMat(); + + ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), + temparg = ocl::KernelArg::WriteOnlyNoSize(dst); + if (op0 == CV_REDUCE_AVG) - kmain.args(ocl::KernelArg::ReadOnly(buf), - ocl::KernelArg::WriteOnlyNoSize(dst), 1.0f / src.cols); + k.args(srcarg, temparg, 1.0f / (dim == 0 ? src.rows : src.cols)); else - kmain.args(ocl::KernelArg::ReadOnly(buf), - ocl::KernelArg::WriteOnlyNoSize(dst)); + k.args(srcarg, temparg); - globalSize[0] = src.rows; - return kmain.run(1, globalSize, NULL, false); + size_t globalsize = std::max(dsize.width, dsize.height); + return k.run(1, &globalsize, NULL, false); } - - ocl::Kernel k("reduce", ocl::core::reduce2_oclsrc, build_opt); - if (k.empty()) - return false; - - UMat src = _src.getUMat(); - Size dsize(dim == 0 ? src.cols : 1, dim == 0 ? 
1 : src.rows); - _dst.create(dsize, dtype); - UMat dst = _dst.getUMat(); - - ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src), - temparg = ocl::KernelArg::WriteOnlyNoSize(dst); - - if (op0 == CV_REDUCE_AVG) - k.args(srcarg, temparg, 1.0f / (dim == 0 ? src.rows : src.cols)); - else - k.args(srcarg, temparg); - - size_t globalsize = std::max(dsize.width, dsize.height); - return k.run(1, &globalsize, NULL, false); } } diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp index ed72ffc7f1..32db8c91b4 100644 --- a/modules/core/src/ocl.cpp +++ b/modules/core/src/ocl.cpp @@ -3494,9 +3494,8 @@ public: OpenCLBufferPoolImpl() : currentReservedSize(0), maxReservedSize(0) { - // Note: Buffer pool is disabled by default, - // because we didn't receive significant performance improvement - maxReservedSize = getConfigurationParameterForSize("OPENCV_OPENCL_BUFFERPOOL_LIMIT", 0); + int poolSize = ocl::Device::getDefault().isIntel() ? 1 << 27 : 0; + maxReservedSize = getConfigurationParameterForSize("OPENCV_OPENCL_BUFFERPOOL_LIMIT", poolSize); } virtual ~OpenCLBufferPoolImpl() { @@ -3739,6 +3738,7 @@ public: u->handle = clCreateBuffer(ctx_handle, CL_MEM_COPY_HOST_PTR|CL_MEM_READ_WRITE|createFlags, u->size, u->origdata, &retval); tempUMatFlags = UMatData::TEMP_COPIED_UMAT; + } if(!u->handle || retval != CL_SUCCESS) return false; @@ -3880,6 +3880,7 @@ public: if(u->data && retval == CL_SUCCESS) { u->markHostCopyObsolete(false); + u->markDeviceMemMapped(true); return; } @@ -3908,6 +3909,7 @@ public: if(!u) return; + CV_Assert(u->handle != 0); UMatDataAutoLock autolock(u); @@ -3918,8 +3920,10 @@ public: cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); cl_int retval = 0; - if( !u->copyOnMap() && u->data ) + if( !u->copyOnMap() && u->deviceMemMapped() ) { + CV_Assert(u->data != NULL); + u->markDeviceMemMapped(false); CV_Assert( (retval = clEnqueueUnmapMemObject(q, (cl_mem)u->handle, u->data, 0, 0, 0)) == CL_SUCCESS ); CV_OclDbgAssert(clFinish(q) == CL_SUCCESS); diff --git a/modules/core/src/opencl/lut.cl b/modules/core/src/opencl/lut.cl index 9bcd1b66f2..81ca4349a3 100644 --- a/modules/core/src/opencl/lut.cl +++ b/modules/core/src/opencl/lut.cl @@ -36,114 +36,118 @@ #if lcn == 1 #if dcn == 4 - #define LUT_OP(num)\ - int idx = *(__global const int *)(srcptr + mad24(num, src_step, src_index));\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - dst[0] = lut_l[idx & 0xff];\ - dst[1] = lut_l[(idx >> 8) & 0xff];\ - dst[2] = lut_l[(idx >> 16) & 0xff];\ + #define LUT_OP \ + int idx = *(__global const int *)(srcptr + src_index); \ + dst = (__global dstT *)(dstptr + dst_index); \ + dst[0] = lut_l[idx & 0xff]; \ + dst[1] = lut_l[(idx >> 8) & 0xff]; \ + dst[2] = lut_l[(idx >> 16) & 0xff]; \ dst[3] = lut_l[(idx >> 24) & 0xff]; #elif dcn == 3 - #define LUT_OP(num)\ - uchar3 idx = vload3(0, srcptr + mad24(num, src_step, src_index));\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - dst[0] = lut_l[idx.x];\ - dst[1] = lut_l[idx.y];\ + #define LUT_OP \ + uchar3 idx = vload3(0, srcptr + src_index); \ + dst = (__global dstT *)(dstptr + dst_index); \ + dst[0] = lut_l[idx.x]; \ + dst[1] = lut_l[idx.y]; \ dst[2] = lut_l[idx.z]; #elif dcn == 2 - #define LUT_OP(num)\ - short idx = *(__global const short *)(srcptr + mad24(num, src_step, src_index));\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - dst[0] = lut_l[idx & 0xff];\ + #define LUT_OP \ + short idx = *(__global const short *)(srcptr + src_index); \ + dst = (__global dstT *)(dstptr 
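The ocl_reduce rework that closes just above routes row-wise reductions through the single reduce_horz_opt kernel instead of the old two-kernel pre/main pair. For reference, a plain usage sketch of the public API it serves; with a UMat on an OpenCL device this path would be exercised, with a Mat it runs on the CPU:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat src = (cv::Mat_<float>(4, 3) <<  1,  2,  3,
                                             4,  5,  6,
                                             7,  8,  9,
                                            10, 11, 12);
    cv::Mat rowAvg;
    // dim = 1 collapses each row to a single column; REDUCE_AVG is the C++
    // counterpart of the CV_REDUCE_AVG constant used in the host code above.
    cv::reduce(src, rowAvg, /*dim*/ 1, cv::REDUCE_AVG, CV_32F);
    std::cout << rowAvg << std::endl;   // [2; 5; 8; 11]
    return 0;
}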
+ dst_index); \ + dst[0] = lut_l[idx & 0xff]; \ dst[1] = lut_l[(idx >> 8) & 0xff]; #elif dcn == 1 - #define LUT_OP(num)\ - uchar idx = (srcptr + mad24(num, src_step, src_index))[0];\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ + #define LUT_OP \ + uchar idx = (srcptr + src_index)[0]; \ + dst = (__global dstT *)(dstptr + dst_index); \ dst[0] = lut_l[idx]; #else - #define LUT_OP(num)\ - __global const srcT * src = (__global const srcT *)(srcptr + mad24(num, src_step, src_index));\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - for (int cn = 0; cn < dcn; ++cn)\ + #define LUT_OP \ + __global const srcT * src = (__global const srcT *)(srcptr + src_index); \ + dst = (__global dstT *)(dstptr + dst_index); \ + for (int cn = 0; cn < dcn; ++cn) \ dst[cn] = lut_l[src[cn]]; #endif #else #if dcn == 4 - #define LUT_OP(num)\ - __global const uchar4 *src_pixel = (__global const uchar4 *)(srcptr + mad24(num, src_step, src_index));\ - int4 idx = convert_int4(src_pixel[0]) * lcn + (int4)(0, 1, 2, 3);\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - dst[0] = lut_l[idx.x];\ - dst[1] = lut_l[idx.y];\ - dst[2] = lut_l[idx.z];\ + #define LUT_OP \ + __global const uchar4 * src_pixel = (__global const uchar4 *)(srcptr + src_index); \ + int4 idx = mad24(convert_int4(src_pixel[0]), (int4)(lcn), (int4)(0, 1, 2, 3)); \ + dst = (__global dstT *)(dstptr + dst_index); \ + dst[0] = lut_l[idx.x]; \ + dst[1] = lut_l[idx.y]; \ + dst[2] = lut_l[idx.z]; \ dst[3] = lut_l[idx.w]; #elif dcn == 3 - #define LUT_OP(num)\ - uchar3 src_pixel = vload3(0, srcptr + mad24(num, src_step, src_index));\ - int3 idx = convert_int3(src_pixel) * lcn + (int3)(0, 1, 2);\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - dst[0] = lut_l[idx.x];\ - dst[1] = lut_l[idx.y];\ + #define LUT_OP \ + uchar3 src_pixel = vload3(0, srcptr + src_index); \ + int3 idx = mad24(convert_int3(src_pixel), (int3)(lcn), (int3)(0, 1, 2)); \ + dst = (__global dstT *)(dstptr + dst_index); \ + dst[0] = lut_l[idx.x]; \ + dst[1] = lut_l[idx.y]; \ dst[2] = lut_l[idx.z]; #elif dcn == 2 - #define LUT_OP(num)\ - __global const uchar2 *src_pixel = (__global const uchar2 *)(srcptr + mad24(num, src_step, src_index));\ - int2 idx = convert_int2(src_pixel[0]) * lcn + (int2)(0, 1);\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - dst[0] = lut_l[idx.x];\ + #define LUT_OP \ + __global const uchar2 * src_pixel = (__global const uchar2 *)(srcptr + src_index); \ + int2 idx = mad24(convert_int2(src_pixel[0]), lcn, (int2)(0, 1)); \ + dst = (__global dstT *)(dstptr + dst_index); \ + dst[0] = lut_l[idx.x]; \ dst[1] = lut_l[idx.y]; #elif dcn == 1 //error case (1 < lcn) ==> lcn == scn == dcn - #define LUT_OP(num)\ - uchar idx = (srcptr + mad24(num, src_step, src_index))[0];\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ + #define LUT_OP \ + uchar idx = (srcptr + src_index)[0]; \ + dst = (__global dstT *)(dstptr + dst_index); \ dst[0] = lut_l[idx]; #else - #define LUT_OP(num)\ - __global const srcT *src = (__global const srcT *)(srcptr + mad24(num, src_step, src_index));\ - dst = (__global dstT *)(dstptr + mad24(num, dst_step, dst_index));\ - for (int cn = 0; cn < dcn; ++cn)\ + #define LUT_OP \ + __global const srcT * src = (__global const srcT *)(srcptr + src_index); \ + dst = (__global dstT *)(dstptr + dst_index); \ + for (int cn = 0; cn < dcn; ++cn) \ dst[cn] = lut_l[mad24(src[cn], lcn, cn)]; #endif #endif -#define LOCAL_LUT_INIT\ - {\ - __global const 
dstT * lut = (__global const dstT *)(lutptr + lut_offset);\ - int init = mad24((int)get_local_id(1), (int)get_local_size(0), (int)get_local_id(0));\ - int step = get_local_size(0) * get_local_size(1);\ - for (int i = init; i < 256 * lcn; i += step)\ - {\ - lut_l[i] = lut[i];\ - }\ - barrier(CLK_LOCAL_MEM_FENCE);\ - } - __kernel void LUT(__global const uchar * srcptr, int src_step, int src_offset, __global const uchar * lutptr, int lut_step, int lut_offset, __global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols) { - __local dstT lut_l[256 * lcn]; - LOCAL_LUT_INIT; - int x = get_global_id(0); - int y = 4 * get_global_id(1); + int y = get_global_id(1) << 2; + + __local dstT lut_l[256 * lcn]; + __global const dstT * lut = (__global const dstT *)(lutptr + lut_offset); + + for (int i = mad24((int)get_local_id(1), (int)get_local_size(0), (int)get_local_id(0)), + step = get_local_size(0) * get_local_size(1); i < 256 * lcn; i += step) + lut_l[i] = lut[i]; + barrier(CLK_LOCAL_MEM_FENCE); if (x < cols && y < rows) { int src_index = mad24(y, src_step, mad24(x, (int)sizeof(srcT) * dcn, src_offset)); int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT) * dcn, dst_offset)); + __global dstT * dst; - LUT_OP(0); + + LUT_OP; + if (y < rows - 1) { - LUT_OP(1); + src_index += src_step; + dst_index += dst_step; + LUT_OP; + if (y < rows - 2) { - LUT_OP(2); + src_index += src_step; + dst_index += dst_step; + LUT_OP; + if (y < rows - 3) { - LUT_OP(3); + src_index += src_step; + dst_index += dst_step; + LUT_OP; } } } diff --git a/modules/core/src/opencl/minmaxloc.cl b/modules/core/src/opencl/minmaxloc.cl index a51c5d93a3..664673e5a2 100644 --- a/modules/core/src/opencl/minmaxloc.cl +++ b/modules/core/src/opencl/minmaxloc.cl @@ -42,9 +42,13 @@ #if wdepth <= 4 #define MIN_ABS(a) convertFromU(abs(a)) #define MIN_ABS2(a, b) convertFromU(abs_diff(a, b)) +#define MIN(a, b) min(a, b) +#define MAX(a, b) max(a, b) #else #define MIN_ABS(a) fabs(a) #define MIN_ABS2(a, b) fabs(a - b) +#define MIN(a, b) fmin(a, b) +#define MAX(a, b) fmax(a, b) #endif #if kercn != 3 @@ -60,44 +64,41 @@ #define srcTSIZE (int)sizeof(srcT1) #endif -#ifdef NEED_MINLOC -#define CALC_MINLOC(inc) minloc = id + inc -#else -#define CALC_MINLOC(inc) -#endif - -#ifdef NEED_MAXLOC -#define CALC_MAXLOC(inc) maxloc = id + inc -#else -#define CALC_MAXLOC(inc) -#endif - #ifdef NEED_MINVAL +#ifdef NEED_MINLOC #define CALC_MIN(p, inc) \ if (minval > temp.p) \ { \ minval = temp.p; \ - CALC_MINLOC(inc); \ + minloc = id + inc; \ } #else +#define CALC_MIN(p, inc) \ + minval = MIN(minval, temp.p); +#endif +#else #define CALC_MIN(p, inc) #endif #ifdef NEED_MAXVAL +#ifdef NEED_MAXLOC #define CALC_MAX(p, inc) \ if (maxval < temp.p) \ { \ maxval = temp.p; \ - CALC_MAXLOC(inc); \ + maxloc = id + inc; \ } #else +#define CALC_MAX(p, inc) \ + maxval = MAX(maxval, temp.p); +#endif +#else #define CALC_MAX(p, inc) #endif #ifdef OP_CALC2 #define CALC_MAX2(p) \ - if (maxval2 < temp.p) \ - maxval2 = temp.p; + maxval2 = MAX(maxval2, temp.p); #else #define CALC_MAX2(p) #endif @@ -208,25 +209,28 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off #if kercn == 1 #ifdef NEED_MINVAL +#if NEED_MINLOC if (minval > temp) { minval = temp; -#ifdef NEED_MINLOC minloc = id; -#endif } +#else + minval = MIN(minval, temp); +#endif #endif #ifdef NEED_MAXVAL +#ifdef NEED_MAXLOC if (maxval < temp) { maxval = temp; -#ifdef NEED_MAXLOC maxloc = id; -#endif } +#else + maxval = MAX(maxval, temp); +#endif #ifdef OP_CALC2 - if (maxval2 < temp2) - 
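The rewritten LUT kernel above stages the 256*lcn table in __local memory and lets each work-item process up to four consecutive rows by stepping src_index/dst_index, while the host side (earlier in this patch) picks kercn from predictOptimalVectorWidth for single-channel tables. For reference, a minimal host-side use of the function it implements:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // 256-entry table implementing a simple negative transform for 8-bit data.
    cv::Mat lut(1, 256, CV_8U);
    for (int i = 0; i < 256; ++i)
        lut.at<uchar>(i) = (uchar)(255 - i);

    cv::Mat src = (cv::Mat_<uchar>(1, 4) << 0, 1, 128, 255);
    cv::Mat dst;
    cv::LUT(src, lut, dst);
    std::cout << dst << std::endl;   // [255, 254, 127, 0]
    return 0;
}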
maxval2 = temp2; + maxval2 = MAX(maxval2, temp2); #endif #endif #elif kercn >= 2 @@ -282,32 +286,35 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off { int lid3 = lid - WGS2_ALIGNED; #ifdef NEED_MINVAL +#ifdef NEED_MINLOC if (localmem_min[lid3] >= minval) { -#ifdef NEED_MINLOC if (localmem_min[lid3] == minval) localmem_minloc[lid3] = min(localmem_minloc[lid3], minloc); else localmem_minloc[lid3] = minloc, -#endif - localmem_min[lid3] = minval; + localmem_min[lid3] = minval; } +#else + localmem_min[lid3] = MIN(localmem_min[lid3], minval); +#endif #endif #ifdef NEED_MAXVAL +#ifdef NEED_MAXLOC if (localmem_max[lid3] <= maxval) { -#ifdef NEED_MAXLOC if (localmem_max[lid3] == maxval) localmem_maxloc[lid3] = min(localmem_maxloc[lid3], maxloc); else localmem_maxloc[lid3] = maxloc, -#endif - localmem_max[lid3] = maxval; + localmem_max[lid3] = maxval; } +#else + localmem_max[lid3] = MAX(localmem_max[lid3], maxval); +#endif #endif #ifdef OP_CALC2 - if (localmem_max2[lid3] < maxval2) - localmem_max2[lid3] = maxval2; + localmem_max2[lid3] = MAX(localmem_max2[lid3], maxval2); #endif } barrier(CLK_LOCAL_MEM_FENCE); @@ -319,32 +326,35 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off int lid2 = lsize + lid; #ifdef NEED_MINVAL +#ifdef NEED_MAXLOC if (localmem_min[lid] >= localmem_min[lid2]) { -#ifdef NEED_MINLOC if (localmem_min[lid] == localmem_min[lid2]) localmem_minloc[lid] = min(localmem_minloc[lid2], localmem_minloc[lid]); else localmem_minloc[lid] = localmem_minloc[lid2], -#endif - localmem_min[lid] = localmem_min[lid2]; + localmem_min[lid] = localmem_min[lid2]; } +#else + localmem_min[lid] = MIN(localmem_min[lid], localmem_min[lid2]); +#endif #endif #ifdef NEED_MAXVAL +#ifdef NEED_MAXLOC if (localmem_max[lid] <= localmem_max[lid2]) { -#ifdef NEED_MAXLOC if (localmem_max[lid] == localmem_max[lid2]) localmem_maxloc[lid] = min(localmem_maxloc[lid2], localmem_maxloc[lid]); else localmem_maxloc[lid] = localmem_maxloc[lid2], -#endif - localmem_max[lid] = localmem_max[lid2]; + localmem_max[lid] = localmem_max[lid2]; } +#else + localmem_max[lid] = MAX(localmem_max[lid], localmem_max[lid2]); +#endif #endif #ifdef OP_CALC2 - if (localmem_max2[lid] < localmem_max2[lid2]) - localmem_max2[lid] = localmem_max2[lid2]; + localmem_max2[lid] = MAX(localmem_max2[lid], localmem_max2[lid2]); #endif } barrier(CLK_LOCAL_MEM_FENCE); diff --git a/modules/core/src/opencl/reduce2.cl b/modules/core/src/opencl/reduce2.cl index 7800e7a743..457378cc13 100644 --- a/modules/core/src/opencl/reduce2.cl +++ b/modules/core/src/opencl/reduce2.cl @@ -81,29 +81,34 @@ #define PROCESS_ELEM(acc, value) acc += value #elif defined OCL_CV_REDUCE_MAX #define INIT_VALUE MIN_VAL -#define PROCESS_ELEM(acc, value) acc = value > acc ? value : acc +#define PROCESS_ELEM(acc, value) acc = max(value, acc) #elif defined OCL_CV_REDUCE_MIN #define INIT_VALUE MAX_VAL -#define PROCESS_ELEM(acc, value) acc = value < acc ? 
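The minmaxloc.cl changes above split every NEED_MINVAL/NEED_MAXVAL branch on whether a location is also requested, so value-only reductions collapse to plain MIN/MAX updates. On the API side, locations are requested simply by passing the output pointers; a small sketch:

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat m = (cv::Mat_<float>(2, 3) << 3, 7, -1,
                                          4, 9,  0);
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;

    // Passing all four output pointers requests values and locations;
    // passing 0 for the locations lets the reduction skip the location
    // bookkeeping (the NEED_*LOC branches in the kernel above).
    cv::minMaxLoc(m, &minVal, &maxVal, &minLoc, &maxLoc);
    std::printf("min %.0f at (%d,%d), max %.0f at (%d,%d)\n",
                minVal, minLoc.x, minLoc.y, maxVal, maxLoc.x, maxLoc.y);
    // Expected: min -1 at (2,0), max 9 at (1,1)
    return 0;
}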
value : acc +#define PROCESS_ELEM(acc, value) acc = min(value, acc) #else #error "No operation is specified" #endif #ifdef OP_REDUCE_PRE -__kernel void reduce_horz_pre(__global const uchar * srcptr, int src_step, int src_offset, int rows, int cols, - __global uchar * bufptr, int buf_step, int buf_offset) +__kernel void reduce_horz_opt(__global const uchar * srcptr, int src_step, int src_offset, int rows, int cols, + __global uchar * dstptr, int dst_step, int dst_offset +#ifdef OCL_CV_REDUCE_AVG + , float fscale +#endif + ) { + __local bufT lsmem[TILE_HEIGHT][BUF_COLS][cn]; + int x = get_global_id(0); int y = get_global_id(1); - if (x < BUF_COLS) + int liy = get_local_id(1); + if ((x < BUF_COLS) && (y < rows)) { int src_index = mad24(y, src_step, mad24(x, (int)sizeof(srcT) * cn, src_offset)); - int buf_index = mad24(y, buf_step, mad24(x, (int)sizeof(dstT) * cn, buf_offset)); __global const srcT * src = (__global const srcT *)(srcptr + src_index); - __global dstT * buf = (__global dstT *)(bufptr + buf_index); - dstT tmp[cn] = { INIT_VALUE }; + bufT tmp[cn] = { INIT_VALUE }; int src_step_mul = BUF_COLS * cn; for (int idx = x; idx < cols; idx += BUF_COLS, src += src_step_mul) @@ -111,14 +116,49 @@ __kernel void reduce_horz_pre(__global const uchar * srcptr, int src_step, int s #pragma unroll for (int c = 0; c < cn; ++c) { - dstT value = convertToDT(src[c]); + bufT value = convertToBufT(src[c]); PROCESS_ELEM(tmp[c], value); } } #pragma unroll for (int c = 0; c < cn; ++c) - buf[c] = tmp[c]; + lsmem[liy][x][c] = tmp[c]; + } + barrier(CLK_LOCAL_MEM_FENCE); + if ((x < BUF_COLS / 2) && (y < rows)) + { + #pragma unroll + for (int c = 0; c < cn; ++c) + { + PROCESS_ELEM(lsmem[liy][x][c], lsmem[liy][x + BUF_COLS / 2][c]); + } + } + barrier(CLK_LOCAL_MEM_FENCE); + if ((x == 0) && (y < rows)) + { + int dst_index = mad24(y, dst_step, dst_offset); + + __global dstT * dst = (__global dstT *)(dstptr + dst_index); + bufT tmp[cn] = { INIT_VALUE }; + + #pragma unroll + for (int xin = 0; xin < BUF_COLS / 2; xin ++) + { + #pragma unroll + for (int c = 0; c < cn; ++c) + { + PROCESS_ELEM(tmp[c], lsmem[liy][xin][c]); + } + } + + #pragma unroll + for (int c = 0; c < cn; ++c) +#ifdef OCL_CV_REDUCE_AVG + dst[c] = convertToDT(convertToWT(tmp[c]) * fscale); +#else + dst[c] = convertToDT(tmp[c]); +#endif } } diff --git a/modules/core/src/opencl/set_identity.cl b/modules/core/src/opencl/set_identity.cl index 6b277fe0e4..952204d3ff 100644 --- a/modules/core/src/opencl/set_identity.cl +++ b/modules/core/src/opencl/set_identity.cl @@ -43,20 +43,18 @@ // //M*/ -#if cn != 3 -#define loadpix(addr) *(__global const T *)(addr) +#if kercn != 3 #define storepix(val, addr) *(__global T *)(addr) = val #define TSIZE (int)sizeof(T) #define scalar scalar_ #else -#define loadpix(addr) vload3(0, (__global const T1 *)(addr)) #define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr)) #define TSIZE ((int)sizeof(T1)*3) #define scalar (T)(scalar_.x, scalar_.y, scalar_.z) #endif __kernel void setIdentity(__global uchar * srcptr, int src_step, int src_offset, int rows, int cols, - ST scalar_, int rowsPerWI) + ST scalar_) { int x = get_global_id(0); int y0 = get_global_id(1) * rowsPerWI; @@ -65,7 +63,35 @@ __kernel void setIdentity(__global uchar * srcptr, int src_step, int src_offset, { int src_index = mad24(y0, src_step, mad24(x, TSIZE, src_offset)); - for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src_index += src_step) - storepix(x == y ? 
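reduce_horz_opt above folds each row in three phases: every work-item accumulates a BUF_COLS-strided slice into local memory, half the work-items then merge the upper half of the buffer into the lower half, and work-item 0 finishes the remaining BUF_COLS/2 partials (scaling by fscale for AVG). A scalar sketch of the same pattern for a single row, with an arbitrary BUF_COLS and row length (illustrative only):

#include <cstdio>

int main()
{
    const int BUF_COLS = 8, cols = 29;
    float row[cols];
    for (int i = 0; i < cols; ++i) row[i] = 1.0f;   // row sums to 29

    float partial[BUF_COLS] = { 0 };
    for (int x = 0; x < BUF_COLS; ++x)              // phase 1: strided sums
        for (int idx = x; idx < cols; idx += BUF_COLS)
            partial[x] += row[idx];

    for (int x = 0; x < BUF_COLS / 2; ++x)          // phase 2: halve the buffer
        partial[x] += partial[x + BUF_COLS / 2];

    float sum = 0;
    for (int x = 0; x < BUF_COLS / 2; ++x)          // phase 3: final accumulation
        sum += partial[x];

    std::printf("%g\n", sum);                       // 29
    return 0;
}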
scalar : (T)(0), srcptr + src_index); +#if kercn == cn + #pragma unroll + for (int y = y0, i = 0, y1 = min(rows, y0 + rowsPerWI); i < rowsPerWI; ++y, ++i, src_index += src_step) + if (y < y1) + storepix(x == y ? scalar : (T)(0), srcptr + src_index); +#elif kercn == 4 && cn == 1 + if (y0 < rows) + { + storepix(x == y0 >> 2 ? (T)(scalar, 0, 0, 0) : (T)(0), srcptr + src_index); + if (++y0 < rows) + { + src_index += src_step; + storepix(x == y0 >> 2 ? (T)(0, scalar, 0, 0) : (T)(0), srcptr + src_index); + + if (++y0 < rows) + { + src_index += src_step; + storepix(x == y0 >> 2 ? (T)(0, 0, scalar, 0) : (T)(0), srcptr + src_index); + + if (++y0 < rows) + { + src_index += src_step; + storepix(x == y0 >> 2 ? (T)(0, 0, 0, scalar) : (T)(0), srcptr + src_index); + } + } + } + } +#else +#error "Incorrect combination of cn && kercn" +#endif } } diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index 3dd042860d..48d85900d9 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -918,8 +918,14 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0, isContinuous = _src.isContinuous(); - int groups = ocl::Device::getDefault().maxComputeUnits(); - size_t wgs = ocl::Device::getDefault().maxWorkGroupSize(); + const ocl::Device &defDev = ocl::Device::getDefault(); + int groups = defDev.maxComputeUnits(); + if (defDev.isIntel()) + { + static const int subSliceEUCount = 10; + groups = (groups / subSliceEUCount) * 2; + } + size_t wgs = defDev.maxWorkGroupSize(); int ddepth = std::max(CV_32S, depth), sqddepth = std::max(CV_32F, depth), dtype = CV_MAKE_TYPE(ddepth, cn), diff --git a/modules/cudacodec/CMakeLists.txt b/modules/cudacodec/CMakeLists.txt index ace7cb3763..ca62995505 100644 --- a/modules/cudacodec/CMakeLists.txt +++ b/modules/cudacodec/CMakeLists.txt @@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Video Encoding/Decoding") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef) -ocv_add_module(cudacodec opencv_highgui OPTIONAL opencv_cudev) +ocv_add_module(cudacodec OPTIONAL opencv_cudev) ocv_module_include_directories() ocv_glob_module_sources() diff --git a/modules/cudev/test/CMakeLists.txt b/modules/cudev/test/CMakeLists.txt index 438e0a64c0..363970e4b7 100644 --- a/modules/cudev/test/CMakeLists.txt +++ b/modules/cudev/test/CMakeLists.txt @@ -1,4 +1,4 @@ -set(test_deps opencv_cudev opencv_core opencv_imgproc opencv_highgui opencv_ts ${OPENCV_MODULE_opencv_ts_DEPS}) +set(test_deps opencv_cudev opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui opencv_ts ${OPENCV_MODULE_opencv_ts_DEPS}) ocv_check_dependencies(${test_deps}) diff --git a/modules/features2d/perf/perf_precomp.hpp b/modules/features2d/perf/perf_precomp.hpp index 30607daaf6..f5280a84e8 100644 --- a/modules/features2d/perf/perf_precomp.hpp +++ b/modules/features2d/perf/perf_precomp.hpp @@ -10,7 +10,7 @@ #define __OPENCV_PERF_PRECOMP_HPP__ #include "opencv2/ts.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/features2d.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY diff --git a/modules/features2d/test/test_descriptors_regression.cpp b/modules/features2d/test/test_descriptors_regression.cpp index 7119d590ef..281df244a1 100644 --- a/modules/features2d/test/test_descriptors_regression.cpp +++ b/modules/features2d/test/test_descriptors_regression.cpp @@ -40,7 +40,6 @@ //M*/ 
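The set_identity.cl change above writes four packed rows per work-item when cn == 1 and kercn == 4, storing the diagonal value only in the vector whose column group contains the diagonal (x == y >> 2). The public entry point is unchanged; a minimal use:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat m(3, 4, CV_32F);
    cv::setIdentity(m, cv::Scalar(5));   // 5 on the diagonal, 0 elsewhere
    std::cout << m << std::endl;
    // [5, 0, 0, 0;
    //  0, 5, 0, 0;
    //  0, 0, 5, 0]
    return 0;
}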
#include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace std; using namespace cv; diff --git a/modules/features2d/test/test_precomp.hpp b/modules/features2d/test/test_precomp.hpp index 49bc1dfd18..bce72f7296 100644 --- a/modules/features2d/test/test_precomp.hpp +++ b/modules/features2d/test/test_precomp.hpp @@ -12,7 +12,7 @@ #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/features2d.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include #endif diff --git a/modules/flann/include/opencv2/flann/dist.h b/modules/flann/include/opencv2/flann/dist.h index 6a03b48a12..5d941e67fc 100644 --- a/modules/flann/include/opencv2/flann/dist.h +++ b/modules/flann/include/opencv2/flann/dist.h @@ -595,7 +595,7 @@ struct HellingerDistance typedef typename Accumulator::Type ResultType; /** - * Compute the histogram intersection distance + * Compute the Hellinger distance */ template ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const @@ -628,7 +628,8 @@ struct HellingerDistance template inline ResultType accum_dist(const U& a, const V& b, int) const { - return sqrt(static_cast(a)) - sqrt(static_cast(b)); + ResultType diff = sqrt(static_cast(a)) - sqrt(static_cast(b)); + return diff * diff; } }; @@ -729,9 +730,11 @@ struct KL_Divergence inline ResultType accum_dist(const U& a, const V& b, int) const { ResultType result = ResultType(); - ResultType ratio = (ResultType)(a / b); - if (ratio>0) { - result = a * log(ratio); + if( *b != 0 ) { + ResultType ratio = (ResultType)(a / b); + if (ratio>0) { + result = a * log(ratio); + } } return result; } diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index f4a19cffe6..78d6bfb094 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -1,5 +1,5 @@ set(the_description "High-level GUI and Media I/O") -ocv_add_module(highgui opencv_imgproc OPTIONAL opencv_androidcamera) +ocv_add_module(highgui opencv_imgproc opencv_imgcodecs opencv_videoio OPTIONAL opencv_androidcamera) # ---------------------------------------------------------------------------- # CMake file for highgui. 
See root CMakeLists.txt @@ -7,70 +7,20 @@ ocv_add_module(highgui opencv_imgproc OPTIONAL opencv_androidcamera) # Jose Luis Blanco, 2008 # ---------------------------------------------------------------------------- -ocv_clear_vars(GRFMT_LIBS) - if(HAVE_WINRT_CX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW") endif() -if(HAVE_PNG OR HAVE_TIFF OR HAVE_OPENEXR) +if(APPLE) ocv_include_directories(${ZLIB_INCLUDE_DIRS}) - list(APPEND GRFMT_LIBS ${ZLIB_LIBRARIES}) + list(APPEND HIGHGUI_LIBRARIES ${ZLIB_LIBRARIES}) endif() -if(HAVE_JPEG) - ocv_include_directories(${JPEG_INCLUDE_DIR}) - list(APPEND GRFMT_LIBS ${JPEG_LIBRARIES}) -endif() - -if(WITH_WEBP) - add_definitions(-DHAVE_WEBP) - ocv_include_directories(${WEBP_INCLUDE_DIR}) - list(APPEND GRFMT_LIBS ${WEBP_LIBRARIES}) -endif() - -if(HAVE_PNG) - add_definitions(${PNG_DEFINITIONS}) - ocv_include_directories(${PNG_INCLUDE_DIR}) - list(APPEND GRFMT_LIBS ${PNG_LIBRARIES}) -endif() - -if(HAVE_TIFF) - ocv_include_directories(${TIFF_INCLUDE_DIR}) - list(APPEND GRFMT_LIBS ${TIFF_LIBRARIES}) -endif() - -if(HAVE_JASPER) - ocv_include_directories(${JASPER_INCLUDE_DIR}) - list(APPEND GRFMT_LIBS ${JASPER_LIBRARIES}) -endif() - -if(HAVE_OPENEXR) - include_directories(SYSTEM ${OPENEXR_INCLUDE_PATHS}) - list(APPEND GRFMT_LIBS ${OPENEXR_LIBRARIES}) -endif() - -file(GLOB grfmt_hdrs src/grfmt*.hpp) -file(GLOB grfmt_srcs src/grfmt*.cpp) -list(APPEND grfmt_hdrs src/bitstrm.hpp) -list(APPEND grfmt_srcs src/bitstrm.cpp) -list(APPEND grfmt_hdrs src/rgbe.hpp) -list(APPEND grfmt_srcs src/rgbe.cpp) - -source_group("Src\\grfmts" FILES ${grfmt_hdrs} ${grfmt_srcs}) - set(highgui_hdrs src/precomp.hpp - src/utils.hpp - src/cap_ffmpeg_impl.hpp ) set(highgui_srcs - src/cap.cpp - src/cap_images.cpp - src/cap_ffmpeg.cpp - src/loadsave.cpp - src/utils.cpp src/window.cpp ) @@ -122,128 +72,6 @@ elseif(HAVE_COCOA) list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa") endif() -if(WIN32 AND NOT ARM) - list(APPEND highgui_srcs src/cap_cmu.cpp) -endif() - -if (WIN32 AND HAVE_DSHOW) - list(APPEND highgui_srcs src/cap_dshow.cpp) -endif() - -if (WIN32 AND HAVE_MSMF) - list(APPEND highgui_srcs src/cap_msmf.cpp) -endif() - -if (WIN32 AND HAVE_VFW) - list(APPEND highgui_srcs src/cap_vfw.cpp) -endif() - -if(HAVE_XINE) - list(APPEND highgui_srcs src/cap_xine.cpp) -endif(HAVE_XINE) - -if(HAVE_DC1394_2) - list(APPEND highgui_srcs src/cap_dc1394_v2.cpp) -endif(HAVE_DC1394_2) - -if(HAVE_DC1394) - list(APPEND highgui_srcs src/cap_dc1394.cpp) -endif(HAVE_DC1394) - -if(HAVE_GSTREAMER) - list(APPEND highgui_srcs src/cap_gstreamer.cpp) -endif(HAVE_GSTREAMER) - -if(HAVE_UNICAP) - list(APPEND highgui_srcs src/cap_unicap.cpp) -endif(HAVE_UNICAP) - -if(HAVE_LIBV4L) - list(APPEND highgui_srcs src/cap_libv4l.cpp) -elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO) - list(APPEND highgui_srcs src/cap_v4l.cpp) -endif() - -if(HAVE_OPENNI) - list(APPEND highgui_srcs src/cap_openni.cpp) - ocv_include_directories(${OPENNI_INCLUDE_DIR}) - list(APPEND HIGHGUI_LIBRARIES ${OPENNI_LIBRARY}) -endif(HAVE_OPENNI) - -if(HAVE_opencv_androidcamera) - list(APPEND highgui_srcs src/cap_android.cpp) - add_definitions(-DHAVE_ANDROID_NATIVE_CAMERA)#TODO: remove this line -endif(HAVE_opencv_androidcamera) - -if(HAVE_XIMEA) - list(APPEND highgui_srcs src/cap_ximea.cpp) - ocv_include_directories(${XIMEA_PATH}) - if(XIMEA_LIBRARY_DIR) - link_directories("${XIMEA_LIBRARY_DIR}") - endif() - if(X86_64) - list(APPEND HIGHGUI_LIBRARIES m3apiX64) - else() - list(APPEND HIGHGUI_LIBRARIES m3api) - endif() -endif(HAVE_XIMEA) - -if(HAVE_FFMPEG) - 
if(UNIX AND BZIP2_LIBRARIES) - list(APPEND HIGHGUI_LIBRARIES ${BZIP2_LIBRARIES}) - endif() - if(APPLE) - list(APPEND HIGHGUI_LIBRARIES "-framework VideoDecodeAcceleration" bz2) - endif() -endif(HAVE_FFMPEG) - -if(HAVE_PVAPI) - add_definitions(-DHAVE_PVAPI) - add_definitions(${PVAPI_DEFINITIONS}) - ocv_include_directories(${PVAPI_INCLUDE_PATH}) - set(highgui_srcs src/cap_pvapi.cpp ${highgui_srcs}) - list(APPEND HIGHGUI_LIBRARIES ${PVAPI_LIBRARY}) -endif() - -if(HAVE_GIGE_API) - add_definitions(-DHAVE_GIGE_API) - ocv_include_directories(${GIGEAPI_INCLUDE_PATH}) - set(highgui_srcs src/cap_giganetix.cpp ${highgui_srcs}) - list(APPEND HIGHGUI_LIBRARIES ${GIGEAPI_LIBRARIES}) - list(APPEND highgui_srcs src/cap_giganetix.cpp) -endif(HAVE_GIGE_API) - -if(HAVE_AVFOUNDATION) - list(APPEND highgui_srcs src/cap_avfoundation.mm) - list(APPEND HIGHGUI_LIBRARIES "-framework AVFoundation" "-framework QuartzCore") -endif() - -if(HAVE_QUICKTIME) - list(APPEND highgui_srcs src/cap_qt.cpp) - list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore") -elseif(HAVE_QTKIT) - list(APPEND highgui_srcs src/cap_qtkit.mm) - list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit") -endif() - -if(HAVE_INTELPERC) - list(APPEND highgui_srcs src/cap_intelperc.cpp) - ocv_include_directories(${INTELPERC_INCLUDE_DIR}) - list(APPEND HIGHGUI_LIBRARIES ${INTELPERC_LIBRARIES}) -endif(HAVE_INTELPERC) - -if(IOS) - add_definitions(-DHAVE_IOS=1) - list(APPEND highgui_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm) - list(APPEND HIGHGUI_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary") -endif() - -if(WIN32) - link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only - include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW - include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests -endif() - if(UNIX) #these variables are set by CHECK_MODULE macro foreach(P ${HIGHGUI_INCLUDE_DIRS}) @@ -257,10 +85,10 @@ endif() source_group("Src" FILES ${highgui_srcs} ${highgui_hdrs}) source_group("Include" FILES ${highgui_ext_hdrs}) -ocv_set_module_sources(HEADERS ${highgui_ext_hdrs} SOURCES ${highgui_srcs} ${highgui_hdrs} ${grfmt_srcs} ${grfmt_hdrs}) +ocv_set_module_sources(HEADERS ${highgui_ext_hdrs} SOURCES ${highgui_srcs} ${highgui_hdrs}) ocv_module_include_directories() -ocv_create_module(${GRFMT_LIBS} ${HIGHGUI_LIBRARIES}) +ocv_create_module(${HIGHGUI_LIBRARIES}) if(APPLE) ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS) @@ -294,33 +122,5 @@ set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "") ocv_add_precompiled_headers(${the_module}) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations) -if(WIN32 AND WITH_FFMPEG) - #copy ffmpeg dll to the output folder - if(MSVC64 OR MINGW64) - set(FFMPEG_SUFFIX _64) - endif() - - set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll") - set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll") - set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}") - - if(MSVC_IDE) - add_custom_command(TARGET ${the_module} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E 
copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}" - COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}" - COMMENT "Copying ${ffmpeg_path} to the output directory") - elseif(MSVC AND (CMAKE_GENERATOR MATCHES "Visual")) - add_custom_command(TARGET ${the_module} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}" - COMMENT "Copying ${ffmpeg_path} to the output directory") - else() - add_custom_command(TARGET ${the_module} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}" - COMMENT "Copying ${ffmpeg_path} to the output directory") - endif() - - install(FILES "${ffmpeg_path}" DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs RENAME "${ffmpeg_bare_name_ver}") -endif() - ocv_add_accuracy_tests() ocv_add_perf_tests() diff --git a/modules/highgui/doc/highgui.rst b/modules/highgui/doc/highgui.rst index 02beddbafb..b42ac7a8ef 100644 --- a/modules/highgui/doc/highgui.rst +++ b/modules/highgui/doc/highgui.rst @@ -9,12 +9,9 @@ It provides easy interface to: * Create and manipulate windows that can display images and "remember" their content (no need to handle repaint events from OS). * Add trackbars to the windows, handle simple mouse events as well as keyboard commands. -* Read and write images to/from disk or memory. -* Read video from camera or file and write video to a file. .. toctree:: :maxdepth: 2 user_interface - reading_and_writing_images_and_video qt_new_functions diff --git a/modules/highgui/include/opencv2/highgui.hpp b/modules/highgui/include/opencv2/highgui.hpp index f05825f784..aef9105f74 100644 --- a/modules/highgui/include/opencv2/highgui.hpp +++ b/modules/highgui/include/opencv2/highgui.hpp @@ -44,6 +44,8 @@ #define __OPENCV_HIGHGUI_HPP__ #include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" ///////////////////////// graphical user interface ////////////////////////// @@ -201,392 +203,4 @@ CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change, bool initial_button_state = false); } // cv - - - -//////////////////////////////// image codec //////////////////////////////// -namespace cv -{ - -enum { IMREAD_UNCHANGED = -1, // 8bit, color or not - IMREAD_GRAYSCALE = 0, // 8bit, gray - IMREAD_COLOR = 1, // ?, color - IMREAD_ANYDEPTH = 2, // any depth, ? 
- IMREAD_ANYCOLOR = 4 // ?, any color - }; - -enum { IMWRITE_JPEG_QUALITY = 1, - IMWRITE_JPEG_PROGRESSIVE = 2, - IMWRITE_JPEG_OPTIMIZE = 3, - IMWRITE_PNG_COMPRESSION = 16, - IMWRITE_PNG_STRATEGY = 17, - IMWRITE_PNG_BILEVEL = 18, - IMWRITE_PXM_BINARY = 32, - IMWRITE_WEBP_QUALITY = 64 - }; - -enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0, - IMWRITE_PNG_STRATEGY_FILTERED = 1, - IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, - IMWRITE_PNG_STRATEGY_RLE = 3, - IMWRITE_PNG_STRATEGY_FIXED = 4 - }; - -CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR ); - -CV_EXPORTS_W bool imwrite( const String& filename, InputArray img, - const std::vector& params = std::vector()); - -CV_EXPORTS_W Mat imdecode( InputArray buf, int flags ); - -CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst); - -CV_EXPORTS_W bool imencode( const String& ext, InputArray img, - CV_OUT std::vector& buf, - const std::vector& params = std::vector()); - -} // cv - - - -////////////////////////////////// video io ///////////////////////////////// - -typedef struct CvCapture CvCapture; -typedef struct CvVideoWriter CvVideoWriter; - -namespace cv -{ - -// Camera API -enum { CAP_ANY = 0, // autodetect - CAP_VFW = 200, // platform native - CAP_V4L = 200, - CAP_V4L2 = CAP_V4L, - CAP_FIREWARE = 300, // IEEE 1394 drivers - CAP_FIREWIRE = CAP_FIREWARE, - CAP_IEEE1394 = CAP_FIREWARE, - CAP_DC1394 = CAP_FIREWARE, - CAP_CMU1394 = CAP_FIREWARE, - CAP_QT = 500, // QuickTime - CAP_UNICAP = 600, // Unicap drivers - CAP_DSHOW = 700, // DirectShow (via videoInput) - CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK - CAP_OPENNI = 900, // OpenNI (for Kinect) - CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion) - CAP_ANDROID = 1000, // Android - CAP_XIAPI = 1100, // XIMEA Camera API - CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) - CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK - CAP_MSMF = 1400, // Microsoft Media Foundation (via videoInput) - CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK - }; - -// generic properties (based on DC1394 properties) -enum { CAP_PROP_POS_MSEC =0, - CAP_PROP_POS_FRAMES =1, - CAP_PROP_POS_AVI_RATIO =2, - CAP_PROP_FRAME_WIDTH =3, - CAP_PROP_FRAME_HEIGHT =4, - CAP_PROP_FPS =5, - CAP_PROP_FOURCC =6, - CAP_PROP_FRAME_COUNT =7, - CAP_PROP_FORMAT =8, - CAP_PROP_MODE =9, - CAP_PROP_BRIGHTNESS =10, - CAP_PROP_CONTRAST =11, - CAP_PROP_SATURATION =12, - CAP_PROP_HUE =13, - CAP_PROP_GAIN =14, - CAP_PROP_EXPOSURE =15, - CAP_PROP_CONVERT_RGB =16, - CAP_PROP_WHITE_BALANCE_BLUE_U =17, - CAP_PROP_RECTIFICATION =18, - CAP_PROP_MONOCROME =19, - CAP_PROP_SHARPNESS =20, - CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust refernce level using this feature - CAP_PROP_GAMMA =22, - CAP_PROP_TEMPERATURE =23, - CAP_PROP_TRIGGER =24, - CAP_PROP_TRIGGER_DELAY =25, - CAP_PROP_WHITE_BALANCE_RED_V =26, - CAP_PROP_ZOOM =27, - CAP_PROP_FOCUS =28, - CAP_PROP_GUID =29, - CAP_PROP_ISO_SPEED =30, - CAP_PROP_BACKLIGHT =32, - CAP_PROP_PAN =33, - CAP_PROP_TILT =34, - CAP_PROP_ROLL =35, - CAP_PROP_IRIS =36, - CAP_PROP_SETTINGS =37 - }; - - -// DC1394 only -// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) -// every feature can have only one mode turned on at a time -enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) - CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user - 
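With the image codec API removed from highgui above, callers that only read or write images include the new opencv2/imgcodecs.hpp (opencv2/highgui.hpp keeps working because it now includes it). A migration sketch, assuming the IMREAD_*/IMWRITE_* names carry over unchanged to the new header and using placeholder file names:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>   // imread/imwrite now live here
#include <cstdio>
#include <vector>

int main()
{
    // "input.png" / "output.jpg" are placeholder names for this sketch.
    cv::Mat img = cv::imread("input.png", cv::IMREAD_COLOR);
    if (img.empty())
    {
        std::printf("could not read input.png\n");
        return 1;
    }

    std::vector<int> params;
    params.push_back(cv::IMWRITE_JPEG_QUALITY);   // re-encode at quality 90
    params.push_back(90);
    cv::imwrite("output.jpg", img, params);
    return 0;
}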
CAP_PROP_DC1394_MODE_AUTO = -2, - CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, - CAP_PROP_DC1394_MAX = 31 - }; - - -// OpenNI map generators -enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, - CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, - CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR - }; - -// Properties of cameras available through OpenNI interfaces -enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100, - CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm - CAP_PROP_OPENNI_BASELINE = 102, // in mm - CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels - CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map - // by changing depth generator's view point (if the flag is "on") or - // sets this view point to its normal one (if the flag is "off"). - CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION, - CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, - CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, - CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, - CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, - CAP_PROP_OPENNI_GENERATOR_PRESENT = 109 - }; - -// OpenNI shortcats -enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT, - CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE, - CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE, - CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH, - CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION, - CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION - }; - -// OpenNI data given from depth generator -enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) - CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) - CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) - CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) - CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 - - // Data given from RGB image generator - CAP_OPENNI_BGR_IMAGE = 5, - CAP_OPENNI_GRAY_IMAGE = 6 - }; - -// Supported output modes of OpenNI image generator -enum { CAP_OPENNI_VGA_30HZ = 0, - CAP_OPENNI_SXGA_15HZ = 1, - CAP_OPENNI_SXGA_30HZ = 2, - CAP_OPENNI_QVGA_30HZ = 3, - CAP_OPENNI_QVGA_60HZ = 4 - }; - - -// GStreamer -enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1 - }; - - -// PVAPI -enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast - CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301 // FrameStartTriggerMode: Determines how a frame is initiated - }; - -// PVAPI: FrameStartTriggerMode -enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, // Freerun - CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, // SyncIn1 - CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, // SyncIn2 - CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, // FixedRate - CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 // Software - }; - -// Properties of cameras available through XIMEA SDK interface -enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. - CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. - CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). - CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). - CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. - CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. 
PRM_TRG_SOURCE must be set to TRG_SOFTWARE. - CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input - CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode - CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level - CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output - CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode - CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED - CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality - CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) - CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance - CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain - CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). - CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure - CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure - CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) - CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds - }; - - -// Properties for Android cameras -enum { CAP_PROP_ANDROID_AUTOGRAB = 1024, - CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed - CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed - CAP_PROP_ANDROID_FLASH_MODE = 8001, - CAP_PROP_ANDROID_FOCUS_MODE = 8002, - CAP_PROP_ANDROID_WHITE_BALANCE = 8003, - CAP_PROP_ANDROID_ANTIBANDING = 8004, - CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, - CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, - CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, - CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008 - }; - - -// Android camera output formats -enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR - CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR, - CAP_ANDROID_GREY_FRAME = 1, //Y - CAP_ANDROID_COLOR_FRAME_RGB = 2, - CAP_ANDROID_COLOR_FRAME_BGRA = 3, - CAP_ANDROID_COLOR_FRAME_RGBA = 4 - }; - - -// Android camera flash modes -enum { CAP_ANDROID_FLASH_MODE_AUTO = 0, - CAP_ANDROID_FLASH_MODE_OFF = 1, - CAP_ANDROID_FLASH_MODE_ON = 2, - CAP_ANDROID_FLASH_MODE_RED_EYE = 3, - CAP_ANDROID_FLASH_MODE_TORCH = 4 - }; - - -// Android camera focus modes -enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0, - CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1, - CAP_ANDROID_FOCUS_MODE_EDOF = 2, - CAP_ANDROID_FOCUS_MODE_FIXED = 3, - CAP_ANDROID_FOCUS_MODE_INFINITY = 4, - CAP_ANDROID_FOCUS_MODE_MACRO = 5 - }; - - -// Android camera white balance modes -enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0, - CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1, - CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2, - CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3, - CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4, - CAP_ANDROID_WHITE_BALANCE_SHADE = 5, - CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6, - CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7 - }; - - -// Android camera antibanding modes -enum { CAP_ANDROID_ANTIBANDING_50HZ = 0, - CAP_ANDROID_ANTIBANDING_60HZ = 1, - CAP_ANDROID_ANTIBANDING_AUTO = 2, - CAP_ANDROID_ANTIBANDING_OFF = 3 - }; - - -// Properties of cameras available through AVFOUNDATION interface -enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001, - CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, - CAP_PROP_IOS_DEVICE_FLASH = 9003, - CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, - CAP_PROP_IOS_DEVICE_TORCH = 9005 - }; - - -// Properties of cameras available through Smartek Giganetix Ethernet Vision interface -/* --- Vladimir Litvinenko 
(litvinenko.vladimir@gmail.com) --- */ -enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, - CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, - CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, - CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, - CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, - CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006 - }; - -enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, - CAP_PROP_INTELPERC_PROFILE_IDX = 11002, - CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, - CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, - CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, - CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, - CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007 - }; - -// Intel PerC streams -enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, - CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, - CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR - }; - -enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. - CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. - CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. - CAP_INTELPERC_IMAGE = 3 - }; - - -class IVideoCapture; -class CV_EXPORTS_W VideoCapture -{ -public: - CV_WRAP VideoCapture(); - CV_WRAP VideoCapture(const String& filename); - CV_WRAP VideoCapture(int device); - - virtual ~VideoCapture(); - CV_WRAP virtual bool open(const String& filename); - CV_WRAP virtual bool open(int device); - CV_WRAP virtual bool isOpened() const; - CV_WRAP virtual void release(); - - CV_WRAP virtual bool grab(); - CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0); - virtual VideoCapture& operator >> (CV_OUT Mat& image); - virtual VideoCapture& operator >> (CV_OUT UMat& image); - CV_WRAP virtual bool read(OutputArray image); - - CV_WRAP virtual bool set(int propId, double value); - CV_WRAP virtual double get(int propId); - -protected: - Ptr cap; - Ptr icap; -private: - static Ptr createCameraCapture(int index); -}; - -class CV_EXPORTS_W VideoWriter -{ -public: - CV_WRAP VideoWriter(); - CV_WRAP VideoWriter(const String& filename, int fourcc, double fps, - Size frameSize, bool isColor = true); - - virtual ~VideoWriter(); - CV_WRAP virtual bool open(const String& filename, int fourcc, double fps, - Size frameSize, bool isColor = true); - CV_WRAP virtual bool isOpened() const; - CV_WRAP virtual void release(); - virtual VideoWriter& operator << (const Mat& image); - CV_WRAP virtual void write(const Mat& image); - - CV_WRAP static int fourcc(char c1, char c2, char c3, char c4); - -protected: - Ptr writer; -}; - -template<> CV_EXPORTS void DefaultDeleter::operator ()(CvCapture* obj) const; -template<> CV_EXPORTS void DefaultDeleter::operator ()(CvVideoWriter* obj) const; - -} // cv - #endif diff --git a/modules/highgui/include/opencv2/highgui/highgui_c.h b/modules/highgui/include/opencv2/highgui/highgui_c.h index 130302150f..b6b56ceb02 100644 --- a/modules/highgui/include/opencv2/highgui/highgui_c.h +++ b/modules/highgui/include/opencv2/highgui/highgui_c.h @@ -43,6 +43,8 @@ #define __OPENCV_HIGHGUI_H__ #include "opencv2/core/core_c.h" +#include "opencv2/imgcodecs/imgcodecs_c.h" +#include "opencv2/videoio/videoio_c.h" #ifdef __cplusplus extern "C" { @@ -194,67 +196,6 @@ typedef void (CV_CDECL *CvMouseCallback )(int 
event, int x, int y, int flags, vo CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param CV_DEFAULT(NULL)); -enum -{ -/* 8bit, color or not */ - CV_LOAD_IMAGE_UNCHANGED =-1, -/* 8bit, gray */ - CV_LOAD_IMAGE_GRAYSCALE =0, -/* ?, color */ - CV_LOAD_IMAGE_COLOR =1, -/* any depth, ? */ - CV_LOAD_IMAGE_ANYDEPTH =2, -/* ?, any color */ - CV_LOAD_IMAGE_ANYCOLOR =4 -}; - -/* load image from file - iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED - overrides the other flags - using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED - unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit -*/ -CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); -CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); - -enum -{ - CV_IMWRITE_JPEG_QUALITY =1, - CV_IMWRITE_JPEG_PROGRESSIVE =2, - CV_IMWRITE_JPEG_OPTIMIZE =3, - CV_IMWRITE_PNG_COMPRESSION =16, - CV_IMWRITE_PNG_STRATEGY =17, - CV_IMWRITE_PNG_BILEVEL =18, - CV_IMWRITE_PNG_STRATEGY_DEFAULT =0, - CV_IMWRITE_PNG_STRATEGY_FILTERED =1, - CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, - CV_IMWRITE_PNG_STRATEGY_RLE =3, - CV_IMWRITE_PNG_STRATEGY_FIXED =4, - CV_IMWRITE_PXM_BINARY =32, - CV_IMWRITE_WEBP_QUALITY =64 -}; - -/* save image to file */ -CVAPI(int) cvSaveImage( const char* filename, const CvArr* image, - const int* params CV_DEFAULT(0) ); - -/* decode image stored in the buffer */ -CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); -CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); - -/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */ -CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image, - const int* params CV_DEFAULT(0) ); - -enum -{ - CV_CVTIMG_FLIP =1, - CV_CVTIMG_SWAP_RB =2 -}; - -/* utility function: convert one image to another with optional vertical flip */ -CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0)); - /* wait for key event infinitely (delay<=0) or for "delay" milliseconds */ CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0)); @@ -268,363 +209,10 @@ CVAPI(void) cvUpdateWindow(const char* window_name); /****************************************************************************************\ -* Working with Video Files and Cameras * -\****************************************************************************************/ -/* "black box" capture structure */ -typedef struct CvCapture CvCapture; - -/* start capturing frames from video file */ -CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); - -enum -{ - CV_CAP_ANY =0, // autodetect - - CV_CAP_MIL =100, // MIL proprietary drivers - - CV_CAP_VFW =200, // platform native - CV_CAP_V4L =200, - CV_CAP_V4L2 =200, - - CV_CAP_FIREWARE =300, // IEEE 1394 drivers - CV_CAP_FIREWIRE =300, - CV_CAP_IEEE1394 =300, - CV_CAP_DC1394 =300, - CV_CAP_CMU1394 =300, - - CV_CAP_STEREO =400, // TYZX proprietary drivers - CV_CAP_TYZX =400, - CV_TYZX_LEFT =400, - CV_TYZX_RIGHT =401, - CV_TYZX_COLOR =402, - CV_TYZX_Z =403, - - CV_CAP_QT =500, // QuickTime - - CV_CAP_UNICAP =600, // Unicap drivers - - CV_CAP_DSHOW =700, // DirectShow (via videoInput) - CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput) - - CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK - - CV_CAP_OPENNI =900, // OpenNI (for Kinect) - CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion) - 
- CV_CAP_ANDROID =1000, // Android - CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera - CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera - - CV_CAP_XIAPI =1100, // XIMEA Camera API - - CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) - - CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK - - CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK -}; - -/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */ -CVAPI(CvCapture*) cvCreateCameraCapture( int index ); - -/* grab a frame, return 1 on success, 0 on fail. - this function is thought to be fast */ -CVAPI(int) cvGrabFrame( CvCapture* capture ); - -/* get the frame grabbed with cvGrabFrame(..) - This function may apply some frame processing like - frame decompression, flipping etc. - !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ -CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) ); - -/* Just a combination of cvGrabFrame and cvRetrieveFrame - !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ -CVAPI(IplImage*) cvQueryFrame( CvCapture* capture ); - -/* stop capturing/reading and free resources */ -CVAPI(void) cvReleaseCapture( CvCapture** capture ); - -enum -{ - // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) - // every feature can have only one mode turned on at a time - CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) - CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user - CV_CAP_PROP_DC1394_MODE_AUTO = -2, - CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, - CV_CAP_PROP_POS_MSEC =0, - CV_CAP_PROP_POS_FRAMES =1, - CV_CAP_PROP_POS_AVI_RATIO =2, - CV_CAP_PROP_FRAME_WIDTH =3, - CV_CAP_PROP_FRAME_HEIGHT =4, - CV_CAP_PROP_FPS =5, - CV_CAP_PROP_FOURCC =6, - CV_CAP_PROP_FRAME_COUNT =7, - CV_CAP_PROP_FORMAT =8, - CV_CAP_PROP_MODE =9, - CV_CAP_PROP_BRIGHTNESS =10, - CV_CAP_PROP_CONTRAST =11, - CV_CAP_PROP_SATURATION =12, - CV_CAP_PROP_HUE =13, - CV_CAP_PROP_GAIN =14, - CV_CAP_PROP_EXPOSURE =15, - CV_CAP_PROP_CONVERT_RGB =16, - CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17, - CV_CAP_PROP_RECTIFICATION =18, - CV_CAP_PROP_MONOCROME =19, - CV_CAP_PROP_SHARPNESS =20, - CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera, - // user can adjust refernce level - // using this feature - CV_CAP_PROP_GAMMA =22, - CV_CAP_PROP_TEMPERATURE =23, - CV_CAP_PROP_TRIGGER =24, - CV_CAP_PROP_TRIGGER_DELAY =25, - CV_CAP_PROP_WHITE_BALANCE_RED_V =26, - CV_CAP_PROP_ZOOM =27, - CV_CAP_PROP_FOCUS =28, - CV_CAP_PROP_GUID =29, - CV_CAP_PROP_ISO_SPEED =30, - CV_CAP_PROP_MAX_DC1394 =31, - CV_CAP_PROP_BACKLIGHT =32, - CV_CAP_PROP_PAN =33, - CV_CAP_PROP_TILT =34, - CV_CAP_PROP_ROLL =35, - CV_CAP_PROP_IRIS =36, - CV_CAP_PROP_SETTINGS =37, - - CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only - CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed - CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed - - // OpenNI map generators - CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, - CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, - CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR, - - // Properties of cameras available through OpenNI interfaces - CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100, - 
CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm - CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm - CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels - CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag - CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map - // by changing depth generator's view point (if the flag is "on") or - // sets this view point to its normal one (if the flag is "off"). - CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, - CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, - CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, - CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, - - CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, - - CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, - CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE, - CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE, - CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH, - CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION, - CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, - - // Properties of cameras available through GStreamer interface - CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1 - - // PVAPI - CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast - CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated - - // Properties of cameras available through XIMEA SDK interface - CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. - CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. - CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). - CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). - CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. - CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. - CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input - CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode - CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level - CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output - CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode - CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED - CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality - CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) - CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance - CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain - CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). 
- CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure - CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure - CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) - CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds - - // Properties for Android cameras - CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, - CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, - CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, - CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, - CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, - CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, - CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, - CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, - CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009, - CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010, - - // Properties of cameras available through AVFOUNDATION interface - CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, - CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, - CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, - CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, - CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, - - // Properties of cameras available through Smartek Giganetix Ethernet Vision interface - /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ - CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, - CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, - CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, - CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, - CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, - CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, - - CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, - CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002, - CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, - CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, - CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, - CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, - CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007, - - // Intel PerC streams - CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, - CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, - CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR -}; - -enum -{ - // Data given from depth generator. - CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) - CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) - CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) - CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) - CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 - - // Data given from RGB image generator. 
- CV_CAP_OPENNI_BGR_IMAGE = 5, - CV_CAP_OPENNI_GRAY_IMAGE = 6 -}; - -// Supported output modes of OpenNI image generator -enum -{ - CV_CAP_OPENNI_VGA_30HZ = 0, - CV_CAP_OPENNI_SXGA_15HZ = 1, - CV_CAP_OPENNI_SXGA_30HZ = 2, - CV_CAP_OPENNI_QVGA_30HZ = 3, - CV_CAP_OPENNI_QVGA_60HZ = 4 -}; - -//supported by Android camera output formats -enum -{ - CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR - CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR, - CV_CAP_ANDROID_GREY_FRAME = 1, //Y - CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, - CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, - CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4 -}; - -// supported Android camera flash modes -enum -{ - CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, - CV_CAP_ANDROID_FLASH_MODE_OFF, - CV_CAP_ANDROID_FLASH_MODE_ON, - CV_CAP_ANDROID_FLASH_MODE_RED_EYE, - CV_CAP_ANDROID_FLASH_MODE_TORCH -}; - -// supported Android camera focus modes -enum -{ - CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, - CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE, - CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO, - CV_CAP_ANDROID_FOCUS_MODE_EDOF, - CV_CAP_ANDROID_FOCUS_MODE_FIXED, - CV_CAP_ANDROID_FOCUS_MODE_INFINITY, - CV_CAP_ANDROID_FOCUS_MODE_MACRO -}; - -// supported Android camera white balance modes -enum -{ - CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, - CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT, - CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT, - CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT, - CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT, - CV_CAP_ANDROID_WHITE_BALANCE_SHADE, - CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT, - CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT -}; - -// supported Android camera antibanding modes -enum -{ - CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, - CV_CAP_ANDROID_ANTIBANDING_60HZ, - CV_CAP_ANDROID_ANTIBANDING_AUTO, - CV_CAP_ANDROID_ANTIBANDING_OFF -}; - -enum -{ - CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. - CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. - CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. 
- CV_CAP_INTELPERC_IMAGE = 3 -}; - -/* retrieve or set capture properties */ -CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id ); -CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value ); - -// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY -CVAPI(int) cvGetCaptureDomain( CvCapture* capture); - -/* "black box" video file writer structure */ -typedef struct CvVideoWriter CvVideoWriter; - -#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24)) - -CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) -{ - return CV_FOURCC_MACRO(c1, c2, c3, c4); -} - -#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */ -#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */ - -/* initialize video file writer */ -CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, - double fps, CvSize frame_size, - int is_color CV_DEFAULT(1)); - -/* write frame to video file */ -CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); - -/* close video file writer */ -CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); - -/****************************************************************************************\ * Obsolete functions/synonyms * \****************************************************************************************/ -#define cvCaptureFromFile cvCreateFileCapture -#define cvCaptureFromCAM cvCreateCameraCapture -#define cvCaptureFromAVI cvCaptureFromFile -#define cvCreateAVIWriter cvCreateVideoWriter -#define cvWriteToAVI cvWriteFrame #define cvAddSearchPath(path) #define cvvInitSystem cvInitSystem #define cvvNamedWindow cvNamedWindow @@ -632,12 +220,9 @@ CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); #define cvvResizeWindow cvResizeWindow #define cvvDestroyWindow cvDestroyWindow #define cvvCreateTrackbar cvCreateTrackbar -#define cvvLoadImage(name) cvLoadImage((name),1) -#define cvvSaveImage cvSaveImage #define cvvAddSearchPath cvAddSearchPath #define cvvWaitKey(name) cvWaitKey(0) #define cvvWaitKeyEx(name,delay) cvWaitKey(delay) -#define cvvConvertImage cvConvertImage #define HG_AUTOSIZE CV_WINDOW_AUTOSIZE #define set_preprocess_func cvSetPreprocessFuncWin32 #define set_postprocess_func cvSetPostprocessFuncWin32 diff --git a/modules/highgui/src/precomp.hpp b/modules/highgui/src/precomp.hpp index bb4ed8ede5..c9517783f9 100644 --- a/modules/highgui/src/precomp.hpp +++ b/modules/highgui/src/precomp.hpp @@ -47,7 +47,10 @@ #include "opencv2/core/utility.hpp" #include "opencv2/core/private.hpp" +#include "opencv2/imgcodecs.hpp" + #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgcodecs/imgcodecs_c.h" #include "opencv2/highgui/highgui_c.h" #include @@ -92,90 +95,6 @@ #define CV_WINDOW_MAGIC_VAL 0x00420042 #define CV_TRACKBAR_MAGIC_VAL 0x00420043 -/***************************** CvCapture structure ******************************/ - -struct CvCapture -{ - virtual ~CvCapture() {} - virtual double getProperty(int) { return 0; } - virtual bool setProperty(int, double) { return 0; } - virtual bool grabFrame() { return true; } - virtual IplImage* retrieveFrame(int) { return 0; } - virtual int getCaptureDomain() { return CV_CAP_ANY; } // Return the type of the capture object: CV_CAP_VFW, etc... 
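The C capture and writer entry points removed from highgui_c.h above are not dropped: they move into opencv2/videoio/videoio_c.h, which highgui_c.h now includes. Below is a minimal sketch of existing C-API code that should keep compiling after the split, assuming only the header path changes and the relocated declarations keep the signatures shown above; the function name and file paths are illustrative only.

    #include "opencv2/core/core_c.h"
    #include "opencv2/videoio/videoio_c.h"   // formerly reached through opencv2/highgui/highgui_c.h

    // Copy the first frame of a video file into a new video file (hypothetical helper).
    int dump_first_frame(const char* src, const char* dst)
    {
        CvCapture* cap = cvCreateFileCapture(src);    // open the input file
        if (!cap) return -1;

        IplImage* frame = cvQueryFrame(cap);          // grab + retrieve; do not release or modify
        int ok = 0;
        if (frame)
        {
            CvVideoWriter* wr = cvCreateVideoWriter(dst, CV_FOURCC('I','Y','U','V'),
                                                    25.0, cvGetSize(frame), 1);
            if (wr)
            {
                ok = cvWriteFrame(wr, frame);         // write a single frame
                cvReleaseVideoWriter(&wr);
            }
        }
        cvReleaseCapture(&cap);
        return ok ? 0 : -1;
    }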
-}; - -/*************************** CvVideoWriter structure ****************************/ - -struct CvVideoWriter -{ - virtual ~CvVideoWriter() {} - virtual bool writeFrame(const IplImage*) { return false; } -}; - -CvCapture * cvCreateCameraCapture_V4L( int index ); -CvCapture * cvCreateCameraCapture_DC1394( int index ); -CvCapture * cvCreateCameraCapture_DC1394_2( int index ); -CvCapture* cvCreateCameraCapture_MIL( int index ); -CvCapture* cvCreateCameraCapture_Giganetix( int index ); -CvCapture * cvCreateCameraCapture_CMU( int index ); -CV_IMPL CvCapture * cvCreateCameraCapture_TYZX( int index ); -CvCapture* cvCreateFileCapture_Win32( const char* filename ); -CvCapture* cvCreateCameraCapture_VFW( int index ); -CvCapture* cvCreateFileCapture_VFW( const char* filename ); -CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); -CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); -CvCapture* cvCreateCameraCapture_DShow( int index ); -CvCapture* cvCreateCameraCapture_MSMF( int index ); -CvCapture* cvCreateFileCapture_MSMF (const char* filename); -CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); -CvCapture* cvCreateCameraCapture_OpenNI( int index ); -CvCapture* cvCreateFileCapture_OpenNI( const char* filename ); -CvCapture* cvCreateCameraCapture_Android( int index ); -CvCapture* cvCreateCameraCapture_XIMEA( int index ); -CvCapture* cvCreateCameraCapture_AVFoundation(int index); - -CVAPI(int) cvHaveImageReader(const char* filename); -CVAPI(int) cvHaveImageWriter(const char* filename); - -CvCapture* cvCreateFileCapture_Images(const char* filename); -CvVideoWriter* cvCreateVideoWriter_Images(const char* filename); - -CvCapture* cvCreateFileCapture_XINE (const char* filename); - - - - -#define CV_CAP_GSTREAMER_1394 0 -#define CV_CAP_GSTREAMER_V4L 1 -#define CV_CAP_GSTREAMER_V4L2 2 -#define CV_CAP_GSTREAMER_FILE 3 - -CvCapture* cvCreateCapture_GStreamer(int type, const char *filename); -CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename); - - -CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); - -CvCapture * cvCreateFileCapture_QT (const char * filename); -CvCapture * cvCreateCameraCapture_QT (const int index); - -CvVideoWriter* cvCreateVideoWriter_QT ( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); - -CvCapture* cvCreateFileCapture_AVFoundation (const char * filename); -CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); - - -CvCapture * cvCreateCameraCapture_Unicap (const int index); -CvCapture * cvCreateCameraCapture_PvAPI (const int index); -CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc, - double fps, CvSize frameSize, int is_color ); - //Yannick Verdie 2010 void cvSetModeWindow_W32(const char* name, double prop_value); void cvSetModeWindow_GTK(const char* name, double prop_value); @@ -196,20 +115,6 @@ double cvGetRatioWindow_GTK(const char* name); double cvGetOpenGlProp_W32(const char* name); double cvGetOpenGlProp_GTK(const char* name); -namespace cv -{ - class IVideoCapture - { - public: - virtual ~IVideoCapture() {} - virtual double getProperty(int) { return 0; } - virtual bool setProperty(int, double) { return 0; } - virtual bool grabFrame() = 
0; - virtual bool retrieveFrame(int, cv::OutputArray) = 0; - virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc... - }; -}; - //for QT #if defined (HAVE_QT) double cvGetModeWindow_QT(const char* name); diff --git a/modules/highgui/test/test_precomp.hpp b/modules/highgui/test/test_precomp.hpp index 826d165749..e4d7797158 100644 --- a/modules/highgui/test/test_precomp.hpp +++ b/modules/highgui/test/test_precomp.hpp @@ -11,80 +11,11 @@ #include #include "opencv2/ts.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" -#include "opencv2/imgproc/imgproc_c.h" +//#include "opencv2/imgproc.hpp" +//#include "opencv2/imgcodecs.hpp" +//#include "opencv2/highgui.hpp" +//#include "opencv2/imgproc/imgproc_c.h" -#include "opencv2/core/private.hpp" - -#if defined(HAVE_DSHOW) || \ - defined(HAVE_TYZX) || \ - defined(HAVE_VFW) || \ - defined(HAVE_LIBV4L) || \ - (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \ - defined(HAVE_GSTREAMER) || \ - defined(HAVE_DC1394_2) || \ - defined(HAVE_DC1394) || \ - defined(HAVE_CMU1394) || \ - defined(HAVE_MIL) || \ - defined(HAVE_QUICKTIME) || \ - defined(HAVE_QTKIT) || \ - defined(HAVE_UNICAP) || \ - defined(HAVE_PVAPI) || \ - defined(HAVE_OPENNI) || \ - defined(HAVE_XIMEA) || \ - defined(HAVE_AVFOUNDATION) || \ - defined(HAVE_GIGE_API) || \ - defined(HAVE_INTELPERC) || \ - (0) - //defined(HAVE_ANDROID_NATIVE_CAMERA) || - enable after #1193 -# define BUILD_WITH_CAMERA_SUPPORT 1 -#else -# define BUILD_WITH_CAMERA_SUPPORT 0 -#endif - -#if defined(HAVE_XINE) || \ - defined(HAVE_GSTREAMER) || \ - defined(HAVE_QUICKTIME) || \ - defined(HAVE_QTKIT) || \ - defined(HAVE_AVFOUNDATION) || \ - /*defined(HAVE_OPENNI) || too specialized */ \ - defined(HAVE_FFMPEG) || \ - defined(HAVE_MSMF) -# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1 -#else -# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 -#endif - -#if /*defined(HAVE_XINE) || */\ - defined(HAVE_GSTREAMER) || \ - defined(HAVE_QUICKTIME) || \ - defined(HAVE_QTKIT) || \ - defined(HAVE_AVFOUNDATION) || \ - defined(HAVE_FFMPEG) || \ - defined(HAVE_MSMF) -# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1 -#else -# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0 -#endif - -namespace cvtest -{ - -string fourccToString(int fourcc); - -struct VideoFormat -{ - VideoFormat() { fourcc = -1; } - VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {} - bool empty() const { return ext.empty(); } - - string ext; - int fourcc; -}; - -extern const VideoFormat g_specific_fmt_list[]; - -} +//#include "opencv2/core/private.hpp" #endif diff --git a/modules/imgcodecs/CMakeLists.txt b/modules/imgcodecs/CMakeLists.txt new file mode 100644 index 0000000000..8cf60e5469 --- /dev/null +++ b/modules/imgcodecs/CMakeLists.txt @@ -0,0 +1,131 @@ +set(the_description "Image codecs") +ocv_add_module(imgcodecs opencv_imgproc) + +# ---------------------------------------------------------------------------- +# CMake file for imgcodecs. See root CMakeLists.txt +# Some parts taken from version of Hartmut Seichter, HIT Lab NZ. 
+# Jose Luis Blanco, 2008 +# ---------------------------------------------------------------------------- + +ocv_clear_vars(GRFMT_LIBS) + +if(HAVE_WINRT_CX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW") +endif() + +if(HAVE_PNG OR HAVE_TIFF OR HAVE_OPENEXR) + ocv_include_directories(${ZLIB_INCLUDE_DIRS}) + list(APPEND GRFMT_LIBS ${ZLIB_LIBRARIES}) +endif() + +if(HAVE_JPEG) + ocv_include_directories(${JPEG_INCLUDE_DIR}) + list(APPEND GRFMT_LIBS ${JPEG_LIBRARIES}) +endif() + +if(WITH_WEBP) + add_definitions(-DHAVE_WEBP) + ocv_include_directories(${WEBP_INCLUDE_DIR}) + list(APPEND GRFMT_LIBS ${WEBP_LIBRARIES}) +endif() + +if(HAVE_PNG) + add_definitions(${PNG_DEFINITIONS}) + ocv_include_directories(${PNG_INCLUDE_DIR}) + list(APPEND GRFMT_LIBS ${PNG_LIBRARIES}) +endif() + +if(HAVE_TIFF) + ocv_include_directories(${TIFF_INCLUDE_DIR}) + list(APPEND GRFMT_LIBS ${TIFF_LIBRARIES}) +endif() + +if(HAVE_JASPER) + ocv_include_directories(${JASPER_INCLUDE_DIR}) + list(APPEND GRFMT_LIBS ${JASPER_LIBRARIES}) +endif() + +if(HAVE_OPENEXR) + include_directories(SYSTEM ${OPENEXR_INCLUDE_PATHS}) + list(APPEND GRFMT_LIBS ${OPENEXR_LIBRARIES}) +endif() + +file(GLOB grfmt_hdrs src/grfmt*.hpp) +file(GLOB grfmt_srcs src/grfmt*.cpp) +list(APPEND grfmt_hdrs src/bitstrm.hpp) +list(APPEND grfmt_srcs src/bitstrm.cpp) +list(APPEND grfmt_hdrs src/rgbe.hpp) +list(APPEND grfmt_srcs src/rgbe.cpp) + +source_group("Src\\grfmts" FILES ${grfmt_hdrs} ${grfmt_srcs}) + +set(imgcodecs_hdrs + src/precomp.hpp + src/utils.hpp + ) + +set(imgcodecs_srcs + src/loadsave.cpp + src/utils.cpp + ) + +file(GLOB imgcodecs_ext_hdrs "include/opencv2/*.hpp" "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h") + +if(IOS) + add_definitions(-DHAVE_IOS=1) + list(APPEND imgcodecs_srcs src/ios_conversions.mm) + list(APPEND IMGCODECS_LIBRARIES "-framework Accelerate" "-framework CoreGraphics" "-framework CoreImage" "-framework QuartzCore" "-framework AssetsLibrary") +endif() + +if(UNIX) + #these variables are set by CHECK_MODULE macro + foreach(P ${IMGCODECS_INCLUDE_DIRS}) + ocv_include_directories(${P}) + endforeach() + + foreach(P ${IMGCODECS_LIBRARY_DIRS}) + link_directories(${P}) + endforeach() +endif() + +source_group("Src" FILES ${imgcodecs_srcs} ${imgcodecs_hdrs}) +source_group("Include" FILES ${imgcodecs_ext_hdrs}) +ocv_set_module_sources(HEADERS ${imgcodecs_ext_hdrs} SOURCES ${imgcodecs_srcs} ${imgcodecs_hdrs} ${grfmt_srcs} ${grfmt_hdrs}) +ocv_module_include_directories() + +ocv_create_module(${GRFMT_LIBS} ${IMGCODECS_LIBRARIES}) + +if(APPLE) + ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS) + if(HAVE_OBJC_EXCEPTIONS) + foreach(source ${OPENCV_MODULE_${the_module}_SOURCES}) + if("${source}" MATCHES "\\.mm$") + get_source_file_property(flags "${source}" COMPILE_FLAGS) + if(flags) + set(flags "${_flags} -fobjc-exceptions") + else() + set(flags "-fobjc-exceptions") + endif() + + set_source_files_properties("${source}" PROPERTIES COMPILE_FLAGS "${flags}") + endif() + endforeach() + endif() +endif() + +if(BUILD_SHARED_LIBS) + add_definitions(-DIMGCODECS_EXPORTS) +endif() + +if(MSVC) + set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /NODEFAULTLIB:libcmt.lib /DEBUG") +endif() + +#stop automatic dependencies propagation for this module +set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "") + +ocv_add_precompiled_headers(${the_module}) +ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations) + +ocv_add_accuracy_tests() 
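With this module in place, image file I/O comes from opencv_imgcodecs rather than opencv_highgui. A minimal sketch of client code against the new header, assuming only the include and the link target change for existing imread/imwrite users (the file names are placeholders):

    #include <vector>
    #include "opencv2/imgcodecs.hpp"   // replaces opencv2/highgui.hpp for file I/O only

    int main()
    {
        // Read as 8-bit grayscale regardless of how many channels the file stores.
        cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
        if (img.empty())
            return 1;                  // missing file or unsupported format

        // Re-encode as PNG with maximum compression.
        std::vector<int> params;
        params.push_back(cv::IMWRITE_PNG_COMPRESSION);
        params.push_back(9);
        return cv::imwrite("output.png", img, params) ? 0 : 1;
    }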
+ocv_add_perf_tests() diff --git a/modules/imgcodecs/doc/imgcodecs.rst b/modules/imgcodecs/doc/imgcodecs.rst new file mode 100644 index 0000000000..bcb3131f56 --- /dev/null +++ b/modules/imgcodecs/doc/imgcodecs.rst @@ -0,0 +1,10 @@ +***************************************** +imgcodecs. Image file reading and writing +***************************************** + +This module of the OpenCV help you read and write images to/from disk or memory. + +.. toctree:: + :maxdepth: 2 + + reading_and_writing_images diff --git a/modules/imgcodecs/doc/reading_and_writing_images.rst b/modules/imgcodecs/doc/reading_and_writing_images.rst new file mode 100644 index 0000000000..9f8dcaafef --- /dev/null +++ b/modules/imgcodecs/doc/reading_and_writing_images.rst @@ -0,0 +1,187 @@ +Reading and Writing Images +========================== + +.. highlight:: cpp + +imdecode +-------- +Reads an image from a buffer in memory. + +.. ocv:function:: Mat imdecode( InputArray buf, int flags ) + +.. ocv:function:: Mat imdecode( InputArray buf, int flags, Mat* dst ) + +.. ocv:cfunction:: IplImage* cvDecodeImage( const CvMat* buf, int iscolor=CV_LOAD_IMAGE_COLOR) + +.. ocv:cfunction:: CvMat* cvDecodeImageM( const CvMat* buf, int iscolor=CV_LOAD_IMAGE_COLOR) + +.. ocv:pyfunction:: cv2.imdecode(buf, flags) -> retval + + :param buf: Input array or vector of bytes. + + :param flags: The same flags as in :ocv:func:`imread` . + + :param dst: The optional output placeholder for the decoded matrix. It can save the image reallocations when the function is called repeatedly for images of the same size. + +The function reads an image from the specified buffer in the memory. +If the buffer is too short or contains invalid data, the empty matrix/image is returned. + +See +:ocv:func:`imread` for the list of supported formats and flags description. + +.. note:: In the case of color images, the decoded images will have the channels stored in ``B G R`` order. + +imencode +-------- +Encodes an image into a memory buffer. + +.. ocv:function:: bool imencode( const String& ext, InputArray img, vector& buf, const vector& params=vector()) + +.. ocv:cfunction:: CvMat* cvEncodeImage( const char* ext, const CvArr* image, const int* params=0 ) + +.. ocv:pyfunction:: cv2.imencode(ext, img[, params]) -> retval, buf + + :param ext: File extension that defines the output format. + + :param img: Image to be written. + + :param buf: Output buffer resized to fit the compressed image. + + :param params: Format-specific parameters. See :ocv:func:`imwrite` . + +The function compresses the image and stores it in the memory buffer that is resized to fit the result. +See +:ocv:func:`imwrite` for the list of supported formats and flags description. + +.. note:: ``cvEncodeImage`` returns single-row matrix of type ``CV_8UC1`` that contains encoded image as array of bytes. + +imread +------ +Loads an image from a file. + +.. ocv:function:: Mat imread( const String& filename, int flags=IMREAD_COLOR ) + +.. ocv:pyfunction:: cv2.imread(filename[, flags]) -> retval + +.. ocv:cfunction:: IplImage* cvLoadImage( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR ) + +.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR ) + + :param filename: Name of file to be loaded. + + :param flags: Flags specifying the color type of a loaded image: + + * CV_LOAD_IMAGE_ANYDEPTH - If set, return 16-bit/32-bit image when the input has the corresponding depth, otherwise convert it to 8-bit. 
+ + * CV_LOAD_IMAGE_COLOR - If set, always convert image to the color one + + * CV_LOAD_IMAGE_GRAYSCALE - If set, always convert image to the grayscale one + + * **>0** Return a 3-channel color image. + .. note:: In the current implementation the alpha channel, if any, is stripped from the output image. Use negative value if you need the alpha channel. + + * **=0** Return a grayscale image. + + * **<0** Return the loaded image as is (with alpha channel). + +The function ``imread`` loads an image from the specified file and returns it. If the image cannot be read (because of missing file, improper permissions, unsupported or invalid format), the function returns an empty matrix ( ``Mat::data==NULL`` ). Currently, the following file formats are supported: + + * Windows bitmaps - ``*.bmp, *.dib`` (always supported) + + * JPEG files - ``*.jpeg, *.jpg, *.jpe`` (see the *Notes* section) + + * JPEG 2000 files - ``*.jp2`` (see the *Notes* section) + + * Portable Network Graphics - ``*.png`` (see the *Notes* section) + + * WebP - ``*.webp`` (see the *Notes* section) + + * Portable image format - ``*.pbm, *.pgm, *.ppm`` (always supported) + + * Sun rasters - ``*.sr, *.ras`` (always supported) + + * TIFF files - ``*.tiff, *.tif`` (see the *Notes* section) + +.. note:: + + * The function determines the type of an image by the content, not by the file extension. + + * On Microsoft Windows* OS and MacOSX*, the codecs shipped with an OpenCV image (libjpeg, libpng, libtiff, and libjasper) are used by default. So, OpenCV can always read JPEGs, PNGs, and TIFFs. On MacOSX, there is also an option to use native MacOSX image readers. But beware that currently these native image loaders give images with different pixel values because of the color management embedded into MacOSX. + + * On Linux*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for codecs supplied with an OS image. Install the relevant packages (do not forget the development files, for example, "libjpeg-dev", in Debian* and Ubuntu*) to get the codec support or turn on the ``OPENCV_BUILD_3RDPARTY_LIBS`` flag in CMake. + +.. note:: In the case of color images, the decoded images will have the channels stored in ``B G R`` order. + +imwrite +----------- +Saves an image to a specified file. + +.. ocv:function:: bool imwrite( const String& filename, InputArray img, const vector& params=vector() ) + +.. ocv:pyfunction:: cv2.imwrite(filename, img[, params]) -> retval + +.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image, const int* params=0 ) + + :param filename: Name of the file. + + :param image: Image to be saved. + + :param params: Format-specific save parameters encoded as pairs ``paramId_1, paramValue_1, paramId_2, paramValue_2, ...`` . The following parameters are currently supported: + + * For JPEG, it can be a quality ( ``CV_IMWRITE_JPEG_QUALITY`` ) from 0 to 100 (the higher is the better). Default value is 95. + + * For WEBP, it can be a quality ( CV_IMWRITE_WEBP_QUALITY ) from 1 to 100 (the higher is the better). + By default (without any parameter) and for quality above 100 the lossless compression is used. + + * For PNG, it can be the compression level ( ``CV_IMWRITE_PNG_COMPRESSION`` ) from 0 to 9. A higher value means a smaller size and longer compression time. Default value is 3. + + * For PPM, PGM, or PBM, it can be a binary format flag ( ``CV_IMWRITE_PXM_BINARY`` ), 0 or 1. Default value is 1. + +The function ``imwrite`` saves the image to the specified file. 
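Before the details of ``imwrite`` below, here is a short sketch of the in-memory pair ``imencode``/``imdecode`` documented above, which follow the same flag and parameter conventions; the synthetic test image is an assumption made purely for illustration ::

    #include <vector>
    #include "opencv2/imgcodecs.hpp"

    int main()
    {
        cv::Mat img(64, 64, CV_8UC3, cv::Scalar(0, 0, 255));   // a small solid-red BGR test image

        std::vector<uchar> buf;
        if (!cv::imencode(".png", img, buf))                   // the extension string selects the codec
            return 1;

        cv::Mat decoded = cv::imdecode(buf, cv::IMREAD_UNCHANGED);
        return decoded.empty() ? 1 : 0;                        // an empty matrix signals a decode error
    }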
The image format is chosen based on the ``filename`` extension (see +:ocv:func:`imread` for the list of extensions). Only 8-bit (or 16-bit unsigned (``CV_16U``) in case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel (with 'BGR' channel order) images can be saved using this function. If the format, depth or channel order is different, use +:ocv:func:`Mat::convertTo` , and +:ocv:func:`cvtColor` to convert it before saving. Or, use the universal :ocv:class:`FileStorage` I/O functions to save the image to XML or YAML format. + +It is possible to store PNG images with an alpha channel using this function. To do this, create 8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535. The sample below shows how to create such a BGRA image and store to PNG file. It also demonstrates how to set custom compression parameters :: + + #include + #include + #include + + using namespace cv; + using namespace std; + + void createAlphaMat(Mat &mat) + { + for (int i = 0; i < mat.rows; ++i) { + for (int j = 0; j < mat.cols; ++j) { + Vec4b& rgba = mat.at(i, j); + rgba[0] = UCHAR_MAX; + rgba[1] = saturate_cast((float (mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX); + rgba[2] = saturate_cast((float (mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX); + rgba[3] = saturate_cast(0.5 * (rgba[1] + rgba[2])); + } + } + } + + int main(int argv, char **argc) + { + // Create mat with alpha channel + Mat mat(480, 640, CV_8UC4); + createAlphaMat(mat); + + vector compression_params; + compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + compression_params.push_back(9); + + try { + imwrite("alpha.png", mat, compression_params); + } + catch (runtime_error& ex) { + fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); + return 1; + } + + fprintf(stdout, "Saved PNG file with alpha data.\n"); + return 0; + } diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp new file mode 100644 index 0000000000..81f8a45f65 --- /dev/null +++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp @@ -0,0 +1,91 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGCODECS_HPP__ +#define __OPENCV_IMGCODECS_HPP__ + +#include "opencv2/core.hpp" + +//////////////////////////////// image codec //////////////////////////////// +namespace cv +{ + +enum { IMREAD_UNCHANGED = -1, // 8bit, color or not + IMREAD_GRAYSCALE = 0, // 8bit, gray + IMREAD_COLOR = 1, // ?, color + IMREAD_ANYDEPTH = 2, // any depth, ? + IMREAD_ANYCOLOR = 4 // ?, any color + }; + +enum { IMWRITE_JPEG_QUALITY = 1, + IMWRITE_JPEG_PROGRESSIVE = 2, + IMWRITE_JPEG_OPTIMIZE = 3, + IMWRITE_PNG_COMPRESSION = 16, + IMWRITE_PNG_STRATEGY = 17, + IMWRITE_PNG_BILEVEL = 18, + IMWRITE_PXM_BINARY = 32, + IMWRITE_WEBP_QUALITY = 64 + }; + +enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0, + IMWRITE_PNG_STRATEGY_FILTERED = 1, + IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, + IMWRITE_PNG_STRATEGY_RLE = 3, + IMWRITE_PNG_STRATEGY_FIXED = 4 + }; + +CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR ); + +CV_EXPORTS_W bool imwrite( const String& filename, InputArray img, + const std::vector& params = std::vector()); + +CV_EXPORTS_W Mat imdecode( InputArray buf, int flags ); + +CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst); + +CV_EXPORTS_W bool imencode( const String& ext, InputArray img, + CV_OUT std::vector& buf, + const std::vector& params = std::vector()); + +} // cv + +#endif //__OPENCV_IMGCODECS_HPP__ diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs.hpp new file mode 100644 index 0000000000..a3cd232645 --- /dev/null +++ b/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/imgcodecs.hpp" diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h b/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h new file mode 100644 index 0000000000..f0c2ae13fe --- /dev/null +++ b/modules/imgcodecs/include/opencv2/imgcodecs/imgcodecs_c.h @@ -0,0 +1,129 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGCODECS_H__ +#define __OPENCV_IMGCODECS_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +enum +{ +/* 8bit, color or not */ + CV_LOAD_IMAGE_UNCHANGED =-1, +/* 8bit, gray */ + CV_LOAD_IMAGE_GRAYSCALE =0, +/* ?, color */ + CV_LOAD_IMAGE_COLOR =1, +/* any depth, ? */ + CV_LOAD_IMAGE_ANYDEPTH =2, +/* ?, any color */ + CV_LOAD_IMAGE_ANYCOLOR =4 +}; + +/* load image from file + iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED + overrides the other flags + using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED + unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit +*/ +CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +enum +{ + CV_IMWRITE_JPEG_QUALITY =1, + CV_IMWRITE_JPEG_PROGRESSIVE =2, + CV_IMWRITE_JPEG_OPTIMIZE =3, + CV_IMWRITE_PNG_COMPRESSION =16, + CV_IMWRITE_PNG_STRATEGY =17, + CV_IMWRITE_PNG_BILEVEL =18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT =0, + CV_IMWRITE_PNG_STRATEGY_FILTERED =1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + CV_IMWRITE_PNG_STRATEGY_RLE =3, + CV_IMWRITE_PNG_STRATEGY_FIXED =4, + CV_IMWRITE_PXM_BINARY =32, + CV_IMWRITE_WEBP_QUALITY =64 +}; + +/* save image to file */ +CVAPI(int) cvSaveImage( const char* filename, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +/* decode image stored in the buffer */ +CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */ +CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +enum +{ + CV_CVTIMG_FLIP =1, + CV_CVTIMG_SWAP_RB =2 +}; + +/* utility function: convert one image to another with optional vertical flip */ +CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0)); + +CVAPI(int) cvHaveImageReader(const char* filename); +CVAPI(int) cvHaveImageWriter(const char* filename); + + +/****************************************************************************************\ +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvvLoadImage(name) cvLoadImage((name),1) +#define cvvSaveImage cvSaveImage +#define cvvConvertImage cvConvertImage + + +#ifdef __cplusplus +} +#endif + +#endif // __OPENCV_IMGCODECS_H__ diff --git a/modules/highgui/include/opencv2/highgui/ios.h b/modules/imgcodecs/include/opencv2/imgcodecs/ios.h similarity index 94% rename from modules/highgui/include/opencv2/highgui/ios.h rename to modules/imgcodecs/include/opencv2/imgcodecs/ios.h index a7f0395d71..8ec1356053 100644 --- 
a/modules/highgui/include/opencv2/highgui/ios.h +++ b/modules/imgcodecs/include/opencv2/imgcodecs/ios.h @@ -41,8 +41,11 @@ // //M*/ +#import +#import +#import +#import #include "opencv2/core/core.hpp" -#import "opencv2/highgui/cap_ios.h" UIImage* MatToUIImage(const cv::Mat& image); void UIImageToMat(const UIImage* image, diff --git a/modules/imgcodecs/perf/perf_main.cpp b/modules/imgcodecs/perf/perf_main.cpp new file mode 100644 index 0000000000..403402112d --- /dev/null +++ b/modules/imgcodecs/perf/perf_main.cpp @@ -0,0 +1,3 @@ +#include "perf_precomp.hpp" + +CV_PERF_TEST_MAIN(imgcodecs) diff --git a/modules/imgcodecs/perf/perf_precomp.hpp b/modules/imgcodecs/perf/perf_precomp.hpp new file mode 100644 index 0000000000..e6e34b40c6 --- /dev/null +++ b/modules/imgcodecs/perf/perf_precomp.hpp @@ -0,0 +1,19 @@ +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wmissing-declarations" +# if defined __clang__ || defined __APPLE__ +# pragma GCC diagnostic ignored "-Wmissing-prototypes" +# pragma GCC diagnostic ignored "-Wextra" +# endif +#endif + +#ifndef __OPENCV_PERF_PRECOMP_HPP__ +#define __OPENCV_PERF_PRECOMP_HPP__ + +#include "opencv2/ts.hpp" +#include "opencv2/imgcodecs.hpp" + +#ifdef GTEST_CREATE_SHARED_LIBRARY +#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined +#endif + +#endif diff --git a/modules/highgui/src/bitstrm.cpp b/modules/imgcodecs/src/bitstrm.cpp similarity index 100% rename from modules/highgui/src/bitstrm.cpp rename to modules/imgcodecs/src/bitstrm.cpp diff --git a/modules/highgui/src/bitstrm.hpp b/modules/imgcodecs/src/bitstrm.hpp similarity index 100% rename from modules/highgui/src/bitstrm.hpp rename to modules/imgcodecs/src/bitstrm.hpp diff --git a/modules/highgui/src/grfmt_base.cpp b/modules/imgcodecs/src/grfmt_base.cpp similarity index 100% rename from modules/highgui/src/grfmt_base.cpp rename to modules/imgcodecs/src/grfmt_base.cpp diff --git a/modules/highgui/src/grfmt_base.hpp b/modules/imgcodecs/src/grfmt_base.hpp similarity index 100% rename from modules/highgui/src/grfmt_base.hpp rename to modules/imgcodecs/src/grfmt_base.hpp diff --git a/modules/highgui/src/grfmt_bmp.cpp b/modules/imgcodecs/src/grfmt_bmp.cpp similarity index 100% rename from modules/highgui/src/grfmt_bmp.cpp rename to modules/imgcodecs/src/grfmt_bmp.cpp diff --git a/modules/highgui/src/grfmt_bmp.hpp b/modules/imgcodecs/src/grfmt_bmp.hpp similarity index 100% rename from modules/highgui/src/grfmt_bmp.hpp rename to modules/imgcodecs/src/grfmt_bmp.hpp diff --git a/modules/highgui/src/grfmt_exr.cpp b/modules/imgcodecs/src/grfmt_exr.cpp similarity index 100% rename from modules/highgui/src/grfmt_exr.cpp rename to modules/imgcodecs/src/grfmt_exr.cpp diff --git a/modules/highgui/src/grfmt_exr.hpp b/modules/imgcodecs/src/grfmt_exr.hpp similarity index 100% rename from modules/highgui/src/grfmt_exr.hpp rename to modules/imgcodecs/src/grfmt_exr.hpp diff --git a/modules/highgui/src/grfmt_hdr.cpp b/modules/imgcodecs/src/grfmt_hdr.cpp similarity index 100% rename from modules/highgui/src/grfmt_hdr.cpp rename to modules/imgcodecs/src/grfmt_hdr.cpp diff --git a/modules/highgui/src/grfmt_hdr.hpp b/modules/imgcodecs/src/grfmt_hdr.hpp similarity index 100% rename from modules/highgui/src/grfmt_hdr.hpp rename to modules/imgcodecs/src/grfmt_hdr.hpp diff --git a/modules/highgui/src/grfmt_jpeg.cpp b/modules/imgcodecs/src/grfmt_jpeg.cpp similarity index 100% rename from modules/highgui/src/grfmt_jpeg.cpp rename to modules/imgcodecs/src/grfmt_jpeg.cpp diff --git 
a/modules/highgui/src/grfmt_jpeg.hpp b/modules/imgcodecs/src/grfmt_jpeg.hpp similarity index 100% rename from modules/highgui/src/grfmt_jpeg.hpp rename to modules/imgcodecs/src/grfmt_jpeg.hpp diff --git a/modules/highgui/src/grfmt_jpeg2000.cpp b/modules/imgcodecs/src/grfmt_jpeg2000.cpp similarity index 100% rename from modules/highgui/src/grfmt_jpeg2000.cpp rename to modules/imgcodecs/src/grfmt_jpeg2000.cpp diff --git a/modules/highgui/src/grfmt_jpeg2000.hpp b/modules/imgcodecs/src/grfmt_jpeg2000.hpp similarity index 100% rename from modules/highgui/src/grfmt_jpeg2000.hpp rename to modules/imgcodecs/src/grfmt_jpeg2000.hpp diff --git a/modules/highgui/src/grfmt_png.cpp b/modules/imgcodecs/src/grfmt_png.cpp similarity index 100% rename from modules/highgui/src/grfmt_png.cpp rename to modules/imgcodecs/src/grfmt_png.cpp diff --git a/modules/highgui/src/grfmt_png.hpp b/modules/imgcodecs/src/grfmt_png.hpp similarity index 100% rename from modules/highgui/src/grfmt_png.hpp rename to modules/imgcodecs/src/grfmt_png.hpp diff --git a/modules/highgui/src/grfmt_pxm.cpp b/modules/imgcodecs/src/grfmt_pxm.cpp similarity index 100% rename from modules/highgui/src/grfmt_pxm.cpp rename to modules/imgcodecs/src/grfmt_pxm.cpp diff --git a/modules/highgui/src/grfmt_pxm.hpp b/modules/imgcodecs/src/grfmt_pxm.hpp similarity index 100% rename from modules/highgui/src/grfmt_pxm.hpp rename to modules/imgcodecs/src/grfmt_pxm.hpp diff --git a/modules/highgui/src/grfmt_sunras.cpp b/modules/imgcodecs/src/grfmt_sunras.cpp similarity index 100% rename from modules/highgui/src/grfmt_sunras.cpp rename to modules/imgcodecs/src/grfmt_sunras.cpp diff --git a/modules/highgui/src/grfmt_sunras.hpp b/modules/imgcodecs/src/grfmt_sunras.hpp similarity index 100% rename from modules/highgui/src/grfmt_sunras.hpp rename to modules/imgcodecs/src/grfmt_sunras.hpp diff --git a/modules/highgui/src/grfmt_tiff.cpp b/modules/imgcodecs/src/grfmt_tiff.cpp similarity index 100% rename from modules/highgui/src/grfmt_tiff.cpp rename to modules/imgcodecs/src/grfmt_tiff.cpp diff --git a/modules/highgui/src/grfmt_tiff.hpp b/modules/imgcodecs/src/grfmt_tiff.hpp similarity index 100% rename from modules/highgui/src/grfmt_tiff.hpp rename to modules/imgcodecs/src/grfmt_tiff.hpp diff --git a/modules/highgui/src/grfmt_webp.cpp b/modules/imgcodecs/src/grfmt_webp.cpp similarity index 100% rename from modules/highgui/src/grfmt_webp.cpp rename to modules/imgcodecs/src/grfmt_webp.cpp diff --git a/modules/highgui/src/grfmt_webp.hpp b/modules/imgcodecs/src/grfmt_webp.hpp similarity index 100% rename from modules/highgui/src/grfmt_webp.hpp rename to modules/imgcodecs/src/grfmt_webp.hpp diff --git a/modules/highgui/src/grfmts.hpp b/modules/imgcodecs/src/grfmts.hpp similarity index 100% rename from modules/highgui/src/grfmts.hpp rename to modules/imgcodecs/src/grfmts.hpp diff --git a/modules/highgui/src/ios_conversions.mm b/modules/imgcodecs/src/ios_conversions.mm similarity index 97% rename from modules/highgui/src/ios_conversions.mm rename to modules/imgcodecs/src/ios_conversions.mm index fa6208a17f..af522900c8 100644 --- a/modules/highgui/src/ios_conversions.mm +++ b/modules/imgcodecs/src/ios_conversions.mm @@ -40,7 +40,11 @@ // //M*/ -#import "opencv2/highgui/cap_ios.h" +#import +#import +#import +#import +#include "opencv2/core.hpp" #include "precomp.hpp" UIImage* MatToUIImage(const cv::Mat& image) { diff --git a/modules/highgui/src/loadsave.cpp b/modules/imgcodecs/src/loadsave.cpp similarity index 99% rename from modules/highgui/src/loadsave.cpp rename to 
modules/imgcodecs/src/loadsave.cpp index cdcaa23e5d..a3fcaba630 100644 --- a/modules/highgui/src/loadsave.cpp +++ b/modules/imgcodecs/src/loadsave.cpp @@ -454,7 +454,7 @@ bool imencode( const String& ext, InputArray _image, } /****************************************************************************************\ -* HighGUI loading & saving function implementation * +* Imgcodecs loading & saving function implementation * \****************************************************************************************/ CV_IMPL int diff --git a/modules/imgcodecs/src/precomp.hpp b/modules/imgcodecs/src/precomp.hpp new file mode 100644 index 0000000000..19e6fb9e3c --- /dev/null +++ b/modules/imgcodecs/src/precomp.hpp @@ -0,0 +1,84 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __IMGCODECS_H_ +#define __IMGCODECS_H_ + +#include "opencv2/imgcodecs.hpp" + +#include "opencv2/core/utility.hpp" +#include "opencv2/core/private.hpp" + +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgcodecs/imgcodecs_c.h" + +#include +#include +#include +#include +#include +#include + +#if defined WIN32 || defined WINCE + #if !defined _WIN32_WINNT + #ifdef HAVE_MSMF + #define _WIN32_WINNT 0x0600 // Windows Vista + #else + #define _WIN32_WINNT 0x0500 // Windows 2000 + #endif + #endif + + #include + #undef small + #undef min + #undef max + #undef abs +#endif + +#ifdef HAVE_TEGRA_OPTIMIZATION +#include "opencv2/imgcodecs/imgcodecs_tegra.hpp" +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#endif /* __IMGCODECS_H_ */ diff --git a/modules/highgui/src/rgbe.cpp b/modules/imgcodecs/src/rgbe.cpp similarity index 100% rename from modules/highgui/src/rgbe.cpp rename to modules/imgcodecs/src/rgbe.cpp diff --git a/modules/highgui/src/rgbe.hpp b/modules/imgcodecs/src/rgbe.hpp similarity index 100% rename from modules/highgui/src/rgbe.hpp rename to modules/imgcodecs/src/rgbe.hpp diff --git a/modules/highgui/src/utils.cpp b/modules/imgcodecs/src/utils.cpp similarity index 100% rename from modules/highgui/src/utils.cpp rename to modules/imgcodecs/src/utils.cpp diff --git a/modules/highgui/src/utils.hpp b/modules/imgcodecs/src/utils.hpp similarity index 100% rename from modules/highgui/src/utils.hpp rename to modules/imgcodecs/src/utils.hpp diff --git a/modules/highgui/test/test_drawing.cpp b/modules/imgcodecs/test/test_drawing.cpp similarity index 98% rename from modules/highgui/test/test_drawing.cpp rename to modules/imgcodecs/test/test_drawing.cpp index 9d9e17dde2..e4936f63a0 100644 --- a/modules/highgui/test/test_drawing.cpp +++ b/modules/imgcodecs/test/test_drawing.cpp @@ -41,7 +41,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace std; using namespace cv; @@ -408,8 +407,8 @@ int CV_DrawingTest_C::checkLineIterator( Mat& _img ) } #ifdef HAVE_JPEG -TEST(Highgui_Drawing, cpp_regression) { CV_DrawingTest_CPP test; test.safe_run(); } -TEST(Highgui_Drawing, c_regression) { CV_DrawingTest_C test; test.safe_run(); } +TEST(Imgcodecs_Drawing, cpp_regression) { CV_DrawingTest_CPP test; test.safe_run(); } +TEST(Imgcodecs_Drawing, c_regression) { CV_DrawingTest_C test; test.safe_run(); } #endif class CV_FillConvexPolyTest : public cvtest::BaseTest @@ -444,4 +443,4 @@ protected: } }; -TEST(Highgui_Drawing, fillconvexpoly_clipping) { CV_FillConvexPolyTest test; test.safe_run(); } +TEST(Imgcodecs_Drawing, fillconvexpoly_clipping) { CV_FillConvexPolyTest test; test.safe_run(); } diff --git a/modules/highgui/test/test_grfmt.cpp b/modules/imgcodecs/test/test_grfmt.cpp similarity index 95% rename from modules/highgui/test/test_grfmt.cpp rename to modules/imgcodecs/test/test_grfmt.cpp index e4d3e70461..9b06c5744c 100644 --- a/modules/highgui/test/test_grfmt.cpp +++ b/modules/imgcodecs/test/test_grfmt.cpp @@ -41,7 +41,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace cv; using namespace std; @@ -223,12 +222,12 @@ public: #ifdef HAVE_PNG -TEST(Highgui_Image, write_big) { CV_GrfmtWriteBigImageTest test; test.safe_run(); } +TEST(Imgcodecs_Image, write_big) { CV_GrfmtWriteBigImageTest test; test.safe_run(); } #endif -TEST(Highgui_Image, write_imageseq) { CV_GrfmtWriteSequenceImageTest test; test.safe_run(); } +TEST(Imgcodecs_Image, write_imageseq) { 
CV_GrfmtWriteSequenceImageTest test; test.safe_run(); } -TEST(Highgui_Image, read_bmp_rle8) { CV_GrfmtReadBMPRLE8Test test; test.safe_run(); } +TEST(Imgcodecs_Image, read_bmp_rle8) { CV_GrfmtReadBMPRLE8Test test; test.safe_run(); } #ifdef HAVE_PNG class CV_GrfmtPNGEncodeTest : public cvtest::BaseTest @@ -257,9 +256,9 @@ public: } }; -TEST(Highgui_Image, encode_png) { CV_GrfmtPNGEncodeTest test; test.safe_run(); } +TEST(Imgcodecs_Image, encode_png) { CV_GrfmtPNGEncodeTest test; test.safe_run(); } -TEST(Highgui_ImreadVSCvtColor, regression) +TEST(Imgcodecs_ImreadVSCvtColor, regression) { cvtest::TS& ts = *cvtest::TS::ptr(); @@ -375,11 +374,11 @@ public: } }; -TEST(Highgui_Image, read_png_color_palette_with_alpha) { CV_GrfmtReadPNGColorPaletteWithAlphaTest test; test.safe_run(); } +TEST(Imgcodecs_Image, read_png_color_palette_with_alpha) { CV_GrfmtReadPNGColorPaletteWithAlphaTest test; test.safe_run(); } #endif #ifdef HAVE_JPEG -TEST(Highgui_Jpeg, encode_empty) +TEST(Imgcodecs_Jpeg, encode_empty) { cv::Mat img; std::vector jpegImg; @@ -387,7 +386,7 @@ TEST(Highgui_Jpeg, encode_empty) ASSERT_THROW(cv::imencode(".jpg", img, jpegImg), cv::Exception); } -TEST(Highgui_Jpeg, encode_decode_progressive_jpeg) +TEST(Imgcodecs_Jpeg, encode_decode_progressive_jpeg) { cvtest::TS& ts = *cvtest::TS::ptr(); string input = string(ts.get_data_path()) + "../cv/shared/lena.png"; @@ -411,7 +410,7 @@ TEST(Highgui_Jpeg, encode_decode_progressive_jpeg) remove(output_progressive.c_str()); } -TEST(Highgui_Jpeg, encode_decode_optimize_jpeg) +TEST(Imgcodecs_Jpeg, encode_decode_optimize_jpeg) { cvtest::TS& ts = *cvtest::TS::ptr(); string input = string(ts.get_data_path()) + "../cv/shared/lena.png"; @@ -447,9 +446,9 @@ TEST(Highgui_Jpeg, encode_decode_optimize_jpeg) #ifdef ANDROID // Test disabled as it uses a lot of memory. // It is killed with SIGKILL by out of memory killer. 
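The renamed Imgcodecs_Jpeg tests above drive imencode/imdecode with encoder parameters. A small self-contained sketch of that flow, assuming the IMWRITE_JPEG_PROGRESSIVE and IMWRITE_JPEG_QUALITY flags available in this tree; the concrete values are illustrative only:

// In-memory JPEG round trip, similar in spirit to the Imgcodecs_Jpeg tests above.
// Encoder flags are passed as (key, value) pairs.
#include "opencv2/imgcodecs.hpp"
#include <vector>

int main()
{
    cv::Mat img(128, 128, CV_8UC3, cv::Scalar(64, 128, 192));

    std::vector<int> params;
    params.push_back(cv::IMWRITE_JPEG_PROGRESSIVE); params.push_back(1);
    params.push_back(cv::IMWRITE_JPEG_QUALITY);     params.push_back(95);

    std::vector<uchar> buf;
    cv::imencode(".jpg", img, buf, params);              // encode to memory
    cv::Mat back = cv::imdecode(buf, cv::IMREAD_COLOR);  // decode the buffer back
    return back.empty() ? 1 : 0;
}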
-TEST(Highgui_Tiff, DISABLED_decode_tile16384x16384) +TEST(Imgcodecs_Tiff, DISABLED_decode_tile16384x16384) #else -TEST(Highgui_Tiff, decode_tile16384x16384) +TEST(Imgcodecs_Tiff, decode_tile16384x16384) #endif { // see issue #2161 @@ -478,7 +477,7 @@ TEST(Highgui_Tiff, decode_tile16384x16384) remove(file4.c_str()); } -TEST(Highgui_Tiff, write_read_16bit_big_little_endian) +TEST(Imgcodecs_Tiff, write_read_16bit_big_little_endian) { // see issue #2601 "16-bit Grayscale TIFF Load Failures Due to Buffer Underflow and Endianness" @@ -561,7 +560,7 @@ public: } }; -TEST(Highgui_Tiff, decode_tile_remainder) +TEST(Imgcodecs_Tiff, decode_tile_remainder) { CV_GrfmtReadTifTiledWithNotFullTiles test; test.safe_run(); } @@ -570,7 +569,7 @@ TEST(Highgui_Tiff, decode_tile_remainder) #ifdef HAVE_WEBP -TEST(Highgui_WebP, encode_decode_lossless_webp) +TEST(Imgcodecs_WebP, encode_decode_lossless_webp) { cvtest::TS& ts = *cvtest::TS::ptr(); string input = string(ts.get_data_path()) + "../cv/shared/lena.png"; @@ -619,7 +618,7 @@ TEST(Highgui_WebP, encode_decode_lossless_webp) EXPECT_TRUE(cvtest::norm(img, img_webp, NORM_INF) == 0); } -TEST(Highgui_WebP, encode_decode_lossy_webp) +TEST(Imgcodecs_WebP, encode_decode_lossy_webp) { cvtest::TS& ts = *cvtest::TS::ptr(); std::string input = std::string(ts.get_data_path()) + "../cv/shared/lena.png"; @@ -643,7 +642,7 @@ TEST(Highgui_WebP, encode_decode_lossy_webp) } } -TEST(Highgui_WebP, encode_decode_with_alpha_webp) +TEST(Imgcodecs_WebP, encode_decode_with_alpha_webp) { cvtest::TS& ts = *cvtest::TS::ptr(); std::string input = std::string(ts.get_data_path()) + "../cv/shared/lena.png"; @@ -669,7 +668,7 @@ TEST(Highgui_WebP, encode_decode_with_alpha_webp) #endif -TEST(Highgui_Hdr, regression) +TEST(Imgcodecs_Hdr, regression) { string folder = string(cvtest::TS::ptr()->get_data_path()) + "/readwrite/"; string name_rle = folder + "rle.hdr"; diff --git a/modules/imgcodecs/test/test_main.cpp b/modules/imgcodecs/test/test_main.cpp new file mode 100644 index 0000000000..461e7fac7f --- /dev/null +++ b/modules/imgcodecs/test/test_main.cpp @@ -0,0 +1,3 @@ +#include "test_precomp.hpp" + +CV_TEST_MAIN("imgcodecs") diff --git a/modules/imgcodecs/test/test_precomp.hpp b/modules/imgcodecs/test/test_precomp.hpp new file mode 100644 index 0000000000..2aed614337 --- /dev/null +++ b/modules/imgcodecs/test/test_precomp.hpp @@ -0,0 +1,20 @@ +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wmissing-declarations" +# if defined __clang__ || defined __APPLE__ +# pragma GCC diagnostic ignored "-Wmissing-prototypes" +# pragma GCC diagnostic ignored "-Wextra" +# endif +#endif + +#ifndef __OPENCV_TEST_PRECOMP_HPP__ +#define __OPENCV_TEST_PRECOMP_HPP__ + +#include +#include "opencv2/ts.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc/imgproc_c.h" + +#include "opencv2/core/private.hpp" + +#endif diff --git a/modules/imgproc/doc/histograms.rst b/modules/imgproc/doc/histograms.rst index 91199f3781..5ebf168611 100644 --- a/modules/imgproc/doc/histograms.rst +++ b/modules/imgproc/doc/histograms.rst @@ -181,6 +181,8 @@ Compares two histograms. 
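The 16-bit TIFF endianness test renamed above protects a write/read round trip of CV_16U data. A minimal sketch of that round trip, assuming the TIFF codec (HAVE_TIFF) is built in; the file name and image size are illustrative:

// 16-bit TIFF round trip, as exercised by Imgcodecs_Tiff.write_read_16bit_big_little_endian.
#include "opencv2/imgcodecs.hpp"

int main()
{
    cv::Mat img16(32, 32, CV_16UC1);
    cv::randu(img16, cv::Scalar(0), cv::Scalar(65535));       // arbitrary 16-bit content
    cv::imwrite("depth16.tiff", img16);
    cv::Mat back = cv::imread("depth16.tiff", cv::IMREAD_ANYDEPTH | cv::IMREAD_GRAYSCALE);
    bool ok = back.type() == CV_16UC1 && cv::norm(img16, back, cv::NORM_INF) == 0;
    return ok ? 0 : 1;
}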
* **CV_COMP_HELLINGER** Synonym for ``CV_COMP_BHATTACHARYYA`` + * **CV_COMP_KL_DIV** Kullback-Leibler divergence + The functions ``compareHist`` compare two dense or two sparse histograms using the specified method: * Correlation (``method=CV_COMP_CORREL``) @@ -224,6 +226,12 @@ The functions ``compareHist`` compare two dense or two sparse histograms using t d(H_1,H_2) = \sqrt{1 - \frac{1}{\sqrt{\bar{H_1} \bar{H_2} N^2}} \sum_I \sqrt{H_1(I) \cdot H_2(I)}} +* Kullback-Leibler divergence (``method=CV_COMP_KL_DIV``). + + .. math:: + + d(H_1,H_2) = \sum _I H_1(I) \log \left(\frac{H_1(I)}{H_2(I)}\right) + The function returns :math:`d(H_1, H_2)` . diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 7928ae0fe8..76d65c2802 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -204,7 +204,8 @@ enum { HISTCMP_CORREL = 0, HISTCMP_INTERSECT = 2, HISTCMP_BHATTACHARYYA = 3, HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA, - HISTCMP_CHISQR_ALT = 4 + HISTCMP_CHISQR_ALT = 4, + HISTCMP_KL_DIV = 5 }; //! the color conversion code diff --git a/modules/imgproc/include/opencv2/imgproc/types_c.h b/modules/imgproc/include/opencv2/imgproc/types_c.h index dd0d8b8a6e..de8fb62038 100644 --- a/modules/imgproc/include/opencv2/imgproc/types_c.h +++ b/modules/imgproc/include/opencv2/imgproc/types_c.h @@ -509,7 +509,8 @@ enum CV_COMP_INTERSECT =2, CV_COMP_BHATTACHARYYA =3, CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA, - CV_COMP_CHISQR_ALT =4 + CV_COMP_CHISQR_ALT =4, + CV_COMP_KL_DIV =5 }; /* Mask size for distance transform */ diff --git a/modules/imgproc/perf/perf_precomp.hpp b/modules/imgproc/perf/perf_precomp.hpp index 19f62712c1..5bede78db8 100644 --- a/modules/imgproc/perf/perf_precomp.hpp +++ b/modules/imgproc/perf/perf_precomp.hpp @@ -11,7 +11,7 @@ #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined diff --git a/modules/imgproc/src/canny.cpp b/modules/imgproc/src/canny.cpp index 3590e290db..2a87ae05b4 100644 --- a/modules/imgproc/src/canny.cpp +++ b/modules/imgproc/src/canny.cpp @@ -348,6 +348,10 @@ void cv::Canny( InputArray _src, OutputArray _dst, #define CANNY_PUSH(d) *(d) = uchar(2), *stack_top++ = (d) #define CANNY_POP(d) (d) = *--stack_top +#if CV_SSE2 + bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2); +#endif + // calculate magnitude and angle of gradient, perform non-maxima suppression. 
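The comparison mode documented above is exposed as HISTCMP_KL_DIV in the C++ API and CV_COMP_KL_DIV in the C API (see the enum additions above). A minimal usage sketch; the image paths, bin count and L1 normalization are illustrative choices, not part of the patch:

// Comparing two grayscale histograms with the new Kullback-Leibler divergence mode.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>

int main()
{
    cv::Mat a = cv::imread("a.png", cv::IMREAD_GRAYSCALE);
    cv::Mat b = cv::imread("b.png", cv::IMREAD_GRAYSCALE);
    if (a.empty() || b.empty())
        return 1;

    int channels[] = { 0 };
    int histSize[] = { 64 };
    float range[] = { 0.f, 256.f };
    const float* ranges[] = { range };

    cv::Mat h1, h2;
    cv::calcHist(&a, 1, channels, cv::Mat(), h1, 1, histSize, ranges);
    cv::calcHist(&b, 1, channels, cv::Mat(), h2, 1, histSize, ranges);
    cv::normalize(h1, h1, 1, 0, cv::NORM_L1);   // treat histograms as distributions
    cv::normalize(h2, h2, 1, 0, cv::NORM_L1);

    double d = cv::compareHist(h1, h2, cv::HISTCMP_KL_DIV);   // sum over I of H1 * log(H1/H2)
    std::cout << "KL divergence: " << d << std::endl;
    return 0;
}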
// fill the map with one of the following values: // 0 - the pixel might belong to an edge @@ -363,12 +367,52 @@ void cv::Canny( InputArray _src, OutputArray _dst, if (!L2gradient) { - for (int j = 0; j < src.cols*cn; j++) + int j = 0, width = src.cols * cn; +#if CV_SSE2 + if (haveSSE2) + { + __m128i v_zero = _mm_setzero_si128(); + for ( ; j <= width - 8; j += 8) + { + __m128i v_dx = _mm_loadu_si128((const __m128i *)(_dx + j)); + __m128i v_dy = _mm_loadu_si128((const __m128i *)(_dy + j)); + v_dx = _mm_max_epi16(v_dx, _mm_sub_epi16(v_zero, v_dx)); + v_dy = _mm_max_epi16(v_dy, _mm_sub_epi16(v_zero, v_dy)); + + __m128i v_norm = _mm_add_epi32(_mm_unpacklo_epi16(v_dx, v_zero), _mm_unpacklo_epi16(v_dy, v_zero)); + _mm_storeu_si128((__m128i *)(_norm + j), v_norm); + + v_norm = _mm_add_epi32(_mm_unpackhi_epi16(v_dx, v_zero), _mm_unpackhi_epi16(v_dy, v_zero)); + _mm_storeu_si128((__m128i *)(_norm + j + 4), v_norm); + } + } +#endif + for ( ; j < width; ++j) _norm[j] = std::abs(int(_dx[j])) + std::abs(int(_dy[j])); } else { - for (int j = 0; j < src.cols*cn; j++) + int j = 0, width = src.cols * cn; +#if CV_SSE2 + if (haveSSE2) + { + for ( ; j <= width - 8; j += 8) + { + __m128i v_dx = _mm_loadu_si128((const __m128i *)(_dx + j)); + __m128i v_dy = _mm_loadu_si128((const __m128i *)(_dy + j)); + + __m128i v_dx_ml = _mm_mullo_epi16(v_dx, v_dx), v_dx_mh = _mm_mulhi_epi16(v_dx, v_dx); + __m128i v_dy_ml = _mm_mullo_epi16(v_dy, v_dy), v_dy_mh = _mm_mulhi_epi16(v_dy, v_dy); + + __m128i v_norm = _mm_add_epi32(_mm_unpacklo_epi16(v_dx_ml, v_dx_mh), _mm_unpacklo_epi16(v_dy_ml, v_dy_mh)); + _mm_storeu_si128((__m128i *)(_norm + j), v_norm); + + v_norm = _mm_add_epi32(_mm_unpackhi_epi16(v_dx_ml, v_dx_mh), _mm_unpackhi_epi16(v_dy_ml, v_dy_mh)); + _mm_storeu_si128((__m128i *)(_norm + j + 4), v_norm); + } + } +#endif + for ( ; j < width; ++j) _norm[j] = int(_dx[j])*_dx[j] + int(_dy[j])*_dy[j]; } diff --git a/modules/imgproc/src/deriv.cpp b/modules/imgproc/src/deriv.cpp index 5e920ec0b8..1a29c38abf 100644 --- a/modules/imgproc/src/deriv.cpp +++ b/modules/imgproc/src/deriv.cpp @@ -702,7 +702,7 @@ void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, #ifdef HAVE_IPP if ((ksize == 3 || ksize == 5) && ((borderType & BORDER_ISOLATED) != 0 || !_src.isSubmatrix()) && - ((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1))) + ((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1)) && !ocl::useOpenCL()) { int iscale = saturate_cast(scale), idelta = saturate_cast(delta); bool floatScale = std::fabs(scale - iscale) > DBL_EPSILON, needScale = iscale != 1; diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index 441d2226b8..b38ddcad0c 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -2325,6 +2325,21 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) s2 += b; } } + else if( method == CV_COMP_KL_DIV ) + { + for( j = 0; j < len; j++ ) + { + double p = h1[j]; + double q = h2[j]; + if( fabs(p) <= DBL_EPSILON ) { + continue; + } + if( fabs(q) <= DBL_EPSILON ) { + q = 1e-10; + } + result += p * std::log( p / q ); + } + } else CV_Error( CV_StsBadArg, "Unknown comparison method" ); } @@ -2360,7 +2375,7 @@ double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) CV_Assert( H1.size(i) == H2.size(i) ); const SparseMat *PH1 = &H1, *PH2 = &H2; - if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT) + if( 
PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV ) std::swap(PH1, PH2); SparseMatConstIterator it = PH1->begin(); @@ -2440,6 +2455,18 @@ double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.; result = std::sqrt(std::max(1. - result*s1, 0.)); } + else if( method == CV_COMP_KL_DIV ) + { + for( i = 0; i < N1; i++, ++it ) + { + double v1 = it.value(); + const SparseMat::Node* node = it.node(); + double v2 = PH2->value(node->idx, (size_t*)&node->hashval); + if( !v2 ) + v2 = 1e-10; + result += v1 * std::log( v1 / v2 ); + } + } else CV_Error( CV_StsBadArg, "Unknown comparison method" ); @@ -2785,7 +2812,7 @@ cvCompareHist( const CvHistogram* hist1, CvSparseMatIterator iterator; CvSparseNode *node1, *node2; - if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT) + if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV ) { CvSparseMat* t; CV_SWAP( mat1, mat2, t ); @@ -2887,6 +2914,13 @@ cvCompareHist( const CvHistogram* hist1, result = 1. - result*s1; result = sqrt(MAX(result,0.)); } + else if( method == CV_COMP_KL_DIV ) + { + cv::SparseMat sH1, sH2; + ((const CvSparseMat*)hist1->bins)->copyToSparseMat(sH1); + ((const CvSparseMat*)hist2->bins)->copyToSparseMat(sH2); + result = cv::compareHist( sH1, sH2, CV_COMP_KL_DIV ); + } else CV_Error( CV_StsBadArg, "Unknown comparison method" ); diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index e07641f6a1..1d16bcc3fc 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -3582,7 +3582,9 @@ private: static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, InputArray _map2, int interpolation, int borderType, const Scalar& borderValue) { - int cn = _src.channels(), type = _src.type(), depth = _src.depth(); + const ocl::Device & dev = ocl::Device::getDefault(); + int cn = _src.channels(), type = _src.type(), depth = _src.depth(), + rowsPerWI = dev.isIntel() ? 4 : 1; if (borderType == BORDER_TRANSPARENT || !(interpolation == INTER_LINEAR || interpolation == INTER_NEAREST) || _map1.type() == CV_16SC1 || _map2.type() == CV_16SC1) @@ -3619,12 +3621,14 @@ static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, Input static const char * const interMap[] = { "INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LINEAR", "INTER_LANCZOS" }; static const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101", "BORDER_TRANSPARENT" }; - String buildOptions = format("-D %s -D %s -D T=%s", interMap[interpolation], borderMap[borderType], ocl::typeToStr(type)); + String buildOptions = format("-D %s -D %s -D T=%s -D rowsPerWI=%d", + interMap[interpolation], borderMap[borderType], + ocl::typeToStr(type), rowsPerWI); if (interpolation != INTER_NEAREST) { char cvt[3][40]; - int wdepth = std::max(CV_32F, dst.depth()); + int wdepth = std::max(CV_32F, depth); buildOptions = buildOptions + format(" -D WT=%s -D convertToT=%s -D convertToWT=%s" " -D convertToWT2=%s -D WT2=%s", @@ -3636,10 +3640,9 @@ static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, Input } int scalarcn = cn == 3 ? 
4 : cn; int sctype = CV_MAKETYPE(depth, scalarcn); - buildOptions += format(" -D T=%s -D T1=%s" - " -D cn=%d -D ST=%s", + buildOptions += format(" -D T=%s -D T1=%s -D cn=%d -D ST=%s -D depth=%d", ocl::typeToStr(type), ocl::typeToStr(depth), - cn, ocl::typeToStr(sctype)); + cn, ocl::typeToStr(sctype), depth); ocl::Kernel k(kernelName.c_str(), ocl::imgproc::remap_oclsrc, buildOptions); @@ -3653,7 +3656,7 @@ static bool ocl_remap(InputArray _src, OutputArray _dst, InputArray _map1, Input else k.args(srcarg, dstarg, map1arg, ocl::KernelArg::ReadOnlyNoSize(map2), scalararg); - size_t globalThreads[2] = { dst.cols, dst.rows }; + size_t globalThreads[2] = { dst.cols, (dst.rows + rowsPerWI - 1) / rowsPerWI }; return k.run(2, globalThreads, NULL, false); } diff --git a/modules/imgproc/src/moments.cpp b/modules/imgproc/src/moments.cpp index 61fff29852..a61002a792 100644 --- a/modules/imgproc/src/moments.cpp +++ b/modules/imgproc/src/moments.cpp @@ -202,6 +202,128 @@ static Moments contourMoments( const Mat& contour ) * Spatial Raster Moments * \****************************************************************************************/ +template +struct MomentsInTile_SSE +{ + int operator() (const T *, int, WT &, WT &, WT &, MT &) + { + return 0; + } +}; + +#if CV_SSE2 + +template <> +struct MomentsInTile_SSE +{ + MomentsInTile_SSE() + { + useSIMD = checkHardwareSupport(CV_CPU_SSE2); + } + + int operator() (const uchar * ptr, int len, int & x0, int & x1, int & x2, int & x3) + { + int x = 0; + + if( useSIMD ) + { + __m128i qx_init = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); + __m128i dx = _mm_set1_epi16(8); + __m128i z = _mm_setzero_si128(), qx0 = z, qx1 = z, qx2 = z, qx3 = z, qx = qx_init; + + for( ; x <= len - 8; x += 8 ) + { + __m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z); + qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z)); + __m128i px = _mm_mullo_epi16(p, qx); + __m128i sx = _mm_mullo_epi16(qx, qx); + qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx)); + qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx)); + qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx)); + + qx = _mm_add_epi16(qx, dx); + } + + int CV_DECL_ALIGNED(16) buf[4]; + _mm_store_si128((__m128i*)buf, qx0); + x0 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, qx1); + x1 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, qx2); + x2 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, qx3); + x3 = buf[0] + buf[1] + buf[2] + buf[3]; + } + + return x; + } + + bool useSIMD; +}; + +#endif + +#if CV_SSE4_1 + +template <> +struct MomentsInTile_SSE +{ + MomentsInTile_SSE() + { + useSIMD = checkHardwareSupport(CV_CPU_SSE4_1); + } + + int operator() (const ushort * ptr, int len, int & x0, int & x1, int & x2, int64 & x3) + { + int x = 0; + + if (useSIMD) + { + __m128i vx_init0 = _mm_setr_epi32(0, 1, 2, 3), vx_init1 = _mm_setr_epi32(4, 5, 6, 7), + v_delta = _mm_set1_epi32(8), v_zero = _mm_setzero_si128(), v_x0 = v_zero, + v_x1 = v_zero, v_x2 = v_zero, v_x3 = v_zero, v_ix0 = vx_init0, v_ix1 = vx_init1; + + for( ; x <= len - 8; x += 8 ) + { + __m128i v_src = _mm_loadu_si128((const __m128i *)(ptr + x)); + __m128i v_src0 = _mm_unpacklo_epi16(v_src, v_zero), v_src1 = _mm_unpackhi_epi16(v_src, v_zero); + + v_x0 = _mm_add_epi32(v_x0, _mm_add_epi32(v_src0, v_src1)); + __m128i v_x1_0 = _mm_mullo_epi32(v_src0, v_ix0), v_x1_1 = _mm_mullo_epi32(v_src1, v_ix1); + v_x1 = _mm_add_epi32(v_x1, _mm_add_epi32(v_x1_0, v_x1_1)); + + __m128i v_2ix0 = _mm_mullo_epi32(v_ix0, v_ix0), v_2ix1 = 
_mm_mullo_epi32(v_ix1, v_ix1); + v_x2 = _mm_add_epi32(v_x2, _mm_add_epi32(_mm_mullo_epi32(v_2ix0, v_src0), _mm_mullo_epi32(v_2ix1, v_src1))); + + __m128i t = _mm_add_epi32(_mm_mullo_epi32(v_2ix0, v_x1_0), _mm_mullo_epi32(v_2ix1, v_x1_1)); + v_x3 = _mm_add_epi64(v_x3, _mm_add_epi64(_mm_unpacklo_epi32(t, v_zero), _mm_unpackhi_epi32(t, v_zero))); + + v_ix0 = _mm_add_epi32(v_ix0, v_delta); + v_ix1 = _mm_add_epi32(v_ix1, v_delta); + } + + int CV_DECL_ALIGNED(16) buf[4]; + int64 CV_DECL_ALIGNED(16) buf64[2]; + + _mm_store_si128((__m128i*)buf, v_x0); + x0 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, v_x1); + x1 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, v_x2); + x2 = buf[0] + buf[1] + buf[2] + buf[3]; + + _mm_store_si128((__m128i*)buf64, v_x3); + x3 = buf64[0] + buf64[1]; + } + + return x; + } + + bool useSIMD; +}; + +#endif + template #if defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 9 // Workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60196 @@ -212,14 +334,16 @@ static void momentsInTile( const Mat& img, double* moments ) Size size = img.size(); int x, y; MT mom[10] = {0,0,0,0,0,0,0,0,0,0}; + MomentsInTile_SSE vop; for( y = 0; y < size.height; y++ ) { const T* ptr = (const T*)(img.data + y*img.step); WT x0 = 0, x1 = 0, x2 = 0; MT x3 = 0; + x = vop(ptr, size.width, x0, x1, x2, x3); - for( x = 0; x < size.width; x++ ) + for( ; x < size.width; x++ ) { WT p = ptr[x]; WT xp = x * p, xxp; @@ -249,85 +373,6 @@ static void momentsInTile( const Mat& img, double* moments ) moments[x] = (double)mom[x]; } - -#if CV_SSE2 - -template<> void momentsInTile( const cv::Mat& img, double* moments ) -{ - typedef uchar T; - typedef int WT; - typedef int MT; - Size size = img.size(); - int y; - MT mom[10] = {0,0,0,0,0,0,0,0,0,0}; - bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); - - for( y = 0; y < size.height; y++ ) - { - const T* ptr = img.ptr(y); - int x0 = 0, x1 = 0, x2 = 0, x3 = 0, x = 0; - - if( useSIMD ) - { - __m128i qx_init = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); - __m128i dx = _mm_set1_epi16(8); - __m128i z = _mm_setzero_si128(), qx0 = z, qx1 = z, qx2 = z, qx3 = z, qx = qx_init; - - for( ; x <= size.width - 8; x += 8 ) - { - __m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z); - qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z)); - __m128i px = _mm_mullo_epi16(p, qx); - __m128i sx = _mm_mullo_epi16(qx, qx); - qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx)); - qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx)); - qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx)); - - qx = _mm_add_epi16(qx, dx); - } - int CV_DECL_ALIGNED(16) buf[4]; - _mm_store_si128((__m128i*)buf, qx0); - x0 = buf[0] + buf[1] + buf[2] + buf[3]; - _mm_store_si128((__m128i*)buf, qx1); - x1 = buf[0] + buf[1] + buf[2] + buf[3]; - _mm_store_si128((__m128i*)buf, qx2); - x2 = buf[0] + buf[1] + buf[2] + buf[3]; - _mm_store_si128((__m128i*)buf, qx3); - x3 = buf[0] + buf[1] + buf[2] + buf[3]; - } - - for( ; x < size.width; x++ ) - { - WT p = ptr[x]; - WT xp = x * p, xxp; - - x0 += p; - x1 += xp; - xxp = xp * x; - x2 += xxp; - x3 += xxp * x; - } - - WT py = y * x0, sy = y*y; - - mom[9] += ((MT)py) * sy; // m03 - mom[8] += ((MT)x1) * sy; // m12 - mom[7] += ((MT)x2) * y; // m21 - mom[6] += x3; // m30 - mom[5] += x0 * sy; // m02 - mom[4] += x1 * y; // m11 - mom[3] += x2; // m20 - mom[2] += py; // m01 - mom[1] += x1; // m10 - mom[0] += x0; // m00 - } - - for(int x = 0; x < 10; x++ ) - moments[x] = (double)mom[x]; -} - -#endif - typedef void 
(*MomentsInTileFunc)(const Mat& img, double* moments); Moments::Moments() diff --git a/modules/imgproc/src/morph.cpp b/modules/imgproc/src/morph.cpp index a621a6e403..4f696b4209 100644 --- a/modules/imgproc/src/morph.cpp +++ b/modules/imgproc/src/morph.cpp @@ -1257,7 +1257,7 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne } #undef IPP_MORPH_CASE -#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ > 8 +#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ >= 8 return false; /// It disables false positive warning in GCC 4.8 and further #endif } diff --git a/modules/imgproc/src/opencl/corner.cl b/modules/imgproc/src/opencl/corner.cl index 563cb98081..e1c2e407dc 100644 --- a/modules/imgproc/src/opencl/corner.cl +++ b/modules/imgproc/src/opencl/corner.cl @@ -114,25 +114,23 @@ __kernel void corner(__global const float * Dx, int dx_step, int dx_offset, int int dst_startX = gX * (THREADS-ksX+1) + dst_x_off; int dst_startY = (gY << 1) + dst_y_off; - float dx_data[ksY+1],dy_data[ksY+1], data[3][ksY+1]; + float data[3][ksY+1]; __local float temp[6][THREADS]; #ifdef BORDER_CONSTANT for (int i=0; i < ksY+1; i++) { bool dx_con = dx_startX+col >= 0 && dx_startX+col < dx_whole_cols && dx_startY+i >= 0 && dx_startY+i < dx_whole_rows; - int indexDx = (dx_startY+i)*(dx_step>>2)+(dx_startX+col); + int indexDx = mad24(dx_startY+i, dx_step>>2, dx_startX+col); float dx_s = dx_con ? Dx[indexDx] : 0.0f; - dx_data[i] = dx_s; bool dy_con = dy_startX+col >= 0 && dy_startX+col < dy_whole_cols && dy_startY+i >= 0 && dy_startY+i < dy_whole_rows; - int indexDy = (dy_startY+i)*(dy_step>>2)+(dy_startX+col); + int indexDy = mad24(dy_startY+i, dy_step>>2, dy_startX+col); float dy_s = dy_con ? Dy[indexDy] : 0.0f; - dy_data[i] = dy_s; - data[0][i] = dx_data[i] * dx_data[i]; - data[1][i] = dx_data[i] * dy_data[i]; - data[2][i] = dy_data[i] * dy_data[i]; + data[0][i] = dx_s * dx_s; + data[1][i] = dx_s * dy_s; + data[2][i] = dy_s * dy_s; } #else int clamped_col = min(2*dst_cols, col); @@ -141,16 +139,16 @@ __kernel void corner(__global const float * Dx, int dx_step, int dx_offset, int int dx_selected_row = dx_startY+i, dx_selected_col = dx_startX+clamped_col; EXTRAPOLATE(dx_selected_row, dx_whole_rows) EXTRAPOLATE(dx_selected_col, dx_whole_cols) - dx_data[i] = Dx[dx_selected_row * (dx_step>>2) + dx_selected_col]; + float dx_s = Dx[mad24(dx_selected_row, dx_step>>2, dx_selected_col)]; int dy_selected_row = dy_startY+i, dy_selected_col = dy_startX+clamped_col; EXTRAPOLATE(dy_selected_row, dy_whole_rows) EXTRAPOLATE(dy_selected_col, dy_whole_cols) - dy_data[i] = Dy[dy_selected_row * (dy_step>>2) + dy_selected_col]; + float dy_s = Dy[mad24(dy_selected_row, dy_step>>2, dy_selected_col)]; - data[0][i] = dx_data[i] * dx_data[i]; - data[1][i] = dx_data[i] * dy_data[i]; - data[2][i] = dy_data[i] * dy_data[i]; + data[0][i] = dx_s * dx_s; + data[1][i] = dx_s * dy_s; + data[2][i] = dy_s * dy_s; } #endif float sum0 = 0.0f, sum1 = 0.0f, sum2 = 0.0f; @@ -180,7 +178,7 @@ __kernel void corner(__global const float * Dx, int dx_step, int dx_offset, int col += anX; int posX = dst_startX - dst_x_off + col - anX; int posY = (gly << 1); - int till = (ksX + 1)%2; + int till = (ksX + 1) & 1; float tmp_sum[6] = { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; for (int k=0; k<6; k++) { @@ -210,7 +208,7 @@ __kernel void corner(__global const float * Dx, int dx_step, int dx_offset, int float a = tmp_sum[0] * 0.5f; float b = tmp_sum[2]; float c = tmp_sum[4] * 0.5f; - *(__global float *)(dst + dst_index) = (float)((a+c) - 
sqrt((a-c)*(a-c) + b*b)); + *(__global float *)(dst + dst_index) = (float)((a+c) - native_sqrt((a-c)*(a-c) + b*b)); } if (posX < dst_cols && (posY + 1) < dst_rows) { @@ -218,7 +216,7 @@ __kernel void corner(__global const float * Dx, int dx_step, int dx_offset, int float a = tmp_sum[1] * 0.5f; float b = tmp_sum[3]; float c = tmp_sum[5] * 0.5f; - *(__global float *)(dst + dst_index) = (float)((a+c) - sqrt((a-c)*(a-c) + b*b)); + *(__global float *)(dst + dst_index) = (float)((a+c) - native_sqrt((a-c)*(a-c) + b*b)); } #else #error "No such corners type" diff --git a/modules/imgproc/src/opencl/pyr_down.cl b/modules/imgproc/src/opencl/pyr_down.cl index b8b06b712b..2358775e7a 100644 --- a/modules/imgproc/src/opencl/pyr_down.cl +++ b/modules/imgproc/src/opencl/pyr_down.cl @@ -79,12 +79,22 @@ #define SRC(_x,_y) convertToFT(loadpix(srcData + mad24(_y, src_step, PIXSIZE * _x))) +#if kercn == 4 +#define SRC4(_x,_y) convert_float4(vload4(0, srcData + mad24(_y, src_step, PIXSIZE * _x))) +#endif + +#ifdef INTEL_DEVICE +#define MAD(x,y,z) fma((x),(y),(z)) +#else +#define MAD(x,y,z) mad((x),(y),(z)) +#endif + #define noconvert __kernel void pyrDown(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols, __global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols) { - const int x = get_global_id(0); + const int x = get_global_id(0)*kercn; const int y = get_group_id(1); __local FT smem[LOCAL_SIZE + 4]; @@ -97,98 +107,190 @@ __kernel void pyrDown(__global const uchar * src, int src_step, int src_offset, FT co3 = 0.0625f; const int src_y = 2*y; + int col; - if (src_y >= 2 && src_y < src_rows - 2 && x >= 2 && x < src_cols - 2) + if (src_y >= 2 && src_y < src_rows - 2) { - sum = co3 * SRC(x, src_y - 2); - sum = sum + co2 * SRC(x, src_y - 1); - sum = sum + co1 * SRC(x, src_y ); - sum = sum + co2 * SRC(x, src_y + 1); - sum = sum + co3 * SRC(x, src_y + 2); +#if kercn == 1 + col = EXTRAPOLATE(x, src_cols); + + sum = co3* SRC(col, src_y - 2); + sum = MAD(co2, SRC(col, src_y - 1), sum); + sum = MAD(co1, SRC(col, src_y ), sum); + sum = MAD(co2, SRC(col, src_y + 1), sum); + sum = MAD(co3, SRC(col, src_y + 2), sum); smem[2 + get_local_id(0)] = sum; +#else + if (x < src_cols-4) + { + float4 sum4; + sum4 = co3* SRC4(x, src_y - 2); + sum4 = MAD(co2, SRC4(x, src_y - 1), sum4); + sum4 = MAD(co1, SRC4(x, src_y ), sum4); + sum4 = MAD(co2, SRC4(x, src_y + 1), sum4); + sum4 = MAD(co3, SRC4(x, src_y + 2), sum4); + vstore4(sum4, get_local_id(0), (__local float*) &smem[2]); + } + else + { + for (int i=0; i<4; i++) + { + col = EXTRAPOLATE(x+i, src_cols); + sum = co3* SRC(col, src_y - 2); + sum = MAD(co2, SRC(col, src_y - 1), sum); + sum = MAD(co1, SRC(col, src_y ), sum); + sum = MAD(co2, SRC(col, src_y + 1), sum); + sum = MAD(co3, SRC(col, src_y + 2), sum); + + smem[2 + 4*get_local_id(0)+i] = sum; + } + } +#endif if (get_local_id(0) < 2) { - const int left_x = x - 2; + col = EXTRAPOLATE((int)(get_group_id(0)*LOCAL_SIZE + get_local_id(0) - 2), src_cols); - sum = co3 * SRC(left_x, src_y - 2); - sum = sum + co2 * SRC(left_x, src_y - 1); - sum = sum + co1 * SRC(left_x, src_y ); - sum = sum + co2 * SRC(left_x, src_y + 1); - sum = sum + co3 * SRC(left_x, src_y + 2); + sum = co3* SRC(col, src_y - 2); + sum = MAD(co2, SRC(col, src_y - 1), sum); + sum = MAD(co1, SRC(col, src_y ), sum); + sum = MAD(co2, SRC(col, src_y + 1), sum); + sum = MAD(co3, SRC(col, src_y + 2), sum); smem[get_local_id(0)] = sum; } - if (get_local_id(0) > LOCAL_SIZE - 3) + if (get_local_id(0) > 1 && get_local_id(0) < 4) { 
- const int right_x = x + 2; + col = EXTRAPOLATE((int)((get_group_id(0)+1)*LOCAL_SIZE + get_local_id(0) - 2), src_cols); - sum = co3 * SRC(right_x, src_y - 2); - sum = sum + co2 * SRC(right_x, src_y - 1); - sum = sum + co1 * SRC(right_x, src_y ); - sum = sum + co2 * SRC(right_x, src_y + 1); - sum = sum + co3 * SRC(right_x, src_y + 2); + sum = co3* SRC(col, src_y - 2); + sum = MAD(co2, SRC(col, src_y - 1), sum); + sum = MAD(co1, SRC(col, src_y ), sum); + sum = MAD(co2, SRC(col, src_y + 1), sum); + sum = MAD(co3, SRC(col, src_y + 2), sum); - smem[4 + get_local_id(0)] = sum; + smem[LOCAL_SIZE + get_local_id(0)] = sum; } } - else + else // need extrapolate y { - int col = EXTRAPOLATE(x, src_cols); +#if kercn == 1 + col = EXTRAPOLATE(x, src_cols); - sum = co3 * SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); - sum = sum + co2 * SRC(col, EXTRAPOLATE(src_y - 1, src_rows)); - sum = sum + co1 * SRC(col, EXTRAPOLATE(src_y , src_rows)); - sum = sum + co2 * SRC(col, EXTRAPOLATE(src_y + 1, src_rows)); - sum = sum + co3 * SRC(col, EXTRAPOLATE(src_y + 2, src_rows)); + sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum); + sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum); + sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum); smem[2 + get_local_id(0)] = sum; +#else + if (x < src_cols-4) + { + float4 sum4; + sum4 = co3* SRC4(x, EXTRAPOLATE(src_y - 2, src_rows)); + sum4 = MAD(co2, SRC4(x, EXTRAPOLATE(src_y - 1, src_rows)), sum4); + sum4 = MAD(co1, SRC4(x, EXTRAPOLATE(src_y , src_rows)), sum4); + sum4 = MAD(co2, SRC4(x, EXTRAPOLATE(src_y + 1, src_rows)), sum4); + sum4 = MAD(co3, SRC4(x, EXTRAPOLATE(src_y + 2, src_rows)), sum4); + vstore4(sum4, get_local_id(0), (__local float*) &smem[2]); + } + else + { + for (int i=0; i<4; i++) + { + col = EXTRAPOLATE(x+i, src_cols); + sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum); + sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum); + sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum); + + smem[2 + 4*get_local_id(0)+i] = sum; + } + } +#endif if (get_local_id(0) < 2) { - col = EXTRAPOLATE(x - 2, src_cols); + col = EXTRAPOLATE((int)(get_group_id(0)*LOCAL_SIZE + get_local_id(0) - 2), src_cols); - sum = co3 * SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); - sum = sum + co2 * SRC(col, EXTRAPOLATE(src_y - 1, src_rows)); - sum = sum + co1 * SRC(col, EXTRAPOLATE(src_y , src_rows)); - sum = sum + co2 * SRC(col, EXTRAPOLATE(src_y + 1, src_rows)); - sum = sum + co3 * SRC(col, EXTRAPOLATE(src_y + 2, src_rows)); + sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum); + sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum); + sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum); smem[get_local_id(0)] = sum; } - if (get_local_id(0) > LOCAL_SIZE - 3) + if (get_local_id(0) > 1 && get_local_id(0) < 4) { - col = EXTRAPOLATE(x + 2, src_cols); + col = EXTRAPOLATE((int)((get_group_id(0)+1)*LOCAL_SIZE + get_local_id(0) - 2), src_cols); - sum = co3 * SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); - sum = sum + co2 * SRC(col, EXTRAPOLATE(src_y - 1, src_rows)); - sum = sum + co1 * SRC(col, EXTRAPOLATE(src_y , src_rows)); - sum = sum + co2 
* SRC(col, EXTRAPOLATE(src_y + 1, src_rows)); - sum = sum + co3 * SRC(col, EXTRAPOLATE(src_y + 2, src_rows)); + sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows)); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum); + sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum); + sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum); + sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum); - smem[4 + get_local_id(0)] = sum; + smem[LOCAL_SIZE + get_local_id(0)] = sum; } } barrier(CLK_LOCAL_MEM_FENCE); +#if kercn == 1 if (get_local_id(0) < LOCAL_SIZE / 2) { const int tid2 = get_local_id(0) * 2; - sum = co3 * smem[2 + tid2 - 2]; - sum = sum + co2 * smem[2 + tid2 - 1]; - sum = sum + co1 * smem[2 + tid2 ]; - sum = sum + co2 * smem[2 + tid2 + 1]; - sum = sum + co3 * smem[2 + tid2 + 2]; + sum = 0.f; +#if cn == 1 +#if fdepth <= 5 + sum = sum + dot(vload4(0, (__local float*) (&smem)+tid2), (float4)(co3, co2, co1, co2)); +#else + sum = sum + dot(vload4(0, (__local double*) (&smem)+tid2), (double4)(co3, co2, co1, co2)); +#endif +#else + sum = MAD(co3, smem[2 + tid2 - 2], sum); + sum = MAD(co2, smem[2 + tid2 - 1], sum); + sum = MAD(co1, smem[2 + tid2 ], sum); + sum = MAD(co2, smem[2 + tid2 + 1], sum); +#endif + sum = MAD(co3, smem[2 + tid2 + 2], sum); const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2; if (dst_x < dst_cols) storepix(convertToT(sum), dstData + y * dst_step + dst_x * PIXSIZE); } +#else + int tid4 = get_local_id(0) * 4; + sum = co3* smem[2 + tid4 + 2]; + sum = MAD(co3, smem[2 + tid4 - 2], sum); + sum = MAD(co2, smem[2 + tid4 - 1], sum); + sum = MAD(co1, smem[2 + tid4 ], sum); + sum = MAD(co2, smem[2 + tid4 + 1], sum); + + int dst_x = (get_group_id(0) * LOCAL_SIZE + tid4) / 2; + + if (dst_x < dst_cols) + storepix(convertToT(sum), dstData + mad24(y, dst_step, dst_x * PIXSIZE)); + + tid4 += 2; + dst_x += 1; + + sum = co3* smem[2 + tid4 + 2]; + sum = MAD(co3, smem[2 + tid4 - 2], sum); + sum = MAD(co2, smem[2 + tid4 - 1], sum); + sum = MAD(co1, smem[2 + tid4 ], sum); + sum = MAD(co2, smem[2 + tid4 + 1], sum); + + if (dst_x < dst_cols) + storepix(convertToT(sum), dstData + mad24(y, dst_step, dst_x * PIXSIZE)); +#endif } diff --git a/modules/imgproc/src/opencl/remap.cl b/modules/imgproc/src/opencl/remap.cl index bd043c5e4b..4e45b40bd3 100644 --- a/modules/imgproc/src/opencl/remap.cl +++ b/modules/imgproc/src/opencl/remap.cl @@ -147,37 +147,43 @@ __kernel void remap_2_32FC1(__global const uchar * srcptr, int src_step, int src ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - T scalar = convertScalar(nVal); - - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int map1_index = mad24(y, map1_step, x * (int)sizeof(float) + map1_offset); - int map2_index = mad24(y, map2_step, x * (int)sizeof(float) + map2_offset); - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); + T scalar = convertScalar(nVal); - __global const float * map1 = (__global const float *)(map1ptr + map1_index); - __global const float * map2 = (__global const float *)(map2ptr + map2_index); - __global T * dst = (__global T *)(dstptr + dst_index); + int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(float), map1_offset)); + int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(float), map2_offset)); + int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); - int gx = convert_int_sat_rte(map1[0]); - int gy = convert_int_sat_rte(map2[0]); + #pragma unroll + for (int i = 0; i < rowsPerWI; 
++i, ++y, + map1_index += map1_step, map2_index += map2_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const float * map1 = (__global const float *)(map1ptr + map1_index); + __global const float * map2 = (__global const float *)(map2ptr + map2_index); + __global T * dst = (__global T *)(dstptr + dst_index); - if (NEED_EXTRAPOLATION(gx, gy)) - { + int gx = convert_int_sat_rte(map1[0]); + int gy = convert_int_sat_rte(map2[0]); + + if (NEED_EXTRAPOLATION(gx, gy)) + { #ifndef BORDER_CONSTANT - int2 gxy = (int2)(gx, gy); + int2 gxy = (int2)(gx, gy); #endif - T v; - EXTRAPOLATE(gxy, v) - storepix(v, dst); - } - else - { - int src_index = mad24(gy, src_step, gx * TSIZE + src_offset); - storepix(loadpix((__global const T*)(srcptr + src_index)), dst); - } + T v; + EXTRAPOLATE(gxy, v) + storepix(v, dst); + } + else + { + int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset)); + storepix(loadpix((__global const T*)(srcptr + src_index)), dst); + } + } } } @@ -187,31 +193,36 @@ __kernel void remap_32FC2(__global const uchar * srcptr, int src_step, int src_o ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - T scalar = convertScalar(nVal); - - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); - int map_index = mad24(y, map_step, x * (int)sizeof(float2) + map_offset); + T scalar = convertScalar(nVal); + int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); + int map_index = mad24(y, map_step, mad24(x, (int)sizeof(float2), map_offset)); - __global const float2 * map = (__global const float2 *)(mapptr + map_index); - __global T * dst = (__global T *)(dstptr + dst_index); + #pragma unroll + for (int i = 0; i < rowsPerWI; ++i, ++y, + map_index += map_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const float2 * map = (__global const float2 *)(mapptr + map_index); + __global T * dst = (__global T *)(dstptr + dst_index); - int2 gxy = convert_int2_sat_rte(map[0]); - int gx = gxy.x, gy = gxy.y; + int2 gxy = convert_int2_sat_rte(map[0]); + int gx = gxy.x, gy = gxy.y; - if (NEED_EXTRAPOLATION(gx, gy)) - { - T v; - EXTRAPOLATE(gxy, v) - storepix(v, dst); - } - else - { - int src_index = mad24(gy, src_step, gx * TSIZE + src_offset); - storepix(loadpix((__global const T *)(srcptr + src_index)), dst); + if (NEED_EXTRAPOLATION(gx, gy)) + { + T v; + EXTRAPOLATE(gxy, v) + storepix(v, dst); + } + else + { + int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset)); + storepix(loadpix((__global const T *)(srcptr + src_index)), dst); + } } } } @@ -222,32 +233,37 @@ __kernel void remap_16SC2(__global const uchar * srcptr, int src_step, int src_o ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - T scalar = convertScalar(nVal); - - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); - int map_index = mad24(y, map_step, x * (int)sizeof(short2) + map_offset); + T scalar = convertScalar(nVal); + int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); + int map_index = mad24(y, map_step, mad24(x, (int)sizeof(short2), map_offset)); - __global const short2 * map = (__global const short2 *)(mapptr + map_index); - __global T * dst = (__global T *)(dstptr + dst_index); + #pragma unroll + for (int i = 0; i < rowsPerWI; ++i, ++y, + map_index += map_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const short2 
* map = (__global const short2 *)(mapptr + map_index); + __global T * dst = (__global T *)(dstptr + dst_index); - int2 gxy = convert_int2(map[0]); - int gx = gxy.x, gy = gxy.y; + int2 gxy = convert_int2(map[0]); + int gx = gxy.x, gy = gxy.y; - if (NEED_EXTRAPOLATION(gx, gy)) - { - T v; - EXTRAPOLATE(gxy, v) - storepix(v, dst); - } - else - { - int src_index = mad24(gy, src_step, gx * TSIZE + src_offset); - storepix(loadpix((__global const T *)(srcptr + src_index)), dst); - } + if (NEED_EXTRAPOLATION(gx, gy)) + { + T v; + EXTRAPOLATE(gxy, v) + storepix(v, dst); + } + else + { + int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset)); + storepix(loadpix((__global const T *)(srcptr + src_index)), dst); + } + } } } @@ -258,41 +274,54 @@ __kernel void remap_16SC2_16UC1(__global const uchar * srcptr, int src_step, int ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - T scalar = convertScalar(nVal); - - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); - int map1_index = mad24(y, map1_step, x * (int)sizeof(short2) + map1_offset); - int map2_index = mad24(y, map2_step, x * (int)sizeof(ushort) + map2_offset); + T scalar = convertScalar(nVal); + int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); + int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(short2), map1_offset)); + int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(ushort), map2_offset)); - __global const short2 * map1 = (__global const short2 *)(map1ptr + map1_index); - __global const ushort * map2 = (__global const ushort *)(map2ptr + map2_index); - __global T * dst = (__global T *)(dstptr + dst_index); + #pragma unroll + for (int i = 0; i < rowsPerWI; ++i, ++y, + map1_index += map1_step, map2_index += map2_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const short2 * map1 = (__global const short2 *)(map1ptr + map1_index); + __global const ushort * map2 = (__global const ushort *)(map2ptr + map2_index); + __global T * dst = (__global T *)(dstptr + dst_index); - int map2Value = convert_int(map2[0]) & (INTER_TAB_SIZE2 - 1); - int dx = (map2Value & (INTER_TAB_SIZE - 1)) < (INTER_TAB_SIZE >> 1) ? 1 : 0; - int dy = (map2Value >> INTER_BITS) < (INTER_TAB_SIZE >> 1) ? 1 : 0; - int2 gxy = convert_int2(map1[0]) + (int2)(dx, dy); - int gx = gxy.x, gy = gxy.y; + int map2Value = convert_int(map2[0]) & (INTER_TAB_SIZE2 - 1); + int dx = (map2Value & (INTER_TAB_SIZE - 1)) < (INTER_TAB_SIZE >> 1) ? 1 : 0; + int dy = (map2Value >> INTER_BITS) < (INTER_TAB_SIZE >> 1) ? 
1 : 0; + int2 gxy = convert_int2(map1[0]) + (int2)(dx, dy); + int gx = gxy.x, gy = gxy.y; - if (NEED_EXTRAPOLATION(gx, gy)) - { - T v; - EXTRAPOLATE(gxy, v) - storepix(v, dst); - } - else - { - int src_index = mad24(gy, src_step, gx * TSIZE + src_offset); - storepix(loadpix((__global const T *)(srcptr + src_index)), dst); - } + if (NEED_EXTRAPOLATION(gx, gy)) + { + T v; + EXTRAPOLATE(gxy, v) + storepix(v, dst); + } + else + { + int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset)); + storepix(loadpix((__global const T *)(srcptr + src_index)), dst); + } + } } } -#elif INTER_LINEAR +#elif defined INTER_LINEAR + +__constant float coeffs[64] = +{ 1.000000f, 0.000000f, 0.968750f, 0.031250f, 0.937500f, 0.062500f, 0.906250f, 0.093750f, 0.875000f, 0.125000f, 0.843750f, 0.156250f, + 0.812500f, 0.187500f, 0.781250f, 0.218750f, 0.750000f, 0.250000f, 0.718750f, 0.281250f, 0.687500f, 0.312500f, 0.656250f, 0.343750f, + 0.625000f, 0.375000f, 0.593750f, 0.406250f, 0.562500f, 0.437500f, 0.531250f, 0.468750f, 0.500000f, 0.500000f, 0.468750f, 0.531250f, + 0.437500f, 0.562500f, 0.406250f, 0.593750f, 0.375000f, 0.625000f, 0.343750f, 0.656250f, 0.312500f, 0.687500f, 0.281250f, 0.718750f, + 0.250000f, 0.750000f, 0.218750f, 0.781250f, 0.187500f, 0.812500f, 0.156250f, 0.843750f, 0.125000f, 0.875000f, 0.093750f, 0.906250f, + 0.062500f, 0.937500f, 0.031250f, 0.968750f }; __kernel void remap_16SC2_16UC1(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols, __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols, @@ -301,54 +330,60 @@ __kernel void remap_16SC2_16UC1(__global const uchar * srcptr, int src_step, int ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); - int map1_index = mad24(y, map1_step, x * (int)sizeof(short2) + map1_offset); - int map2_index = mad24(y, map2_step, x * (int)sizeof(ushort) + map2_offset); - - __global const short2 * map1 = (__global const short2 *)(map1ptr + map1_index); - __global const ushort * map2 = (__global const ushort *)(map2ptr + map2_index); - __global T * dst = (__global T *)(dstptr + dst_index); - - int2 map_dataA = convert_int2(map1[0]); - int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y); - int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1); - int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1); - - ushort map2Value = (ushort)(map2[0] & (INTER_TAB_SIZE2 - 1)); - WT2 u = (WT2)(map2Value & (INTER_TAB_SIZE - 1), map2Value >> INTER_BITS) / (WT2)(INTER_TAB_SIZE); - WT scalar = convertToWT(convertScalar(nVal)); - WT a = scalar, b = scalar, c = scalar, d = scalar; + int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); + int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(short2), map1_offset)); + int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(ushort), map2_offset)); - if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y)) - a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataA, a); + #pragma unroll + for (int i = 0; i < rowsPerWI; ++i, ++y, + map1_index += map1_step, map2_index += map2_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const short2 * map1 = (__global const short2 *)(map1ptr + map1_index); + __global const ushort * map2 = (__global const ushort *)(map2ptr + map2_index); + 
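Throughout the reworked remap kernels above, each work-item now iterates over rowsPerWI consecutive destination rows with a per-row bounds check, and the host launches ceil(dst_rows / rowsPerWI) work-items along Y. A small standalone sketch of that index mapping (values are illustrative):

// Standalone illustration of the rowsPerWI scheme used by the remap kernels:
// work-item wy covers rows wy*rowsPerWI .. wy*rowsPerWI + rowsPerWI - 1.
#include <cstdio>

int main()
{
    const int dst_rows = 10, rowsPerWI = 4;                     // 4 on Intel devices, 1 otherwise
    const int globalY = (dst_rows + rowsPerWI - 1) / rowsPerWI; // same rounding as ocl_remap

    for (int wy = 0; wy < globalY; ++wy)
        for (int i = 0, y = wy * rowsPerWI; i < rowsPerWI; ++i, ++y)
            if (y < dst_rows)                                   // tail guard, as in the kernel
                std::printf("work-item %d writes row %d\n", wy, y);
    return 0;
}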
__global T * dst = (__global T *)(dstptr + dst_index); - if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y)) - b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataB, b); + int2 map_dataA = convert_int2(map1[0]); + int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y); + int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1); + int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1); - if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y)) - c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataC, c); + ushort map2Value = (ushort)(map2[0] & (INTER_TAB_SIZE2 - 1)); + WT2 u = (WT2)(map2Value & (INTER_TAB_SIZE - 1), map2Value >> INTER_BITS) / (WT2)(INTER_TAB_SIZE); - if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y)) - d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataD, d); + WT a = scalar, b = scalar, c = scalar, d = scalar; - WT dst_data = a * (1 - u.x) * (1 - u.y) + - b * (u.x) * (1 - u.y) + - c * (1 - u.x) * (u.y) + - d * (u.x) * (u.y); - storepix(convertToT(dst_data), dst); + if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y)) + a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataA, a); + + if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y)) + b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataB, b); + + if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y)) + c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataC, c); + + if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y)) + d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataD, d); + + WT dst_data = a * (1 - u.x) * (1 - u.y) + + b * (u.x) * (1 - u.y) + + c * (1 - u.x) * (u.y) + + d * (u.x) * (u.y); + storepix(convertToT(dst_data), dst); + } } } @@ -359,55 +394,106 @@ __kernel void remap_2_32FC1(__global const uchar * srcptr, int src_step, int src ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); - int map1_index = mad24(y, map1_step, x * (int)sizeof(float) + map1_offset); - int map2_index = mad24(y, map2_step, x * (int)sizeof(float) + map2_offset); - - __global const float * map1 = (__global const float *)(map1ptr + map1_index); - __global const float * map2 = (__global const float *)(map2ptr + map2_index); - __global T * dst = (__global T *)(dstptr + dst_index); - - float2 map_data = (float2)(map1[0], map2[0]); - - int2 map_dataA = convert_int2_sat_rtn(map_data); - int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y); - int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1); - int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1); - - float2 _u = map_data - convert_float2(map_dataA); - WT2 u = convertToWT2(convert_int2_rte(convertToWT2(_u) * (WT2)INTER_TAB_SIZE)) / (WT2)INTER_TAB_SIZE; WT scalar = convertToWT(convertScalar(nVal)); - WT a = scalar, b = scalar, c = scalar, d = scalar; + int 
dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); + int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(float), map1_offset)); + int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(float), map2_offset)); - if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y)) - a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataA, a); + #pragma unroll + for (int i = 0; i < rowsPerWI; ++i, ++y, + map1_index += map1_step, map2_index += map2_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const float * map1 = (__global const float *)(map1ptr + map1_index); + __global const float * map2 = (__global const float *)(map2ptr + map2_index); + __global T * dst = (__global T *)(dstptr + dst_index); - if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y)) - b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataB, b); +#if defined BORDER_CONSTANT - if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y)) - c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataC, c); + float xf = map1[0], yf = map2[0]; + int sx = convert_int_sat_rtn(xf), sy = convert_int_sat_rtn(yf); - if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y)) - d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataD, d); + __constant float * coeffs_x = coeffs + ((convert_int_rte(xf * INTER_TAB_SIZE) & (INTER_TAB_SIZE - 1)) << 1); + __constant float * coeffs_y = coeffs + ((convert_int_rte(yf * INTER_TAB_SIZE) & (INTER_TAB_SIZE - 1)) << 1); - WT dst_data = a * (1 - u.x) * (1 - u.y) + - b * (u.x) * (1 - u.y) + - c * (1 - u.x) * (u.y) + - d * (u.x) * (u.y); - storepix(convertToT(dst_data), dst); + WT sum = (WT)(0), xsum; + int src_index = mad24(sy, src_step, mad24(sx, TSIZE, src_offset)); + + #pragma unroll + for (int yp = 0; yp < 2; ++yp, src_index += src_step) + { + if (sy + yp >= 0 && sy + yp < src_rows) + { + xsum = (WT)(0); + if (sx >= 0 && sx + 2 < src_cols) + { +#if depth == 0 && cn == 1 + uchar2 value = vload2(0, srcptr + src_index); + xsum = dot(convert_float2(value), (float2)(coeffs_x[0], coeffs_x[1])); +#else + #pragma unroll + for (int xp = 0; xp < 2; ++xp) + xsum = fma(convertToWT(loadpix(srcptr + mad24(xp, TSIZE, src_index))), coeffs_x[xp], xsum); +#endif + } + else + { + #pragma unroll + for (int xp = 0; xp < 2; ++xp) + xsum = fma(sx + xp >= 0 && sx + xp < src_cols ? 
+ convertToWT(loadpix(srcptr + mad24(xp, TSIZE, src_index))) : scalar, coeffs_x[xp], xsum); + } + sum = fma(xsum, coeffs_y[yp], sum); + } + else + sum = fma(scalar, coeffs_y[yp], sum); + } + + storepix(convertToT(sum), dst); +#else + float2 map_data = (float2)(map1[0], map2[0]); + + int2 map_dataA = convert_int2_sat_rtn(map_data); + int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y); + int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1); + int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1); + + float2 _u = map_data - convert_float2(map_dataA); + WT2 u = convertToWT2(convert_int2_rte(convertToWT2(_u) * (WT2)INTER_TAB_SIZE)) / (WT2)INTER_TAB_SIZE; + WT scalar = convertToWT(convertScalar(nVal)); + WT a = scalar, b = scalar, c = scalar, d = scalar; + + if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y)) + a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataA, a); + + if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y)) + b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataB, b); + + if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y)) + c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataC, c); + + if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y)) + d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataD, d); + + WT dst_data = a * (1 - u.x) * (1 - u.y) + + b * (u.x) * (1 - u.y) + + c * (1 - u.x) * (u.y) + + d * (u.x) * (u.y); + storepix(convertToT(dst_data), dst); +#endif + } } } @@ -417,52 +503,58 @@ __kernel void remap_32FC2(__global const uchar * srcptr, int src_step, int src_o ST nVal) { int x = get_global_id(0); - int y = get_global_id(1); + int y = get_global_id(1) * rowsPerWI; - if (x < dst_cols && y < dst_rows) + if (x < dst_cols) { - int dst_index = mad24(y, dst_step, x * TSIZE + dst_offset); - int map_index = mad24(y, map_step, x * (int)sizeof(float2) + map_offset); - - __global const float2 * map = (__global const float2 *)(mapptr + map_index); - __global T * dst = (__global T *)(dstptr + dst_index); - - float2 map_data = map[0]; - int2 map_dataA = convert_int2_sat_rtn(map_data); - int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y); - int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1); - int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1); - - float2 _u = map_data - convert_float2(map_dataA); - WT2 u = convertToWT2(convert_int2_rte(convertToWT2(_u) * (WT2)INTER_TAB_SIZE)) / (WT2)INTER_TAB_SIZE; WT scalar = convertToWT(convertScalar(nVal)); - WT a = scalar, b = scalar, c = scalar, d = scalar; + int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset)); + int map_index = mad24(y, map_step, mad24(x, (int)sizeof(float2), map_offset)); - if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y)) - a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataA, a); + #pragma unroll + for (int i = 0; i < rowsPerWI; ++i, ++y, + map_index += map_step, dst_index += dst_step) + if (y < dst_rows) + { + __global const float2 * map = (__global const float2 *)(mapptr + map_index); + __global T * dst = (__global T *)(dstptr + dst_index); - if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y)) - b = 
convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataB, b); + float2 map_data = map[0]; + int2 map_dataA = convert_int2_sat_rtn(map_data); + int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y); + int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1); + int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1); - if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y)) - c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataC, c); + float2 _u = map_data - convert_float2(map_dataA); + WT2 u = convertToWT2(convert_int2_rte(convertToWT2(_u) * (WT2)INTER_TAB_SIZE)) / (WT2)INTER_TAB_SIZE; + WT a = scalar, b = scalar, c = scalar, d = scalar; - if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y)) - d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset)))); - else - EXTRAPOLATE(map_dataD, d); + if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y)) + a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataA, a); - WT dst_data = a * (1 - u.x) * (1 - u.y) + - b * (u.x) * (1 - u.y) + - c * (1 - u.x) * (u.y) + - d * (u.x) * (u.y); - storepix(convertToT(dst_data), dst); + if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y)) + b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataB, b); + + if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y)) + c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataC, c); + + if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y)) + d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset)))); + else + EXTRAPOLATE(map_dataD, d); + + WT dst_data = a * (1 - u.x) * (1 - u.y) + + b * (u.x) * (1 - u.y) + + c * (1 - u.x) * (u.y) + + d * (u.x) * (u.y); + storepix(convertToT(dst_data), dst); + } } } diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index 1e4f89cc64..cbbe399301 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -405,10 +405,10 @@ typedef void (*PyrFunc)(const Mat&, Mat&, int); static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType) { - int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type); + int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0; - if (channels > 4 || (depth == CV_64F && !doubleSupport)) + if (cn > 4 || (depth == CV_64F && !doubleSupport)) return false; Size ssize = _src.size(); @@ -423,17 +423,20 @@ static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, in int float_depth = depth == CV_64F ? 
CV_64F : CV_32F; const int local_size = 256; + int kercn = 1; + if (depth == CV_8U && float_depth == CV_32F && cn == 1 && ocl::Device::getDefault().isIntel()) + kercn = 4; const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" }; char cvt[2][50]; String buildOptions = format( "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s " - "-D T1=%s -D cn=%d -D %s -D LOCAL_SIZE=%d", - ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)), - ocl::convertTypeStr(float_depth, depth, channels, cvt[0]), - ocl::convertTypeStr(depth, float_depth, channels, cvt[1]), - doubleSupport ? " -D DOUBLE_SUPPORT" : "", - ocl::typeToStr(depth), channels, borderMap[borderType], local_size + "-D T1=%s -D cn=%d -D kercn=%d -D fdepth=%d -D %s -D LOCAL_SIZE=%d", + ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, cn)), + ocl::convertTypeStr(float_depth, depth, cn, cvt[0]), + ocl::convertTypeStr(depth, float_depth, cn, cvt[1]), + doubleSupport ? " -D DOUBLE_SUPPORT" : "", ocl::typeToStr(depth), + cn, kercn, float_depth, borderMap[borderType], local_size ); ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions); if (k.empty()) @@ -441,8 +444,8 @@ static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, in k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst)); - size_t localThreads[2] = { local_size, 1 }; - size_t globalThreads[2] = { src.cols, dst.rows }; + size_t localThreads[2] = { local_size/kercn, 1 }; + size_t globalThreads[2] = { (src.cols + (kercn-1))/kercn, dst.rows }; return k.run(2, globalThreads, localThreads, false); } diff --git a/modules/imgproc/test/ocl/test_warp.cpp b/modules/imgproc/test/ocl/test_warp.cpp index 416bd523ed..53d82187f9 100644 --- a/modules/imgproc/test/ocl/test_warp.cpp +++ b/modules/imgproc/test/ocl/test_warp.cpp @@ -267,7 +267,7 @@ PARAM_TEST_CASE(Remap, MatDepth, Channels, std::pair, BorderTy Border map1Border = randomBorder(0, useRoi ? MAX_VALUE : 0); randomSubMat(map1, map1_roi, dstROISize, map1Border, map1Type, -mapMaxValue, mapMaxValue); - Border map2Border = randomBorder(0, useRoi ? MAX_VALUE : 0); + Border map2Border = randomBorder(0, useRoi ? 
MAX_VALUE + 1 : 0); if (map2Type != noType) { int mapMinValue = -mapMaxValue; diff --git a/modules/imgproc/test/test_histograms.cpp b/modules/imgproc/test/test_histograms.cpp index 19ccc656b5..e9db6fcde0 100644 --- a/modules/imgproc/test/test_histograms.cpp +++ b/modules/imgproc/test/test_histograms.cpp @@ -948,7 +948,7 @@ int CV_ThreshHistTest::validate_test_results( int /*test_case_idx*/ ) class CV_CompareHistTest : public CV_BaseHistTest { public: - enum { MAX_METHOD = 5 }; + enum { MAX_METHOD = 6 }; CV_CompareHistTest(); protected: @@ -1021,6 +1021,13 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) sq0 += v0*v0; sq1 += v1*v1; result0[CV_COMP_BHATTACHARYYA] += sqrt(v0*v1); + { + if( fabs(v0) <= DBL_EPSILON ) + continue; + if( fabs(v1) <= DBL_EPSILON ) + v1 = 1e-10; + result0[CV_COMP_KL_DIV] += v0 * std::log( v0 / v1 ); + } } } else @@ -1046,6 +1053,13 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) s0 += v0; sq0 += v0*v0; result0[CV_COMP_BHATTACHARYYA] += sqrt(v0*v1); + { + if (v0 <= DBL_EPSILON) + continue; + if (!v1) + v1 = 1e-10; + result0[CV_COMP_KL_DIV] += v0 * std::log( v0 / v1 ); + } } for( node = cvInitSparseMatIterator( sparse1, &iterator ); @@ -1076,7 +1090,8 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) i == CV_COMP_CHISQR_ALT ? "Alternative Chi-Square" : i == CV_COMP_CORREL ? "Correlation" : i == CV_COMP_INTERSECT ? "Intersection" : - i == CV_COMP_BHATTACHARYYA ? "Bhattacharyya" : "Unknown"; + i == CV_COMP_BHATTACHARYYA ? "Bhattacharyya" : + i == CV_COMP_KL_DIV ? "Kullback-Leibler" : "Unknown"; if( cvIsNaN(v) || cvIsInf(v) ) { diff --git a/modules/imgproc/test/test_precomp.hpp b/modules/imgproc/test/test_precomp.hpp index 53f315ee4f..249ec8d629 100644 --- a/modules/imgproc/test/test_precomp.hpp +++ b/modules/imgproc/test/test_precomp.hpp @@ -13,7 +13,7 @@ #include "opencv2/ts.hpp" #include "opencv2/core/private.hpp" #include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/imgproc/imgproc_c.h" diff --git a/modules/java/CMakeLists.txt b/modules/java/CMakeLists.txt index 1948e21141..3e95b52ab4 100644 --- a/modules/java/CMakeLists.txt +++ b/modules/java/CMakeLists.txt @@ -6,7 +6,7 @@ if(IOS OR NOT PYTHON_EXECUTABLE OR NOT ANT_EXECUTABLE OR NOT (JNI_FOUND OR (ANDR endif() set(the_description "The java bindings") -ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_highgui opencv_ml opencv_calib3d opencv_photo opencv_nonfree opencv_contrib) +ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_videoio opencv_ml opencv_calib3d opencv_photo opencv_nonfree opencv_contrib) ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/generator/src/cpp") if(NOT ANDROID) diff --git a/modules/java/android_test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/android_test/src/org/opencv/test/OpenCVTestCase.java index 97cdca783a..78eb738cb5 100644 --- a/modules/java/android_test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/android_test/src/org/opencv/test/OpenCVTestCase.java @@ -21,7 +21,7 @@ import org.opencv.core.Scalar; import org.opencv.core.Size; import org.opencv.core.DMatch; import org.opencv.core.KeyPoint; -import org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; import android.util.Log; @@ -134,8 +134,8 @@ public class OpenCVTestCase extends TestCase { rgba0 = 
new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(0)); rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128)); - rgbLena = Highgui.imread(OpenCVTestRunner.LENA_PATH); - grayChess = Highgui.imread(OpenCVTestRunner.CHESS_PATH, 0); + rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); + grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0); v1 = new Mat(1, 3, CvType.CV_32F); v1.put(0, 0, 1.0, 3.0, 2.0); diff --git a/modules/java/android_test/src/org/opencv/test/android/UtilsTest.java b/modules/java/android_test/src/org/opencv/test/android/UtilsTest.java index ac20139104..5494513158 100644 --- a/modules/java/android_test/src/org/opencv/test/android/UtilsTest.java +++ b/modules/java/android_test/src/org/opencv/test/android/UtilsTest.java @@ -5,7 +5,7 @@ import org.opencv.core.Core; import org.opencv.core.CvType; import org.opencv.core.Mat; import org.opencv.core.Scalar; -import org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; import org.opencv.imgproc.Imgproc; import org.opencv.test.OpenCVTestCase; import org.opencv.test.OpenCVTestRunner; @@ -57,7 +57,7 @@ public class UtilsTest extends OpenCVTestCase { } public void testMatToBitmap() { - Mat imgBGR = Highgui.imread( OpenCVTestRunner.LENA_PATH ); + Mat imgBGR = Imgcodecs.imread( OpenCVTestRunner.LENA_PATH ); assertTrue(imgBGR != null && !imgBGR.empty() && imgBGR.channels() == 3); Mat m16 = new Mat(imgBGR.rows(), imgBGR.cols(), CvType.CV_8UC4); diff --git a/modules/java/android_test/src/org/opencv/test/features2d/Features2dTest.java b/modules/java/android_test/src/org/opencv/test/features2d/Features2dTest.java index 2118b53065..b9e8983fdc 100644 --- a/modules/java/android_test/src/org/opencv/test/features2d/Features2dTest.java +++ b/modules/java/android_test/src/org/opencv/test/features2d/Features2dTest.java @@ -18,7 +18,7 @@ import org.opencv.features2d.DescriptorMatcher; import org.opencv.features2d.FeatureDetector; import org.opencv.features2d.Features2d; import org.opencv.core.KeyPoint; -import org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; import org.opencv.test.OpenCVTestCase; import org.opencv.test.OpenCVTestRunner; @@ -93,7 +93,7 @@ public class Features2dTest extends OpenCVTestCase { writeFile(extractorCfgFile, extractorCfg); extractor.read(extractorCfgFile); - Mat imgTrain = Highgui.imread(OpenCVTestRunner.LENA_PATH, Highgui.CV_LOAD_IMAGE_GRAYSCALE); + Mat imgTrain = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE); Mat imgQuery = imgTrain.submat(new Range(0, imgTrain.rows() - 100), Range.all()); MatOfKeyPoint trainKeypoints = new MatOfKeyPoint(); @@ -139,7 +139,7 @@ public class Features2dTest extends OpenCVTestCase { Mat outimg = new Mat(); Features2d.drawMatches(imgQuery, queryKeypoints, imgTrain, trainKeypoints, matches, outimg); String outputPath = OpenCVTestRunner.getOutputFileName("PTODresult.png"); - Highgui.imwrite(outputPath, outimg); + Imgcodecs.imwrite(outputPath, outimg); // OpenCVTestRunner.Log("Output image is saved to: " + outputPath); } } diff --git a/modules/java/android_test/src/org/opencv/test/highgui/HighguiTest.java b/modules/java/android_test/src/org/opencv/test/highgui/HighguiTest.java index b8e7c3b8b2..312462c3a5 100644 --- a/modules/java/android_test/src/org/opencv/test/highgui/HighguiTest.java +++ b/modules/java/android_test/src/org/opencv/test/highgui/HighguiTest.java @@ -2,7 +2,7 @@ package org.opencv.test.highgui; import org.opencv.core.MatOfByte; import org.opencv.core.MatOfInt; -import 
org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; import org.opencv.test.OpenCVTestCase; import org.opencv.test.OpenCVTestRunner; @@ -15,29 +15,29 @@ public class HighguiTest extends OpenCVTestCase { public void testImencodeStringMatListOfByte() { MatOfByte buff = new MatOfByte(); assertEquals(0, buff.total()); - assertTrue( Highgui.imencode(".jpg", gray127, buff) ); + assertTrue( Imgcodecs.imencode(".jpg", gray127, buff) ); assertFalse(0 == buff.total()); } public void testImencodeStringMatListOfByteListOfInteger() { - MatOfInt params40 = new MatOfInt(Highgui.IMWRITE_JPEG_QUALITY, 40); - MatOfInt params90 = new MatOfInt(Highgui.IMWRITE_JPEG_QUALITY, 90); + MatOfInt params40 = new MatOfInt(Imgcodecs.IMWRITE_JPEG_QUALITY, 40); + MatOfInt params90 = new MatOfInt(Imgcodecs.IMWRITE_JPEG_QUALITY, 90); /* or MatOfInt params = new MatOfInt(); - params.fromArray(Highgui.IMWRITE_JPEG_QUALITY, 40); + params.fromArray(Imgcodecs.IMWRITE_JPEG_QUALITY, 40); */ MatOfByte buff40 = new MatOfByte(); MatOfByte buff90 = new MatOfByte(); - assertTrue( Highgui.imencode(".jpg", rgbLena, buff40, params40) ); - assertTrue( Highgui.imencode(".jpg", rgbLena, buff90, params90) ); + assertTrue( Imgcodecs.imencode(".jpg", rgbLena, buff40, params40) ); + assertTrue( Imgcodecs.imencode(".jpg", rgbLena, buff90, params90) ); assertTrue(buff40.total() > 0); assertTrue(buff40.total() < buff90.total()); } public void testImreadString() { - dst = Highgui.imread(OpenCVTestRunner.LENA_PATH); + dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); assertTrue(!dst.empty()); assertEquals(3, dst.channels()); assertTrue(512 == dst.cols()); @@ -45,7 +45,7 @@ public class HighguiTest extends OpenCVTestCase { } public void testImreadStringInt() { - dst = Highgui.imread(OpenCVTestRunner.LENA_PATH, 0); + dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, 0); assertTrue(!dst.empty()); assertEquals(1, dst.channels()); assertTrue(512 == dst.cols()); diff --git a/modules/java/android_test/src/org/opencv/test/highgui/VideoCaptureTest.java b/modules/java/android_test/src/org/opencv/test/highgui/VideoCaptureTest.java index ec7211a294..f4cccdf5ce 100644 --- a/modules/java/android_test/src/org/opencv/test/highgui/VideoCaptureTest.java +++ b/modules/java/android_test/src/org/opencv/test/highgui/VideoCaptureTest.java @@ -3,8 +3,8 @@ package org.opencv.test.highgui; import java.util.List; import org.opencv.core.Size; -import org.opencv.highgui.Highgui; -import org.opencv.highgui.VideoCapture; +import org.opencv.videoio.Videoio; +import org.opencv.videoio.VideoCapture; import org.opencv.test.OpenCVTestCase; @@ -26,8 +26,8 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testGet() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); - double frameWidth = capture.get(Highgui.CV_CAP_PROP_FRAME_WIDTH); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); + double frameWidth = capture.get(Videoio.CV_CAP_PROP_FRAME_WIDTH); assertTrue(0 != frameWidth); } finally { if (capture != null) capture.release(); @@ -36,7 +36,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testGetSupportedPreviewSizes() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); List sizes = capture.getSupportedPreviewSizes(); assertNotNull(sizes); assertFalse(sizes.isEmpty()); @@ -53,7 +53,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testGrabFromRealCamera() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = 
new VideoCapture(Videoio.CV_CAP_ANDROID); isSucceed = capture.grab(); assertTrue(isSucceed); } finally { @@ -68,7 +68,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testIsOpenedRealCamera() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); isOpened = capture.isOpened(); assertTrue(isOpened); } finally { @@ -79,7 +79,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testOpen() { try { capture = new VideoCapture(); - capture.open(Highgui.CV_CAP_ANDROID); + capture.open(Videoio.CV_CAP_ANDROID); isOpened = capture.isOpened(); assertTrue(isOpened); } finally { @@ -89,7 +89,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testRead() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); isSucceed = capture.read(dst); assertTrue(isSucceed); assertFalse(dst.empty()); @@ -101,7 +101,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testRelease() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); capture.release(); assertFalse(capture.isOpened()); capture = null; @@ -112,7 +112,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testRetrieveMat() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); capture.grab(); isSucceed = capture.retrieve(dst); assertTrue(isSucceed); @@ -125,9 +125,9 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testRetrieveMatInt() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); capture.grab(); - isSucceed = capture.retrieve(dst, Highgui.CV_CAP_ANDROID_GREY_FRAME); + isSucceed = capture.retrieve(dst, Videoio.CV_CAP_ANDROID_GREY_FRAME); assertTrue(isSucceed); assertFalse(dst.empty()); assertEquals(1, dst.channels()); @@ -138,10 +138,10 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testSet() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); - capture.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, 640); - capture.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, 480); - double frameWidth = capture.get(Highgui.CV_CAP_PROP_FRAME_WIDTH); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); + capture.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, 640); + capture.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, 480); + double frameWidth = capture.get(Videoio.CV_CAP_PROP_FRAME_WIDTH); capture.read(dst); assertEquals(640.0, frameWidth); assertEquals(640, dst.cols()); @@ -158,7 +158,7 @@ public class VideoCaptureTest extends OpenCVTestCase { public void testVideoCaptureInt() { try { - capture = new VideoCapture(Highgui.CV_CAP_ANDROID); + capture = new VideoCapture(Videoio.CV_CAP_ANDROID); assertNotNull(capture); assertTrue(capture.isOpened()); } finally { diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index cce2708284..0f317d5248 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -11,7 +11,7 @@ except: class_ignore_list = ( #core "FileNode", "FileStorage", "KDTree", "KeyPoint", "DMatch", - #highgui + #videoio "VideoWriter", ) @@ -536,13 +536,13 @@ JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1getTextSize """\n private static native String getSupportedPreviewSizes_0(long nativeObj);\n""", 'cpp_code' : """ -JNIEXPORT jstring JNICALL 
Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10 +JNIEXPORT jstring JNICALL Java_org_opencv_videoio_VideoCapture_getSupportedPreviewSizes_10 (JNIEnv *env, jclass, jlong self); -JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10 +JNIEXPORT jstring JNICALL Java_org_opencv_videoio_VideoCapture_getSupportedPreviewSizes_10 (JNIEnv *env, jclass, jlong self) { - static const char method_name[] = "highgui::VideoCapture_getSupportedPreviewSizes_10()"; + static const char method_name[] = "videoio::VideoCapture_getSupportedPreviewSizes_10()"; try { LOGD("%s", method_name); VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL diff --git a/modules/java/generator/rst_parser.py b/modules/java/generator/rst_parser.py index 750d6f0be8..80b09ac40b 100755 --- a/modules/java/generator/rst_parser.py +++ b/modules/java/generator/rst_parser.py @@ -2,7 +2,7 @@ from __future__ import print_function import os, sys, re, string, fnmatch -allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"] +allmodules = ["core", "flann", "imgproc", "ml", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"] verbose = False show_warnings = True show_errors = True diff --git a/modules/java/generator/src/java/android+CameraBridgeViewBase.java b/modules/java/generator/src/java/android+CameraBridgeViewBase.java index c0c9f5bde7..67a7489cfd 100644 --- a/modules/java/generator/src/java/android+CameraBridgeViewBase.java +++ b/modules/java/generator/src/java/android+CameraBridgeViewBase.java @@ -6,7 +6,7 @@ import org.opencv.R; import org.opencv.android.Utils; import org.opencv.core.Mat; import org.opencv.core.Size; -import org.opencv.highgui.Highgui; +import org.opencv.videoio.Videoio; import android.app.Activity; import android.app.AlertDialog; @@ -46,7 +46,7 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac protected int mMaxHeight; protected int mMaxWidth; protected float mScale = 0; - protected int mPreviewFormat = Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA; + protected int mPreviewFormat = Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA; protected int mCameraIndex = CAMERA_ID_ANY; protected boolean mEnabled; protected FpsMeter mFpsMeter = null; @@ -151,10 +151,10 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac public Mat onCameraFrame(CvCameraViewFrame inputFrame) { Mat result = null; switch (mPreviewFormat) { - case Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA: + case Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA: result = mOldStyleListener.onCameraFrame(inputFrame.rgba()); break; - case Highgui.CV_CAP_ANDROID_GREY_FRAME: + case Videoio.CV_CAP_ANDROID_GREY_FRAME: result = mOldStyleListener.onCameraFrame(inputFrame.gray()); break; default: @@ -168,7 +168,7 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac mPreviewFormat = format; } - private int mPreviewFormat = Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA; + private int mPreviewFormat = Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA; private CvCameraViewListener mOldStyleListener; }; diff --git a/modules/java/generator/src/java/android+NativeCameraView.java 
b/modules/java/generator/src/java/android+NativeCameraView.java index db146d8aed..47d6a27c23 100644 --- a/modules/java/generator/src/java/android+NativeCameraView.java +++ b/modules/java/generator/src/java/android+NativeCameraView.java @@ -2,8 +2,8 @@ package org.opencv.android; import org.opencv.core.Mat; import org.opencv.core.Size; -import org.opencv.highgui.Highgui; -import org.opencv.highgui.VideoCapture; +import org.opencv.videoio.Videoio; +import org.opencv.videoio.VideoCapture; import android.content.Context; import android.util.AttributeSet; @@ -88,9 +88,9 @@ public class NativeCameraView extends CameraBridgeViewBase { synchronized (this) { if (mCameraIndex == -1) - mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID); + mCamera = new VideoCapture(Videoio.CV_CAP_ANDROID); else - mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID + mCameraIndex); + mCamera = new VideoCapture(Videoio.CV_CAP_ANDROID + mCameraIndex); if (mCamera == null) return false; @@ -119,8 +119,8 @@ public class NativeCameraView extends CameraBridgeViewBase { AllocateCache(); - mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, frameSize.width); - mCamera.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, frameSize.height); + mCamera.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, frameSize.width); + mCamera.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, frameSize.height); } Log.i(TAG, "Selected camera frame size = (" + mFrameWidth + ", " + mFrameHeight + ")"); @@ -139,13 +139,13 @@ public class NativeCameraView extends CameraBridgeViewBase { @Override public Mat rgba() { - mCapture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA); + mCapture.retrieve(mRgba, Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA); return mRgba; } @Override public Mat gray() { - mCapture.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME); + mCapture.retrieve(mGray, Videoio.CV_CAP_ANDROID_GREY_FRAME); return mGray; } diff --git a/modules/java/generator/src/java/android+Utils.java b/modules/java/generator/src/java/android+Utils.java index 9c461087c4..404c986da8 100644 --- a/modules/java/generator/src/java/android+Utils.java +++ b/modules/java/generator/src/java/android+Utils.java @@ -6,7 +6,7 @@ import android.graphics.Bitmap; import org.opencv.core.CvException; import org.opencv.core.CvType; import org.opencv.core.Mat; -import org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; import java.io.ByteArrayOutputStream; import java.io.File; @@ -67,7 +67,7 @@ public class Utils { encoded.put(0, 0, os.toByteArray()); os.close(); - Mat decoded = Highgui.imdecode(encoded, flags); + Mat decoded = Imgcodecs.imdecode(encoded, flags); encoded.release(); return decoded; diff --git a/modules/java/test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/test/src/org/opencv/test/OpenCVTestCase.java index 496f96242b..864c17ef14 100644 --- a/modules/java/test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/test/src/org/opencv/test/OpenCVTestCase.java @@ -25,7 +25,7 @@ import org.opencv.core.Scalar; import org.opencv.core.Size; import org.opencv.core.DMatch; import org.opencv.core.KeyPoint; -import org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; public class OpenCVTestCase extends TestCase { //change to 'true' to unblock fail on fail("Not yet implemented") @@ -164,8 +164,8 @@ public class OpenCVTestCase extends TestCase { rgba0 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(0)); rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128)); - rgbLena = Highgui.imread(OpenCVTestRunner.LENA_PATH); - grayChess = 
Highgui.imread(OpenCVTestRunner.CHESS_PATH, 0); + rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH); + grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0); v1 = new Mat(1, 3, CvType.CV_32F); v1.put(0, 0, 1.0, 3.0, 2.0); diff --git a/modules/matlab/CMakeLists.txt b/modules/matlab/CMakeLists.txt index a4c1c3b164..06b81696b9 100644 --- a/modules/matlab/CMakeLists.txt +++ b/modules/matlab/CMakeLists.txt @@ -85,7 +85,8 @@ endif() set(the_description "The Matlab/Octave bindings") ocv_add_module(matlab BINDINGS OPTIONAL opencv_core - opencv_imgproc opencv_ml opencv_highgui + opencv_imgproc opencv_ml + opencv_imgcodecs opencv_videoio opencv_highgui opencv_objdetect opencv_flann opencv_features2d opencv_photo opencv_video opencv_videostab opencv_calib opencv_calib3d diff --git a/modules/nonfree/perf/perf_precomp.hpp b/modules/nonfree/perf/perf_precomp.hpp index 45478eb8a3..ed84be8e37 100644 --- a/modules/nonfree/perf/perf_precomp.hpp +++ b/modules/nonfree/perf/perf_precomp.hpp @@ -13,7 +13,7 @@ #include "opencv2/ts.hpp" #include "opencv2/nonfree.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/opencv_modules.hpp" diff --git a/modules/objdetect/test/test_precomp.hpp b/modules/objdetect/test/test_precomp.hpp index cd0fbe4987..a68dd79911 100644 --- a/modules/objdetect/test/test_precomp.hpp +++ b/modules/objdetect/test/test_precomp.hpp @@ -12,6 +12,6 @@ #include "opencv2/ts.hpp" #include "opencv2/objdetect.hpp" #include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #endif diff --git a/modules/optim/test/test_denoise_tvl1.cpp b/modules/optim/test/test_denoise_tvl1.cpp index 76ec2cda3e..f757a1438d 100644 --- a/modules/optim/test/test_denoise_tvl1.cpp +++ b/modules/optim/test/test_denoise_tvl1.cpp @@ -39,7 +39,6 @@ // //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" void make_noisy(const cv::Mat& img, cv::Mat& noisy, double sigma, double pepper_salt_ratio,cv::RNG& rng) { diff --git a/modules/optim/test/test_precomp.hpp b/modules/optim/test/test_precomp.hpp index 9a86cab4b5..4f633e517a 100644 --- a/modules/optim/test/test_precomp.hpp +++ b/modules/optim/test/test_precomp.hpp @@ -11,5 +11,6 @@ #include "opencv2/ts.hpp" #include "opencv2/optim.hpp" +#include "opencv2/imgcodecs.hpp" #endif diff --git a/modules/photo/perf/perf_precomp.hpp b/modules/photo/perf/perf_precomp.hpp index 1fd0c81093..8e0acf7a24 100644 --- a/modules/photo/perf/perf_precomp.hpp +++ b/modules/photo/perf/perf_precomp.hpp @@ -11,7 +11,7 @@ #include "opencv2/ts.hpp" #include "opencv2/photo.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined diff --git a/modules/photo/test/test_precomp.hpp b/modules/photo/test/test_precomp.hpp index 336e5f8424..0b18859525 100644 --- a/modules/photo/test/test_precomp.hpp +++ b/modules/photo/test/test_precomp.hpp @@ -12,7 +12,7 @@ #include #include "opencv2/ts.hpp" #include "opencv2/photo.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include #include #include diff --git a/modules/python/CMakeLists.txt b/modules/python/CMakeLists.txt index a50e372cc9..af062cebc7 100644 --- a/modules/python/CMakeLists.txt +++ b/modules/python/CMakeLists.txt @@ -11,7 +11,7 @@ if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_NUMPY_INCLUDE_DIRS) endif() set(the_description "The python bindings") -ocv_add_module(python BINDINGS opencv_core opencv_flann 
opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_photo opencv_objdetect OPTIONAL opencv_nonfree) +ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_imgcodecs opencv_videoio opencv_highgui opencv_calib3d opencv_photo opencv_objdetect OPTIONAL opencv_nonfree) ocv_module_include_directories( "${PYTHON_INCLUDE_PATH}" @@ -31,6 +31,8 @@ set(opencv_hdrs "${OPENCV_MODULE_opencv_video_LOCATION}/include/opencv2/video/background_segm.hpp" "${OPENCV_MODULE_opencv_video_LOCATION}/include/opencv2/video/tracking.hpp" "${OPENCV_MODULE_opencv_photo_LOCATION}/include/opencv2/photo.hpp" + "${OPENCV_MODULE_opencv_imgcodecs_LOCATION}/include/opencv2/imgcodecs.hpp" + "${OPENCV_MODULE_opencv_videoio_LOCATION}/include/opencv2/videoio.hpp" "${OPENCV_MODULE_opencv_highgui_LOCATION}/include/opencv2/highgui.hpp" "${OPENCV_MODULE_opencv_ml_LOCATION}/include/opencv2/ml.hpp" "${OPENCV_MODULE_opencv_features2d_LOCATION}/include/opencv2/features2d.hpp" diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index e2bab9e7d3..92f1b7347c 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -15,6 +15,8 @@ opencv_hdr_list = [ "../../video/include/opencv2/video/tracking.hpp", "../../video/include/opencv2/video/background_segm.hpp", "../../objdetect/include/opencv2/objdetect.hpp", +"../../imgcodecs/include/opencv2/imgcodecs.hpp", +"../../videoio/include/opencv2/videoio.hpp", "../../highgui/include/opencv2/highgui.hpp" ] diff --git a/modules/shape/test/test_precomp.hpp b/modules/shape/test/test_precomp.hpp index e73248422b..819d711e85 100644 --- a/modules/shape/test/test_precomp.hpp +++ b/modules/shape/test/test_precomp.hpp @@ -13,7 +13,7 @@ #include "opencv2/ts.hpp" #include "opencv2/core.hpp" #include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/shape.hpp" #include "opencv2/opencv_modules.hpp" diff --git a/modules/stitching/perf/perf_stich.cpp b/modules/stitching/perf/perf_stich.cpp index 1a37472e98..b64fa18984 100644 --- a/modules/stitching/perf/perf_stich.cpp +++ b/modules/stitching/perf/perf_stich.cpp @@ -1,5 +1,5 @@ #include "perf_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/flann.hpp" #include "opencv2/opencv_modules.hpp" diff --git a/modules/stitching/src/stitcher.cpp b/modules/stitching/src/stitcher.cpp index ddfdb50843..0d4623af8f 100644 --- a/modules/stitching/src/stitcher.cpp +++ b/modules/stitching/src/stitcher.cpp @@ -522,7 +522,7 @@ Stitcher::Status Stitcher::estimateCameraParams() { std::vector rmats; for (size_t i = 0; i < cameras_.size(); ++i) - rmats.push_back(cameras_[i].R); + rmats.push_back(cameras_[i].R.clone()); detail::waveCorrect(rmats, wave_correct_kind_); for (size_t i = 0; i < cameras_.size(); ++i) cameras_[i].R = rmats[i]; diff --git a/modules/stitching/test/test_precomp.hpp b/modules/stitching/test/test_precomp.hpp index 60d6b3f057..0a3a709d18 100644 --- a/modules/stitching/test/test_precomp.hpp +++ b/modules/stitching/test/test_precomp.hpp @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/modules/superres/CMakeLists.txt b/modules/superres/CMakeLists.txt index c360303f6b..f85cf20736 100644 --- a/modules/superres/CMakeLists.txt +++ b/modules/superres/CMakeLists.txt @@ -5,4 +5,4 @@ endif() set(the_description "Super Resolution") 
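The stitcher.cpp hunk above replaces cameras_[i].R with cameras_[i].R.clone(). cv::Mat copies are shallow (only the header is copied; the data buffer is shared), so without clone() the rotation matrices handed to detail::waveCorrect() share storage with the ones kept in cameras_, and the deep copy gives wave correction its own buffers to work on, which appears to be the point of the change. A minimal standalone sketch of the copy semantics, assuming only the OpenCV core headers (illustrative, not part of the patch):

    // Shallow vs. deep copies of cv::Mat; clone() is what breaks the aliasing.
    #include <opencv2/core.hpp>
    #include <cassert>

    int main()
    {
        cv::Mat R = cv::Mat::eye(3, 3, CV_32F);

        cv::Mat shallow = R;          // header copy: both Mats point at the same data
        shallow.at<float>(0, 0) = 5;  // ...so this write is visible through R as well
        assert(R.at<float>(0, 0) == 5.0f);

        cv::Mat deep = R.clone();     // deep copy: independent buffer
        deep.at<float>(0, 0) = 7;     // R keeps its previous value
        assert(R.at<float>(0, 0) == 5.0f);

        return 0;
    }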
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 -Wundef -Wshadow) ocv_define_module(superres opencv_imgproc opencv_video - OPTIONAL opencv_highgui opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudaoptflow opencv_cudacodec) + OPTIONAL opencv_videoio opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudaoptflow opencv_cudacodec) diff --git a/modules/superres/src/frame_source.cpp b/modules/superres/src/frame_source.cpp index c572c09a92..0f81efd5e1 100644 --- a/modules/superres/src/frame_source.cpp +++ b/modules/superres/src/frame_source.cpp @@ -80,7 +80,7 @@ Ptr cv::superres::createFrameSource_Empty() ////////////////////////////////////////////////////// // VideoFrameSource & CameraFrameSource -#ifndef HAVE_OPENCV_HIGHGUI +#ifndef HAVE_OPENCV_VIDEOIO Ptr cv::superres::createFrameSource_Video(const String& fileName) { @@ -96,7 +96,7 @@ Ptr cv::superres::createFrameSource_Camera(int deviceId) return Ptr(); } -#else // HAVE_OPENCV_HIGHGUI +#else // HAVE_OPENCV_VIDEOIO namespace { @@ -187,7 +187,7 @@ Ptr cv::superres::createFrameSource_Camera(int deviceId) return makePtr(deviceId); } -#endif // HAVE_OPENCV_HIGHGUI +#endif // HAVE_OPENCV_VIDEOIO ////////////////////////////////////////////////////// // VideoFrameSource_CUDA diff --git a/modules/superres/src/precomp.hpp b/modules/superres/src/precomp.hpp index c3aeb665d4..9f12c248d6 100644 --- a/modules/superres/src/precomp.hpp +++ b/modules/superres/src/precomp.hpp @@ -82,8 +82,8 @@ # include "opencv2/cudacodec.hpp" #endif -#ifdef HAVE_OPENCV_HIGHGUI - #include "opencv2/highgui.hpp" +#ifdef HAVE_OPENCV_VIDEOIO + #include "opencv2/videoio.hpp" #endif #include "opencv2/superres.hpp" diff --git a/modules/ts/CMakeLists.txt b/modules/ts/CMakeLists.txt index 3e1b5a05ac..c923a29d8f 100644 --- a/modules/ts/CMakeLists.txt +++ b/modules/ts/CMakeLists.txt @@ -9,7 +9,7 @@ set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) -ocv_add_module(ts opencv_core opencv_imgproc opencv_highgui) +ocv_add_module(ts opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui) ocv_glob_module_sources() ocv_module_include_directories() diff --git a/modules/ts/include/opencv2/ts/cuda_perf.hpp b/modules/ts/include/opencv2/ts/cuda_perf.hpp index 8a23857f99..c179b72499 100644 --- a/modules/ts/include/opencv2/ts/cuda_perf.hpp +++ b/modules/ts/include/opencv2/ts/cuda_perf.hpp @@ -44,7 +44,8 @@ #define __OPENCV_CUDA_PERF_UTILITY_HPP__ #include "opencv2/core.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/ts/ts_perf.hpp" #include "cvconfig.h" diff --git a/modules/ts/include/opencv2/ts/cuda_test.hpp b/modules/ts/include/opencv2/ts/cuda_test.hpp index c433dfec7a..049021b544 100644 --- a/modules/ts/include/opencv2/ts/cuda_test.hpp +++ b/modules/ts/include/opencv2/ts/cuda_test.hpp @@ -47,6 +47,7 @@ #include "cvconfig.h" #include "opencv2/core.hpp" #include "opencv2/core/cuda.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/ts.hpp" diff --git a/modules/ts/include/opencv2/ts/ocl_test.hpp b/modules/ts/include/opencv2/ts/ocl_test.hpp index 43c01b2dab..3703b7b9f7 100644 --- a/modules/ts/include/opencv2/ts/ocl_test.hpp +++ b/modules/ts/include/opencv2/ts/ocl_test.hpp @@ -46,6 +46,8 @@ #include "opencv2/ts.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" 
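The superres changes above swap the HAVE_OPENCV_HIGHGUI guards in frame_source.cpp and precomp.hpp for HAVE_OPENCV_VIDEOIO: when OpenCV is built without the new videoio module, createFrameSource_Video()/createFrameSource_Camera() keep their existing fallback (the hunks show them returning an empty Ptr()). A condensed sketch of that optional-dependency pattern, where FrameProducer, VideoProducer and createVideoProducer are hypothetical stand-ins for the superres types (illustrative only, assuming OpenCV headers):

    #include <opencv2/core.hpp>
    #ifdef HAVE_OPENCV_VIDEOIO
    #  include <opencv2/videoio.hpp>
    #endif

    // Hypothetical stand-in for cv::superres::FrameSource.
    struct FrameProducer
    {
        virtual ~FrameProducer() {}
        virtual bool next(cv::Mat& frame) = 0;
    };

    #ifdef HAVE_OPENCV_VIDEOIO
    // Capture-backed implementation, compiled only when videoio is available.
    struct VideoProducer : FrameProducer
    {
        explicit VideoProducer(const cv::String& fileName) : cap(fileName) {}
        bool next(cv::Mat& frame) { return cap.read(frame); }
        cv::VideoCapture cap;
    };
    #endif

    cv::Ptr<FrameProducer> createVideoProducer(const cv::String& fileName)
    {
    #ifndef HAVE_OPENCV_VIDEOIO
        (void)fileName;
        CV_Error(cv::Error::StsNotImplemented, "OpenCV was built without videoio");
        return cv::Ptr<FrameProducer>();   // mirrors the empty Ptr() fallback in the patch
    #else
        return cv::makePtr<VideoProducer>(fileName);
    #endif
    }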
#include "opencv2/imgproc.hpp" #include "opencv2/imgproc/types_c.h" diff --git a/modules/video/perf/perf_precomp.hpp b/modules/video/perf/perf_precomp.hpp index 0ea88f0da5..7d1e9d922e 100644 --- a/modules/video/perf/perf_precomp.hpp +++ b/modules/video/perf/perf_precomp.hpp @@ -12,7 +12,7 @@ #include "opencv2/ts.hpp" #include #include -#include +#include #ifdef GTEST_CREATE_SHARED_LIBRARY #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined diff --git a/modules/video/test/test_precomp.hpp b/modules/video/test/test_precomp.hpp index 0aae1031f8..96a96f88fb 100644 --- a/modules/video/test/test_precomp.hpp +++ b/modules/video/test/test_precomp.hpp @@ -13,6 +13,6 @@ #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/video.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #endif diff --git a/modules/videoio/CMakeLists.txt b/modules/videoio/CMakeLists.txt new file mode 100644 index 0000000000..bba3d33396 --- /dev/null +++ b/modules/videoio/CMakeLists.txt @@ -0,0 +1,240 @@ +set(the_description "Media I/O") +ocv_add_module(videoio opencv_imgproc opencv_imgcodecs OPTIONAL opencv_androidcamera) + +# ---------------------------------------------------------------------------- +# CMake file for videoio. See root CMakeLists.txt +# Some parts taken from version of Hartmut Seichter, HIT Lab NZ. +# Jose Luis Blanco, 2008 +# ---------------------------------------------------------------------------- + +if(HAVE_WINRT_CX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW") +endif() + +if(APPLE) + ocv_include_directories(${ZLIB_INCLUDE_DIRS}) + list(APPEND VIDEOIO_LIBRARIES ${ZLIB_LIBRARIES}) +endif() + +set(videoio_hdrs + src/precomp.hpp + src/cap_ffmpeg_impl.hpp + ) + +set(videoio_srcs + src/cap.cpp + src/cap_images.cpp + src/cap_ffmpeg.cpp + ) + +file(GLOB videoio_ext_hdrs "include/opencv2/*.hpp" "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h") + +if(WIN32 AND NOT ARM) + list(APPEND videoio_srcs src/cap_cmu.cpp) +endif() + +if (WIN32 AND HAVE_DSHOW) + list(APPEND videoio_srcs src/cap_dshow.cpp) + list(APPEND videoio_hdrs src/cap_dshow.hpp) +endif() + +if (WIN32 AND HAVE_MSMF) + list(APPEND videoio_srcs src/cap_msmf.cpp) +endif() + +if (WIN32 AND HAVE_VFW) + list(APPEND videoio_srcs src/cap_vfw.cpp) +endif() + +if(HAVE_XINE) + list(APPEND videoio_srcs src/cap_xine.cpp) +endif(HAVE_XINE) + +if(HAVE_DC1394_2) + list(APPEND videoio_srcs src/cap_dc1394_v2.cpp) +endif(HAVE_DC1394_2) + +if(HAVE_DC1394) + list(APPEND videoio_srcs src/cap_dc1394.cpp) +endif(HAVE_DC1394) + +if(HAVE_GSTREAMER) + list(APPEND videoio_srcs src/cap_gstreamer.cpp) +endif(HAVE_GSTREAMER) + +if(HAVE_UNICAP) + list(APPEND videoio_srcs src/cap_unicap.cpp) +endif(HAVE_UNICAP) + +if(HAVE_LIBV4L) + list(APPEND videoio_srcs src/cap_libv4l.cpp) +elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO) + list(APPEND videoio_srcs src/cap_v4l.cpp) +endif() + +if(HAVE_OPENNI) + list(APPEND videoio_srcs src/cap_openni.cpp) + ocv_include_directories(${OPENNI_INCLUDE_DIR}) + list(APPEND VIDEOIO_LIBRARIES ${OPENNI_LIBRARY}) +endif(HAVE_OPENNI) + +if(HAVE_OPENNI2) + list(APPEND videoio_srcs src/cap_openni2.cpp) + ocv_include_directories(${OPENNI2_INCLUDE_DIR}) + list(APPEND VIDEOIO_LIBRARIES ${OPENNI2_LIBRARY}) +endif(HAVE_OPENNI2) + +if(HAVE_opencv_androidcamera) + list(APPEND videoio_srcs src/cap_android.cpp) + add_definitions(-DHAVE_ANDROID_NATIVE_CAMERA)#TODO: remove this line +endif(HAVE_opencv_androidcamera) + +if(HAVE_XIMEA) + list(APPEND videoio_srcs src/cap_ximea.cpp) + 
ocv_include_directories(${XIMEA_PATH}) + if(XIMEA_LIBRARY_DIR) + link_directories("${XIMEA_LIBRARY_DIR}") + endif() + if(X86_64) + list(APPEND VIDEOIO_LIBRARIES m3apiX64) + else() + list(APPEND VIDEOIO_LIBRARIES m3api) + endif() +endif(HAVE_XIMEA) + +if(HAVE_FFMPEG) + if(UNIX AND BZIP2_LIBRARIES) + list(APPEND VIDEOIO_LIBRARIES ${BZIP2_LIBRARIES}) + endif() + if(APPLE) + list(APPEND VIDEOIO_LIBRARIES "-framework VideoDecodeAcceleration" bz2) + endif() +endif(HAVE_FFMPEG) + +if(HAVE_PVAPI) + add_definitions(-DHAVE_PVAPI) + add_definitions(${PVAPI_DEFINITIONS}) + ocv_include_directories(${PVAPI_INCLUDE_PATH}) + set(videoio_srcs src/cap_pvapi.cpp ${videoio_srcs}) + list(APPEND VIDEOIO_LIBRARIES ${PVAPI_LIBRARY}) +endif() + +if(HAVE_GIGE_API) + add_definitions(-DHAVE_GIGE_API) + ocv_include_directories(${GIGEAPI_INCLUDE_PATH}) + set(videoio_srcs src/cap_giganetix.cpp ${videoio_srcs}) + list(APPEND VIDEOIO_LIBRARIES ${GIGEAPI_LIBRARIES}) + list(APPEND videoio_srcs src/cap_giganetix.cpp) +endif(HAVE_GIGE_API) + +if(HAVE_AVFOUNDATION) + list(APPEND videoio_srcs src/cap_avfoundation.mm) + list(APPEND VIDEOIO_LIBRARIES "-framework AVFoundation" "-framework QuartzCore") +endif() + +if(HAVE_QUICKTIME) + list(APPEND videoio_srcs src/cap_qt.cpp) + list(APPEND VIDEOIO_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore") +elseif(HAVE_QTKIT) + list(APPEND videoio_srcs src/cap_qtkit.mm) + list(APPEND VIDEOIO_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit") +endif() + +if(HAVE_INTELPERC) + list(APPEND videoio_srcs src/cap_intelperc.cpp) + ocv_include_directories(${INTELPERC_INCLUDE_DIR}) + list(APPEND VIDEOIO_LIBRARIES ${INTELPERC_LIBRARIES}) +endif(HAVE_INTELPERC) + +if(IOS) + add_definitions(-DHAVE_IOS=1) + list(APPEND videoio_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm) + list(APPEND VIDEOIO_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary") +endif() + +if(WIN32) + link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only + include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW + include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests +endif() + +if(UNIX) + #these variables are set by CHECK_MODULE macro + foreach(P ${VIDEOIO_INCLUDE_DIRS}) + ocv_include_directories(${P}) + endforeach() + + foreach(P ${VIDEOIO_LIBRARY_DIRS}) + link_directories(${P}) + endforeach() +endif() + +source_group("Src" FILES ${videoio_srcs} ${videoio_hdrs}) +source_group("Include" FILES ${videoio_ext_hdrs}) +ocv_set_module_sources(HEADERS ${videoio_ext_hdrs} SOURCES ${videoio_srcs} ${videoio_hdrs}) +ocv_module_include_directories() + +ocv_create_module(${VIDEOIO_LIBRARIES}) + +if(APPLE) + ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS) + if(HAVE_OBJC_EXCEPTIONS) + foreach(source ${OPENCV_MODULE_${the_module}_SOURCES}) + if("${source}" MATCHES "\\.mm$") + get_source_file_property(flags "${source}" COMPILE_FLAGS) + if(flags) + set(flags "${_flags} -fobjc-exceptions") + else() + set(flags "-fobjc-exceptions") + endif() + + set_source_files_properties("${source}" PROPERTIES COMPILE_FLAGS "${flags}") + endif() + endforeach() + endif() +endif() + +if(BUILD_SHARED_LIBS) + 
add_definitions(-DVIDEOIO_EXPORTS) +endif() + +if(MSVC) + set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /NODEFAULTLIB:libcmt.lib /DEBUG") +endif() + +#stop automatic dependencies propagation for this module +set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "") + +ocv_add_precompiled_headers(${the_module}) +ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations) + +if(WIN32 AND WITH_FFMPEG) + #copy ffmpeg dll to the output folder + if(MSVC64 OR MINGW64) + set(FFMPEG_SUFFIX _64) + endif() + + set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll") + set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll") + set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}") + + if(MSVC_IDE) + add_custom_command(TARGET ${the_module} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}" + COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}" + COMMENT "Copying ${ffmpeg_path} to the output directory") + elseif(MSVC AND (CMAKE_GENERATOR MATCHES "Visual")) + add_custom_command(TARGET ${the_module} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}" + COMMENT "Copying ${ffmpeg_path} to the output directory") + else() + add_custom_command(TARGET ${the_module} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}" + COMMENT "Copying ${ffmpeg_path} to the output directory") + endif() + + install(FILES "${ffmpeg_path}" DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs RENAME "${ffmpeg_bare_name_ver}") +endif() + +ocv_add_accuracy_tests() +ocv_add_perf_tests() diff --git a/modules/highgui/doc/reading_and_writing_images_and_video.rst b/modules/videoio/doc/reading_and_writing_video.rst similarity index 63% rename from modules/highgui/doc/reading_and_writing_images_and_video.rst rename to modules/videoio/doc/reading_and_writing_video.rst index c094ef7e23..4c7355129a 100644 --- a/modules/highgui/doc/reading_and_writing_images_and_video.rst +++ b/modules/videoio/doc/reading_and_writing_video.rst @@ -1,191 +1,8 @@ -Reading and Writing Images and Video -==================================== +Reading and Writing Video +========================= .. highlight:: cpp -imdecode --------- -Reads an image from a buffer in memory. - -.. ocv:function:: Mat imdecode( InputArray buf, int flags ) - -.. ocv:function:: Mat imdecode( InputArray buf, int flags, Mat* dst ) - -.. ocv:cfunction:: IplImage* cvDecodeImage( const CvMat* buf, int iscolor=CV_LOAD_IMAGE_COLOR) - -.. ocv:cfunction:: CvMat* cvDecodeImageM( const CvMat* buf, int iscolor=CV_LOAD_IMAGE_COLOR) - -.. ocv:pyfunction:: cv2.imdecode(buf, flags) -> retval - - :param buf: Input array or vector of bytes. - - :param flags: The same flags as in :ocv:func:`imread` . - - :param dst: The optional output placeholder for the decoded matrix. It can save the image reallocations when the function is called repeatedly for images of the same size. - -The function reads an image from the specified buffer in the memory. -If the buffer is too short or contains invalid data, the empty matrix/image is returned. - -See -:ocv:func:`imread` for the list of supported formats and flags description. - -.. 
note:: In the case of color images, the decoded images will have the channels stored in ``B G R`` order. - -imencode --------- -Encodes an image into a memory buffer. - -.. ocv:function:: bool imencode( const String& ext, InputArray img, vector& buf, const vector& params=vector()) - -.. ocv:cfunction:: CvMat* cvEncodeImage( const char* ext, const CvArr* image, const int* params=0 ) - -.. ocv:pyfunction:: cv2.imencode(ext, img[, params]) -> retval, buf - - :param ext: File extension that defines the output format. - - :param img: Image to be written. - - :param buf: Output buffer resized to fit the compressed image. - - :param params: Format-specific parameters. See :ocv:func:`imwrite` . - -The function compresses the image and stores it in the memory buffer that is resized to fit the result. -See -:ocv:func:`imwrite` for the list of supported formats and flags description. - -.. note:: ``cvEncodeImage`` returns single-row matrix of type ``CV_8UC1`` that contains encoded image as array of bytes. - -imread ------- -Loads an image from a file. - -.. ocv:function:: Mat imread( const String& filename, int flags=IMREAD_COLOR ) - -.. ocv:pyfunction:: cv2.imread(filename[, flags]) -> retval - -.. ocv:cfunction:: IplImage* cvLoadImage( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR ) - -.. ocv:cfunction:: CvMat* cvLoadImageM( const char* filename, int iscolor=CV_LOAD_IMAGE_COLOR ) - - :param filename: Name of file to be loaded. - - :param flags: Flags specifying the color type of a loaded image: - - * CV_LOAD_IMAGE_ANYDEPTH - If set, return 16-bit/32-bit image when the input has the corresponding depth, otherwise convert it to 8-bit. - - * CV_LOAD_IMAGE_COLOR - If set, always convert image to the color one - - * CV_LOAD_IMAGE_GRAYSCALE - If set, always convert image to the grayscale one - - * **>0** Return a 3-channel color image. - .. note:: In the current implementation the alpha channel, if any, is stripped from the output image. Use negative value if you need the alpha channel. - - * **=0** Return a grayscale image. - - * **<0** Return the loaded image as is (with alpha channel). - -The function ``imread`` loads an image from the specified file and returns it. If the image cannot be read (because of missing file, improper permissions, unsupported or invalid format), the function returns an empty matrix ( ``Mat::data==NULL`` ). Currently, the following file formats are supported: - - * Windows bitmaps - ``*.bmp, *.dib`` (always supported) - - * JPEG files - ``*.jpeg, *.jpg, *.jpe`` (see the *Notes* section) - - * JPEG 2000 files - ``*.jp2`` (see the *Notes* section) - - * Portable Network Graphics - ``*.png`` (see the *Notes* section) - - * WebP - ``*.webp`` (see the *Notes* section) - - * Portable image format - ``*.pbm, *.pgm, *.ppm`` (always supported) - - * Sun rasters - ``*.sr, *.ras`` (always supported) - - * TIFF files - ``*.tiff, *.tif`` (see the *Notes* section) - -.. note:: - - * The function determines the type of an image by the content, not by the file extension. - - * On Microsoft Windows* OS and MacOSX*, the codecs shipped with an OpenCV image (libjpeg, libpng, libtiff, and libjasper) are used by default. So, OpenCV can always read JPEGs, PNGs, and TIFFs. On MacOSX, there is also an option to use native MacOSX image readers. But beware that currently these native image loaders give images with different pixel values because of the color management embedded into MacOSX. 
- - * On Linux*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for codecs supplied with an OS image. Install the relevant packages (do not forget the development files, for example, "libjpeg-dev", in Debian* and Ubuntu*) to get the codec support or turn on the ``OPENCV_BUILD_3RDPARTY_LIBS`` flag in CMake. - -.. note:: In the case of color images, the decoded images will have the channels stored in ``B G R`` order. - -imwrite ------------ -Saves an image to a specified file. - -.. ocv:function:: bool imwrite( const String& filename, InputArray img, const vector& params=vector() ) - -.. ocv:pyfunction:: cv2.imwrite(filename, img[, params]) -> retval - -.. ocv:cfunction:: int cvSaveImage( const char* filename, const CvArr* image, const int* params=0 ) - - :param filename: Name of the file. - - :param image: Image to be saved. - - :param params: Format-specific save parameters encoded as pairs ``paramId_1, paramValue_1, paramId_2, paramValue_2, ...`` . The following parameters are currently supported: - - * For JPEG, it can be a quality ( ``CV_IMWRITE_JPEG_QUALITY`` ) from 0 to 100 (the higher is the better). Default value is 95. - - * For WEBP, it can be a quality ( CV_IMWRITE_WEBP_QUALITY ) from 1 to 100 (the higher is the better). - By default (without any parameter) and for quality above 100 the lossless compression is used. - - * For PNG, it can be the compression level ( ``CV_IMWRITE_PNG_COMPRESSION`` ) from 0 to 9. A higher value means a smaller size and longer compression time. Default value is 3. - - * For PPM, PGM, or PBM, it can be a binary format flag ( ``CV_IMWRITE_PXM_BINARY`` ), 0 or 1. Default value is 1. - -The function ``imwrite`` saves the image to the specified file. The image format is chosen based on the ``filename`` extension (see -:ocv:func:`imread` for the list of extensions). Only 8-bit (or 16-bit unsigned (``CV_16U``) in case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel (with 'BGR' channel order) images can be saved using this function. If the format, depth or channel order is different, use -:ocv:func:`Mat::convertTo` , and -:ocv:func:`cvtColor` to convert it before saving. Or, use the universal :ocv:class:`FileStorage` I/O functions to save the image to XML or YAML format. - -It is possible to store PNG images with an alpha channel using this function. To do this, create 8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535. The sample below shows how to create such a BGRA image and store to PNG file. 
It also demonstrates how to set custom compression parameters :: - - #include <vector> - #include <stdio.h> - #include <opencv2/opencv.hpp> - - using namespace cv; - using namespace std; - - void createAlphaMat(Mat &mat) - { - for (int i = 0; i < mat.rows; ++i) { - for (int j = 0; j < mat.cols; ++j) { - Vec4b& rgba = mat.at<Vec4b>(i, j); - rgba[0] = UCHAR_MAX; - rgba[1] = saturate_cast<uchar>((float (mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX); - rgba[2] = saturate_cast<uchar>((float (mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX); - rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2])); - } - } - } - - int main(int argc, char **argv) - { - // Create mat with alpha channel - Mat mat(480, 640, CV_8UC4); - createAlphaMat(mat); - - vector<int> compression_params; - compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); - compression_params.push_back(9); - - try { - imwrite("alpha.png", mat, compression_params); - } - catch (const cv::Exception& ex) { - fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); - return 1; - } - - fprintf(stdout, "Saved PNG file with alpha data.\n"); - return 0; - } - VideoCapture ------------ diff --git a/modules/videoio/doc/videoio.rst b/modules/videoio/doc/videoio.rst new file mode 100644 index 0000000000..fee114bd05 --- /dev/null +++ b/modules/videoio/doc/videoio.rst @@ -0,0 +1,12 @@ +******************* +videoio. Media I/O +******************* + +videoio provides an easy interface to: + +* Read video from a camera or a file and write video to a file. + +.. toctree:: + :maxdepth: 2 + + reading_and_writing_video diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp new file mode 100644 index 0000000000..763c5bd435 --- /dev/null +++ b/modules/videoio/include/opencv2/videoio.hpp @@ -0,0 +1,392 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_VIDEOIO_HPP__ +#define __OPENCV_VIDEOIO_HPP__ + +#include "opencv2/core.hpp" + + +////////////////////////////////// video io ///////////////////////////////// + +typedef struct CvCapture CvCapture; +typedef struct CvVideoWriter CvVideoWriter; + +namespace cv +{ + +// Camera API +enum { CAP_ANY = 0, // autodetect + CAP_VFW = 200, // platform native + CAP_V4L = 200, + CAP_V4L2 = CAP_V4L, + CAP_FIREWARE = 300, // IEEE 1394 drivers + CAP_FIREWIRE = CAP_FIREWARE, + CAP_IEEE1394 = CAP_FIREWARE, + CAP_DC1394 = CAP_FIREWARE, + CAP_CMU1394 = CAP_FIREWARE, + CAP_QT = 500, // QuickTime + CAP_UNICAP = 600, // Unicap drivers + CAP_DSHOW = 700, // DirectShow (via videoInput) + CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK + CAP_OPENNI = 900, // OpenNI (for Kinect) + CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion) + CAP_ANDROID = 1000, // Android + CAP_XIAPI = 1100, // XIMEA Camera API + CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK + CAP_MSMF = 1400, // Microsoft Media Foundation (via videoInput) + CAP_INTELPERC = 1500, // Intel Perceptual Computing SDK + CAP_OPENNI2 = 1600 // OpenNI2 (for Kinect) + }; + +// generic properties (based on DC1394 properties) +enum { CAP_PROP_POS_MSEC =0, + CAP_PROP_POS_FRAMES =1, + CAP_PROP_POS_AVI_RATIO =2, + CAP_PROP_FRAME_WIDTH =3, + CAP_PROP_FRAME_HEIGHT =4, + CAP_PROP_FPS =5, + CAP_PROP_FOURCC =6, + CAP_PROP_FRAME_COUNT =7, + CAP_PROP_FORMAT =8, + CAP_PROP_MODE =9, + CAP_PROP_BRIGHTNESS =10, + CAP_PROP_CONTRAST =11, + CAP_PROP_SATURATION =12, + CAP_PROP_HUE =13, + CAP_PROP_GAIN =14, + CAP_PROP_EXPOSURE =15, + CAP_PROP_CONVERT_RGB =16, + CAP_PROP_WHITE_BALANCE_BLUE_U =17, + CAP_PROP_RECTIFICATION =18, + CAP_PROP_MONOCROME =19, + CAP_PROP_SHARPNESS =20, + CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust refernce level using this feature + CAP_PROP_GAMMA =22, + CAP_PROP_TEMPERATURE =23, + CAP_PROP_TRIGGER =24, + CAP_PROP_TRIGGER_DELAY =25, + CAP_PROP_WHITE_BALANCE_RED_V =26, + CAP_PROP_ZOOM =27, + CAP_PROP_FOCUS =28, + CAP_PROP_GUID =29, + CAP_PROP_ISO_SPEED =30, + CAP_PROP_BACKLIGHT =32, + CAP_PROP_PAN =33, + CAP_PROP_TILT =34, + CAP_PROP_ROLL =35, + CAP_PROP_IRIS =36, + CAP_PROP_SETTINGS =37 + }; + + +// DC1394 only +// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) +// every feature can have only one mode turned on at a time +enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) + CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CAP_PROP_DC1394_MODE_AUTO = -2, + CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CAP_PROP_DC1394_MAX = 31 + }; + + +// OpenNI map generators +enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CAP_OPENNI_GENERATORS_MASK = 
CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR + }; + +// Properties of cameras available through OpenNI interfaces +enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CAP_PROP_OPENNI_BASELINE = 102, // in mm + CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets this view point to its normal one (if the flag is "off"). + CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION, + CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + CAP_PROP_OPENNI2_SYNC = 110, + CAP_PROP_OPENNI2_MIRROR = 111 + }; + +// OpenNI shortcats +enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT, + CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE, + CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE, + CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH, + CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION, + CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION + }; + +// OpenNI data given from depth generator +enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator + CAP_OPENNI_BGR_IMAGE = 5, + CAP_OPENNI_GRAY_IMAGE = 6 + }; + +// Supported output modes of OpenNI image generator +enum { CAP_OPENNI_VGA_30HZ = 0, + CAP_OPENNI_SXGA_15HZ = 1, + CAP_OPENNI_SXGA_30HZ = 2, + CAP_OPENNI_QVGA_30HZ = 3, + CAP_OPENNI_QVGA_60HZ = 4 + }; + + +// GStreamer +enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1 + }; + + +// PVAPI +enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast + CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301 // FrameStartTriggerMode: Determines how a frame is initiated + }; + +// PVAPI: FrameStartTriggerMode +enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, // Freerun + CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, // SyncIn1 + CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, // SyncIn2 + CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, // FixedRate + CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 // Software + }; + +// Properties of cameras available through XIMEA SDK interface +enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. 
+ CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). + CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds + }; + + +// Properties for Android cameras +enum { CAP_PROP_ANDROID_AUTOGRAB = 1024, + CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed + CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed + CAP_PROP_ANDROID_FLASH_MODE = 8001, + CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CAP_PROP_ANDROID_ANTIBANDING = 8004, + CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008 + }; + + +// Android camera output formats +enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR + CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR, + CAP_ANDROID_GREY_FRAME = 1, //Y + CAP_ANDROID_COLOR_FRAME_RGB = 2, + CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CAP_ANDROID_COLOR_FRAME_RGBA = 4 + }; + + +// Android camera flash modes +enum { CAP_ANDROID_FLASH_MODE_AUTO = 0, + CAP_ANDROID_FLASH_MODE_OFF = 1, + CAP_ANDROID_FLASH_MODE_ON = 2, + CAP_ANDROID_FLASH_MODE_RED_EYE = 3, + CAP_ANDROID_FLASH_MODE_TORCH = 4 + }; + + +// Android camera focus modes +enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1, + CAP_ANDROID_FOCUS_MODE_EDOF = 2, + CAP_ANDROID_FOCUS_MODE_FIXED = 3, + CAP_ANDROID_FOCUS_MODE_INFINITY = 4, + CAP_ANDROID_FOCUS_MODE_MACRO = 5 + }; + + +// Android camera white balance modes +enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1, + CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2, + CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3, + CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4, + CAP_ANDROID_WHITE_BALANCE_SHADE = 5, + CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6, + CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7 + }; + + +// Android camera antibanding modes +enum { CAP_ANDROID_ANTIBANDING_50HZ = 0, + CAP_ANDROID_ANTIBANDING_60HZ = 1, + CAP_ANDROID_ANTIBANDING_AUTO = 2, + CAP_ANDROID_ANTIBANDING_OFF = 3 + }; + + +// Properties of cameras available through AVFOUNDATION interface +enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CAP_PROP_IOS_DEVICE_FLASH = 9003, + CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CAP_PROP_IOS_DEVICE_TORCH = 9005 + }; + + +// Properties of cameras available through Smartek Giganetix Ethernet Vision interface +/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ +enum { 
CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006 + }; + +enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, + CAP_PROP_INTELPERC_PROFILE_IDX = 11002, + CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, + CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, + CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, + CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, + CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007 + }; + +// Intel PerC streams +enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, + CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, + CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR + }; + +enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. + CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. + CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. + CAP_INTELPERC_IMAGE = 3 + }; + + +class IVideoCapture; +class CV_EXPORTS_W VideoCapture +{ +public: + CV_WRAP VideoCapture(); + CV_WRAP VideoCapture(const String& filename); + CV_WRAP VideoCapture(int device); + + virtual ~VideoCapture(); + CV_WRAP virtual bool open(const String& filename); + CV_WRAP virtual bool open(int device); + CV_WRAP virtual bool isOpened() const; + CV_WRAP virtual void release(); + + CV_WRAP virtual bool grab(); + CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0); + virtual VideoCapture& operator >> (CV_OUT Mat& image); + virtual VideoCapture& operator >> (CV_OUT UMat& image); + CV_WRAP virtual bool read(OutputArray image); + + CV_WRAP virtual bool set(int propId, double value); + CV_WRAP virtual double get(int propId); + +protected: + Ptr<CvCapture> cap; + Ptr<IVideoCapture> icap; +private: + static Ptr<IVideoCapture> createCameraCapture(int index); +}; + +class CV_EXPORTS_W VideoWriter +{ +public: + CV_WRAP VideoWriter(); + CV_WRAP VideoWriter(const String& filename, int fourcc, double fps, + Size frameSize, bool isColor = true); + + virtual ~VideoWriter(); + CV_WRAP virtual bool open(const String& filename, int fourcc, double fps, + Size frameSize, bool isColor = true); + CV_WRAP virtual bool isOpened() const; + CV_WRAP virtual void release(); + virtual VideoWriter& operator << (const Mat& image); + CV_WRAP virtual void write(const Mat& image); + + CV_WRAP static int fourcc(char c1, char c2, char c3, char c4); + +protected: + Ptr<CvVideoWriter> writer; +}; + +template<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const; +template<> CV_EXPORTS void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const; + +} // cv + +#endif //__OPENCV_VIDEOIO_HPP__ diff --git a/modules/highgui/include/opencv2/highgui/cap_ios.h b/modules/videoio/include/opencv2/videoio/cap_ios.h similarity index 100% rename from modules/highgui/include/opencv2/highgui/cap_ios.h rename to modules/videoio/include/opencv2/videoio/cap_ios.h diff --git a/modules/videoio/include/opencv2/videoio/videoio.hpp b/modules/videoio/include/opencv2/videoio/videoio.hpp new file mode 100644 index 0000000000..ec84cf7a68 --- /dev/null +++ b/modules/videoio/include/opencv2/videoio/videoio.hpp @@ -0,0 +1,48 @@
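For orientation while reviewing the header above, a minimal sketch of how the VideoCapture and VideoWriter classes declared in opencv2/videoio.hpp are typically used together; the camera index, output file name, frame count, and MJPG codec are illustrative placeholders, not something the patch prescribes:

    #include <opencv2/videoio.hpp>

    using namespace cv;

    int main()
    {
        VideoCapture cap(0);                         // open the default camera
        if (!cap.isOpened())
            return 1;

        double fps = cap.get(CAP_PROP_FPS);
        if (fps <= 0)
            fps = 30;                                // some backends do not report FPS

        Size frameSize((int)cap.get(CAP_PROP_FRAME_WIDTH),
                       (int)cap.get(CAP_PROP_FRAME_HEIGHT));

        VideoWriter writer("out.avi", VideoWriter::fourcc('M','J','P','G'), fps, frameSize);
        if (!writer.isOpened())
            return 1;

        for (int i = 0; i < 100; ++i)
        {
            Mat frame;
            if (!cap.read(frame) || frame.empty())   // read() = grab() + retrieve()
                break;
            writer.write(frame);
        }
        return 0;                                    // capture and writer release resources in their destructors
    }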
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/videoio.hpp" diff --git a/modules/videoio/include/opencv2/videoio/videoio_c.h b/modules/videoio/include/opencv2/videoio/videoio_c.h new file mode 100644 index 0000000000..0b08d03d9b --- /dev/null +++ b/modules/videoio/include/opencv2/videoio/videoio_c.h @@ -0,0 +1,420 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_VIDEOIO_H__ +#define __OPENCV_VIDEOIO_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +/****************************************************************************************\ +* Working with Video Files and Cameras * +\****************************************************************************************/ + +/* "black box" capture structure */ +typedef struct CvCapture CvCapture; + +/* start capturing frames from video file */ +CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); + +enum +{ + CV_CAP_ANY =0, // autodetect + + CV_CAP_MIL =100, // MIL proprietary drivers + + CV_CAP_VFW =200, // platform native + CV_CAP_V4L =200, + CV_CAP_V4L2 =200, + + CV_CAP_FIREWARE =300, // IEEE 1394 drivers + CV_CAP_FIREWIRE =300, + CV_CAP_IEEE1394 =300, + CV_CAP_DC1394 =300, + CV_CAP_CMU1394 =300, + + CV_CAP_STEREO =400, // TYZX proprietary drivers + CV_CAP_TYZX =400, + CV_TYZX_LEFT =400, + CV_TYZX_RIGHT =401, + CV_TYZX_COLOR =402, + CV_TYZX_Z =403, + + CV_CAP_QT =500, // QuickTime + + CV_CAP_UNICAP =600, // Unicap drivers + + CV_CAP_DSHOW =700, // DirectShow (via videoInput) + CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput) + + CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK + + CV_CAP_OPENNI =900, // OpenNI (for Kinect) + CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion) + + CV_CAP_ANDROID =1000, // Android + CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera + CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera + + CV_CAP_XIAPI =1100, // XIMEA Camera API + + CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + + CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK + + CV_CAP_INTELPERC = 1500, // Intel Perceptual Computing + + CV_CAP_OPENNI2 = 1600 // OpenNI2 (for Kinect) +}; + +/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */ +CVAPI(CvCapture*) cvCreateCameraCapture( int index ); + +/* grab a frame, return 1 on success, 0 on fail. + this function is thought to be fast */ +CVAPI(int) cvGrabFrame( CvCapture* capture ); + +/* get the frame grabbed with cvGrabFrame(..) + This function may apply some frame processing like + frame decompression, flipping etc. 
+ !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) ); + +/* Just a combination of cvGrabFrame and cvRetrieveFrame + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvQueryFrame( CvCapture* capture ); + +/* stop capturing/reading and free resources */ +CVAPI(void) cvReleaseCapture( CvCapture** capture ); + +enum +{ + // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) + // every feature can have only one mode turned on at a time + CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) + CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CV_CAP_PROP_DC1394_MODE_AUTO = -2, + CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CV_CAP_PROP_POS_MSEC =0, + CV_CAP_PROP_POS_FRAMES =1, + CV_CAP_PROP_POS_AVI_RATIO =2, + CV_CAP_PROP_FRAME_WIDTH =3, + CV_CAP_PROP_FRAME_HEIGHT =4, + CV_CAP_PROP_FPS =5, + CV_CAP_PROP_FOURCC =6, + CV_CAP_PROP_FRAME_COUNT =7, + CV_CAP_PROP_FORMAT =8, + CV_CAP_PROP_MODE =9, + CV_CAP_PROP_BRIGHTNESS =10, + CV_CAP_PROP_CONTRAST =11, + CV_CAP_PROP_SATURATION =12, + CV_CAP_PROP_HUE =13, + CV_CAP_PROP_GAIN =14, + CV_CAP_PROP_EXPOSURE =15, + CV_CAP_PROP_CONVERT_RGB =16, + CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17, + CV_CAP_PROP_RECTIFICATION =18, + CV_CAP_PROP_MONOCROME =19, + CV_CAP_PROP_SHARPNESS =20, + CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera, + // user can adjust refernce level + // using this feature + CV_CAP_PROP_GAMMA =22, + CV_CAP_PROP_TEMPERATURE =23, + CV_CAP_PROP_TRIGGER =24, + CV_CAP_PROP_TRIGGER_DELAY =25, + CV_CAP_PROP_WHITE_BALANCE_RED_V =26, + CV_CAP_PROP_ZOOM =27, + CV_CAP_PROP_FOCUS =28, + CV_CAP_PROP_GUID =29, + CV_CAP_PROP_ISO_SPEED =30, + CV_CAP_PROP_MAX_DC1394 =31, + CV_CAP_PROP_BACKLIGHT =32, + CV_CAP_PROP_PAN =33, + CV_CAP_PROP_TILT =34, + CV_CAP_PROP_ROLL =35, + CV_CAP_PROP_IRIS =36, + CV_CAP_PROP_SETTINGS =37, + + CV_CAP_PROP_AUTOGRAB =1024, // property for videoio class CvCapture_Android only + CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed + CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed + + // OpenNI map generators + CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR, + + // Properties of cameras available through OpenNI interfaces + CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm + CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag + CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets this view point to its normal one (if the flag is "off"). 
+ CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + CV_CAP_PROP_OPENNI2_SYNC = 110, + CV_CAP_PROP_OPENNI2_MIRROR = 111, + + CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE, + CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE, + CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, + + // Properties of cameras available through GStreamer interface + CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1 + + // PVAPI + CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast + CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated + + // Properties of cameras available through XIMEA SDK interface + CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. + CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). 
+ CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds + + // Properties for Android cameras + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009, + CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010, + + // Properties of cameras available through AVFOUNDATION interface + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, + + // Properties of cameras available through Smartek Giganetix Ethernet Vision interface + /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ + CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, + + CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, + CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002, + CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, + CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, + CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007, + + // Intel PerC streams + CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, + CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, + CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR +}; + +enum +{ + // Data given from depth generator. + CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator. 
+ CV_CAP_OPENNI_BGR_IMAGE = 5, + CV_CAP_OPENNI_GRAY_IMAGE = 6 +}; + +// Supported output modes of OpenNI image generator +enum +{ + CV_CAP_OPENNI_VGA_30HZ = 0, + CV_CAP_OPENNI_SXGA_15HZ = 1, + CV_CAP_OPENNI_SXGA_30HZ = 2, + CV_CAP_OPENNI_QVGA_30HZ = 3, + CV_CAP_OPENNI_QVGA_60HZ = 4 +}; + +//supported by Android camera output formats +enum +{ + CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR + CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR, + CV_CAP_ANDROID_GREY_FRAME = 1, //Y + CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, + CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4 +}; + +// supported Android camera flash modes +enum +{ + CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, + CV_CAP_ANDROID_FLASH_MODE_OFF, + CV_CAP_ANDROID_FLASH_MODE_ON, + CV_CAP_ANDROID_FLASH_MODE_RED_EYE, + CV_CAP_ANDROID_FLASH_MODE_TORCH +}; + +// supported Android camera focus modes +enum +{ + CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO, + CV_CAP_ANDROID_FOCUS_MODE_EDOF, + CV_CAP_ANDROID_FOCUS_MODE_FIXED, + CV_CAP_ANDROID_FOCUS_MODE_INFINITY, + CV_CAP_ANDROID_FOCUS_MODE_MACRO +}; + +// supported Android camera white balance modes +enum +{ + CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_SHADE, + CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT +}; + +// supported Android camera antibanding modes +enum +{ + CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, + CV_CAP_ANDROID_ANTIBANDING_60HZ, + CV_CAP_ANDROID_ANTIBANDING_AUTO, + CV_CAP_ANDROID_ANTIBANDING_OFF +}; + +enum +{ + CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. + CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. + CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. 
+ CV_CAP_INTELPERC_IMAGE = 3 +}; + +/* retrieve or set capture properties */ +CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id ); +CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value ); + +// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY +CVAPI(int) cvGetCaptureDomain( CvCapture* capture); + +/* "black box" video file writer structure */ +typedef struct CvVideoWriter CvVideoWriter; + +#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24)) + +CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) +{ + return CV_FOURCC_MACRO(c1, c2, c3, c4); +} + +#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */ +#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */ + +/* initialize video file writer */ +CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, + double fps, CvSize frame_size, + int is_color CV_DEFAULT(1)); + +/* write frame to video file */ +CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); + +/* close video file writer */ +CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); + +/****************************************************************************************\ +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvCaptureFromFile cvCreateFileCapture +#define cvCaptureFromCAM cvCreateCameraCapture +#define cvCaptureFromAVI cvCaptureFromFile +#define cvCreateAVIWriter cvCreateVideoWriter +#define cvWriteToAVI cvWriteFrame + + +#ifdef __cplusplus +} +#endif + +#endif //__OPENCV_VIDEOIO_H__ diff --git a/modules/highgui/perf/perf_input.cpp b/modules/videoio/perf/perf_input.cpp similarity index 100% rename from modules/highgui/perf/perf_input.cpp rename to modules/videoio/perf/perf_input.cpp diff --git a/modules/highgui/perf/perf_main.cpp b/modules/videoio/perf/perf_main.cpp similarity index 51% rename from modules/highgui/perf/perf_main.cpp rename to modules/videoio/perf/perf_main.cpp index ebe94ab7cc..12b47807b6 100644 --- a/modules/highgui/perf/perf_main.cpp +++ b/modules/videoio/perf/perf_main.cpp @@ -1,3 +1,3 @@ #include "perf_precomp.hpp" -CV_PERF_TEST_MAIN(highgui) +CV_PERF_TEST_MAIN(videoio) diff --git a/modules/highgui/perf/perf_output.cpp b/modules/videoio/perf/perf_output.cpp similarity index 100% rename from modules/highgui/perf/perf_output.cpp rename to modules/videoio/perf/perf_output.cpp diff --git a/modules/highgui/perf/perf_precomp.hpp b/modules/videoio/perf/perf_precomp.hpp similarity index 95% rename from modules/highgui/perf/perf_precomp.hpp rename to modules/videoio/perf/perf_precomp.hpp index faf34617e3..bd27700a9f 100644 --- a/modules/highgui/perf/perf_precomp.hpp +++ b/modules/videoio/perf/perf_precomp.hpp @@ -10,7 +10,8 @@ #define __OPENCV_PERF_PRECOMP_HPP__ #include "opencv2/ts.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #ifdef GTEST_CREATE_SHARED_LIBRARY #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined diff --git a/modules/highgui/src/cap.cpp b/modules/videoio/src/cap.cpp similarity index 96% rename from modules/highgui/src/cap.cpp rename to modules/videoio/src/cap.cpp index 9311694d0a..e36dc05730 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/videoio/src/cap.cpp @@ 
-41,6 +41,7 @@ #include "precomp.hpp" #include "cap_intelperc.hpp" +#include "cap_dshow.hpp" #if defined _M_X64 && defined _MSC_VER && !defined CV_ICC #pragma optimize("",off) @@ -115,9 +116,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) { int domains[] = { -#ifdef HAVE_DSHOW - CV_CAP_DSHOW, -#endif #ifdef HAVE_MSMF CV_CAP_MSMF, #endif @@ -145,6 +143,9 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) #ifdef HAVE_OPENNI CV_CAP_OPENNI, #endif +#ifdef HAVE_OPENNI2 + CV_CAP_OPENNI2, +#endif #ifdef HAVE_ANDROID_NATIVE_CAMERA CV_CAP_ANDROID, #endif @@ -175,8 +176,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) // try every possibly installed camera API for (int i = 0; domains[i] >= 0; i++) { -#if defined(HAVE_DSHOW) || \ - defined(HAVE_MSMF) || \ +#if defined(HAVE_MSMF) || \ defined(HAVE_TYZX) || \ defined(HAVE_VFW) || \ defined(HAVE_LIBV4L) || \ @@ -193,6 +193,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) defined(HAVE_UNICAP) || \ defined(HAVE_PVAPI) || \ defined(HAVE_OPENNI) || \ + defined(HAVE_OPENNI2) || \ defined(HAVE_XIMEA) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_ANDROID_NATIVE_CAMERA) || \ @@ -205,13 +206,6 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) switch (domains[i]) { -#ifdef HAVE_DSHOW - case CV_CAP_DSHOW: - capture = cvCreateCameraCapture_DShow (index); - if (capture) - return capture; - break; -#endif #ifdef HAVE_MSMF case CV_CAP_MSMF: capture = cvCreateCameraCapture_MSMF (index); @@ -315,6 +309,14 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) break; #endif +#ifdef HAVE_OPENNI2 + case CV_CAP_OPENNI2: + capture = cvCreateCameraCapture_OpenNI(index); + if (capture) + return capture; + break; +#endif + #ifdef HAVE_ANDROID_NATIVE_CAMERA case CV_CAP_ANDROID: capture = cvCreateCameraCapture_Android (index); @@ -589,6 +591,9 @@ Ptr VideoCapture::createCameraCapture(int index) { int domains[] = { +#ifdef HAVE_DSHOW + CV_CAP_DSHOW, +#endif #ifdef HAVE_INTELPERC CV_CAP_INTELPERC, #endif @@ -607,18 +612,26 @@ Ptr VideoCapture::createCameraCapture(int index) // try every possibly installed camera API for (int i = 0; domains[i] >= 0; i++) { -#if defined(HAVE_INTELPERC) || \ +#if defined(HAVE_DSHOW) || \ + defined(HAVE_INTELPERC) || \ (0) Ptr capture; switch (domains[i]) { +#ifdef HAVE_DSHOW + case CV_CAP_DSHOW: + capture = Ptr(new cv::VideoCapture_DShow(index)); + if (capture) + return capture; + break; // CV_CAP_DSHOW +#endif #ifdef HAVE_INTELPERC case CV_CAP_INTELPERC: capture = Ptr(new cv::VideoCapture_IntelPerC()); if (capture) return capture; - break; // CV_CAP_INTEL_PERC + break; // CV_CAP_INTEL_PERC #endif } #endif @@ -628,7 +641,6 @@ Ptr VideoCapture::createCameraCapture(int index) return Ptr(); } - VideoWriter::VideoWriter() {} diff --git a/modules/highgui/src/cap_android.cpp b/modules/videoio/src/cap_android.cpp similarity index 97% rename from modules/highgui/src/cap_android.cpp rename to modules/videoio/src/cap_android.cpp index dac245d2b3..700e397421 100644 --- a/modules/highgui/src/cap_android.cpp +++ b/modules/videoio/src/cap_android.cpp @@ -57,7 +57,7 @@ #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)) #define LOGE(...) 
((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)) -class HighguiAndroidCameraActivity; +class VideoIOAndroidCameraActivity; class CvCapture_Android : public CvCapture { @@ -133,14 +133,14 @@ private: bool convertYUV2Grey(int width, int height, const unsigned char* yuv, cv::Mat& resmat); bool convertYUV2BGR(int width, int height, const unsigned char* yuv, cv::Mat& resmat, bool inRGBorder, bool withAlpha); - friend class HighguiAndroidCameraActivity; + friend class VideoIOAndroidCameraActivity; }; -class HighguiAndroidCameraActivity : public CameraActivity +class VideoIOAndroidCameraActivity : public CameraActivity { public: - HighguiAndroidCameraActivity(CvCapture_Android* capture) + VideoIOAndroidCameraActivity(CvCapture_Android* capture) { m_capture = capture; m_framesReceived = 0; @@ -204,7 +204,7 @@ CvCapture_Android::CvCapture_Android(int cameraId) //try connect to camera LOGD("CvCapture_Android::CvCapture_Android(%i)", cameraId); - m_activity = new HighguiAndroidCameraActivity(this); + m_activity = new VideoIOAndroidCameraActivity(this); if (m_activity == 0) return; @@ -232,7 +232,7 @@ CvCapture_Android::~CvCapture_Android() { if (m_activity) { - ((HighguiAndroidCameraActivity*)m_activity)->LogFramesRate(); + ((VideoIOAndroidCameraActivity*)m_activity)->LogFramesRate(); pthread_mutex_lock(&m_nextFrameMutex); @@ -344,7 +344,7 @@ bool CvCapture_Android::setProperty( int propIdx, double propValue ) // Only changes in frame size require camera restart if ((propIdx == CV_CAP_PROP_FRAME_WIDTH) || (propIdx == CV_CAP_PROP_FRAME_HEIGHT)) - { // property for highgui class CvCapture_Android only + { // property for videoio class CvCapture_Android only m_CameraParamsChanged = true; } @@ -475,7 +475,7 @@ void CvCapture_Android::setFrame(const void* buffer, int bufferSize) cv::Mat m_frameYUV420next_ref = m_frameYUV420next; memcpy(m_frameYUV420next_ref.ptr(), buffer, bufferSize); // LOGD("CvCapture_Android::setFrame -- memcpy is done"); - // ((HighguiAndroidCameraActivity*)m_activity)->LogFramesRate(); + // ((VideoIOAndroidCameraActivity*)m_activity)->LogFramesRate(); m_dataState = CVCAPTURE_ANDROID_STATE_HAS_NEW_FRAME_UNGRABBED; m_waitingNextFrame = false;//set flag that no more frames required at this moment diff --git a/modules/highgui/src/cap_avfoundation.mm b/modules/videoio/src/cap_avfoundation.mm similarity index 99% rename from modules/highgui/src/cap_avfoundation.mm rename to modules/videoio/src/cap_avfoundation.mm index e24ae32976..e66f03a8da 100644 --- a/modules/highgui/src/cap_avfoundation.mm +++ b/modules/videoio/src/cap_avfoundation.mm @@ -428,7 +428,7 @@ void CvCaptureCAM::setWidthHeight() { [localpool drain]; } -//added macros into headers in highgui_c.h +//added macros into headers in videoio_c.h /* #define CV_CAP_PROP_IOS_DEVICE_FOCUS 9001 #define CV_CAP_PROP_IOS_DEVICE_EXPOSURE 9002 diff --git a/modules/highgui/src/cap_cmu.cpp b/modules/videoio/src/cap_cmu.cpp similarity index 100% rename from modules/highgui/src/cap_cmu.cpp rename to modules/videoio/src/cap_cmu.cpp diff --git a/modules/highgui/src/cap_dc1394.cpp b/modules/videoio/src/cap_dc1394.cpp similarity index 99% rename from modules/highgui/src/cap_dc1394.cpp rename to modules/videoio/src/cap_dc1394.cpp index 9706bd12e3..acae61ecb4 100644 --- a/modules/highgui/src/cap_dc1394.cpp +++ b/modules/videoio/src/cap_dc1394.cpp @@ -1,5 +1,5 @@ /* This is the contributed code: -Firewire and video4linux camera support for highgui +Firewire and video4linux camera support for videoio 2003-03-12 Magnus Lundin 
lundin@mlu.mine.nu @@ -17,21 +17,21 @@ INSTALLATION Install OpenCV Install v4l Install dc1394 raw1394 - coriander should work with your camera - Backup highgui folder + Backup videoio folder Copy new files - cd into highgui folder + cd into videoio folder make clean (cvcap.cpp must be rebuilt) make make install -The build is controlled by the following entries in the highgui Makefile: +The build is controlled by the following entries in the videoio Makefile: -libhighgui_la_LIBADD = -L/usr/X11R6/lib -lXm -lMrm -lUil -lpng -ljpeg -lz -ltiff -lavcodec -lraw1394 -ldc1394_control +libvideoio_la_LIBADD = -L/usr/X11R6/lib -lXm -lMrm -lUil -lpng -ljpeg -lz -ltiff -lavcodec -lraw1394 -ldc1394_control DEFS = -DHAVE_CONFIG_H -DHAVE_DC1394 HAVE_CAMV4L -Now it should be possible to use highgui camera functions, works for me. +Now it should be possible to use videoio camera functions, works for me. THINGS TO DO diff --git a/modules/highgui/src/cap_dc1394_v2.cpp b/modules/videoio/src/cap_dc1394_v2.cpp similarity index 100% rename from modules/highgui/src/cap_dc1394_v2.cpp rename to modules/videoio/src/cap_dc1394_v2.cpp diff --git a/modules/highgui/src/cap_dshow.cpp b/modules/videoio/src/cap_dshow.cpp similarity index 96% rename from modules/highgui/src/cap_dshow.cpp rename to modules/videoio/src/cap_dshow.cpp index 90ffa00034..bc9f05eb8f 100644 --- a/modules/highgui/src/cap_dshow.cpp +++ b/modules/videoio/src/cap_dshow.cpp @@ -42,6 +42,7 @@ #include "precomp.hpp" #if (defined WIN32 || defined _WIN32) && defined HAVE_DSHOW +#include "cap_dshow.hpp" /* DirectShow-based Video Capturing module is based on @@ -455,13 +456,7 @@ class videoDevice{ }; - - - ////////////////////////////////////// VIDEO INPUT ///////////////////////////////////// - - - class videoInput{ public: @@ -3098,131 +3093,52 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter * return hr; } - -/********************* Capturing video from camera via DirectShow *********************/ - -class CvCaptureCAM_DShow : public CvCapture -{ -public: - CvCaptureCAM_DShow(); - virtual ~CvCaptureCAM_DShow(); - - virtual bool open( int index ); - virtual void close(); - virtual double getProperty(int); - virtual bool setProperty(int, double); - virtual bool grabFrame(); - virtual IplImage* retrieveFrame(int); - virtual int getCaptureDomain() { return CV_CAP_DSHOW; } // Return the type of the capture object: CV_CAP_VFW, etc... 
- -protected: - void init(); - - int index, width, height,fourcc; - int widthSet, heightSet; - IplImage* frame; - static videoInput VI; -}; - - struct SuppressVideoInputMessages { SuppressVideoInputMessages() { videoInput::setVerbose(false); } }; static SuppressVideoInputMessages do_it; -videoInput CvCaptureCAM_DShow::VI; -CvCaptureCAM_DShow::CvCaptureCAM_DShow() +namespace cv { - index = -1; - frame = 0; - width = height = fourcc = -1; - widthSet = heightSet = -1; - CoInitialize(0); -} +videoInput VideoCapture_DShow::g_VI; -CvCaptureCAM_DShow::~CvCaptureCAM_DShow() +VideoCapture_DShow::VideoCapture_DShow(int index) + : m_index(-1) + , m_width(-1) + , m_height(-1) + , m_fourcc(-1) + , m_widthSet(-1) + , m_heightSet(-1) +{ + CoInitialize(0); + open(index); +} +VideoCapture_DShow::~VideoCapture_DShow() { close(); CoUninitialize(); } -void CvCaptureCAM_DShow::close() +double VideoCapture_DShow::getProperty(int propIdx) { - if( index >= 0 ) - { - VI.stopDevice(index); - index = -1; - cvReleaseImage(&frame); - } - widthSet = heightSet = width = height = -1; -} - -// Initialize camera input -bool CvCaptureCAM_DShow::open( int _index ) -{ - int devices = 0; - - close(); - devices = VI.listDevices(true); - if (devices == 0) - return false; - if (_index < 0 || _index > devices-1) - return false; - VI.setupDevice(_index); - if( !VI.isDeviceSetup(_index) ) - return false; - index = _index; - return true; -} - -bool CvCaptureCAM_DShow::grabFrame() -{ - return true; -} - - -IplImage* CvCaptureCAM_DShow::retrieveFrame(int) -{ - if( !frame || VI.getWidth(index) != frame->width || VI.getHeight(index) != frame->height ) - { - if (frame) - cvReleaseImage( &frame ); - int w = VI.getWidth(index), h = VI.getHeight(index); - frame = cvCreateImage( cvSize(w,h), 8, 3 ); - } - - if (VI.getPixels( index, (uchar*)frame->imageData, false, true )) - return frame; - else - return NULL; -} - -double CvCaptureCAM_DShow::getProperty( int property_id ) -{ - - long min_value,max_value,stepping_delta,current_value,flags,defaultValue; - - // image format proprrties - switch( property_id ) + + long min_value, max_value, stepping_delta, current_value, flags, defaultValue; + + switch (propIdx) { + // image format properties case CV_CAP_PROP_FRAME_WIDTH: - return VI.getWidth(index); - + return g_VI.getWidth(m_index); case CV_CAP_PROP_FRAME_HEIGHT: - return VI.getHeight(index); - + return g_VI.getHeight(m_index); case CV_CAP_PROP_FOURCC: - return VI.getFourcc(index); - + return g_VI.getFourcc(m_index); case CV_CAP_PROP_FPS: - return VI.getFPS(index); - } + return g_VI.getFPS(m_index); // video filter properties - switch( property_id ) - { case CV_CAP_PROP_BRIGHTNESS: case CV_CAP_PROP_CONTRAST: case CV_CAP_PROP_HUE: @@ -3233,12 +3149,10 @@ double CvCaptureCAM_DShow::getProperty( int property_id ) case CV_CAP_PROP_WHITE_BALANCE_BLUE_U: case CV_CAP_PROP_BACKLIGHT: case CV_CAP_PROP_GAIN: - if (VI.getVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value; - } + if (g_VI.getVideoSettingFilter(m_index, g_VI.getVideoPropertyFromCV(propIdx), min_value, max_value, stepping_delta, current_value, flags, defaultValue)) + return (double)current_value; // camera properties - switch( property_id ) - { case CV_CAP_PROP_PAN: case CV_CAP_PROP_TILT: case CV_CAP_PROP_ROLL: @@ -3246,33 +3160,33 @@ double CvCaptureCAM_DShow::getProperty( int property_id ) case CV_CAP_PROP_EXPOSURE: case CV_CAP_PROP_IRIS: case CV_CAP_PROP_FOCUS: - if 
(VI.getVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),min_value,max_value,stepping_delta,current_value,flags,defaultValue) ) return (double)current_value; - + if (g_VI.getVideoSettingCamera(m_index, g_VI.getCameraPropertyFromCV(propIdx), min_value, max_value, stepping_delta, current_value, flags, defaultValue)) + return (double)current_value; } // unknown parameter or value not available return -1; } - -bool CvCaptureCAM_DShow::setProperty( int property_id, double value ) +bool VideoCapture_DShow::setProperty(int propIdx, double propVal) { // image capture properties bool handled = false; - switch( property_id ) + switch (propIdx) { case CV_CAP_PROP_FRAME_WIDTH: - width = cvRound(value); + m_width = cvRound(propVal); handled = true; break; case CV_CAP_PROP_FRAME_HEIGHT: - height = cvRound(value); + m_height = cvRound(propVal); handled = true; break; case CV_CAP_PROP_FOURCC: - fourcc = (int)(unsigned long)(value); - if ( fourcc == -1 ) { + m_fourcc = (int)(unsigned long)(propVal); + if (-1 == m_fourcc) + { // following cvCreateVideo usage will pop up caprturepindialog here if fourcc=-1 // TODO - how to create a capture pin dialog } @@ -3280,38 +3194,38 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value ) break; case CV_CAP_PROP_FPS: - int fps = cvRound(value); - if (fps != VI.getFPS(index)) + int fps = cvRound(propVal); + if (fps != g_VI.getFPS(m_index)) { - VI.stopDevice(index); - VI.setIdealFramerate(index,fps); - if (widthSet > 0 && heightSet > 0) - VI.setupDevice(index, widthSet, heightSet); + g_VI.stopDevice(m_index); + g_VI.setIdealFramerate(m_index, fps); + if (m_widthSet > 0 && m_heightSet > 0) + g_VI.setupDevice(m_index, m_widthSet, m_heightSet); else - VI.setupDevice(index); + g_VI.setupDevice(m_index); } - return VI.isDeviceSetup(index); - + return g_VI.isDeviceSetup(m_index); } - if ( handled ) { + if (handled) + { // a stream setting - if( width > 0 && height > 0 ) + if (m_width > 0 && m_height > 0) { - if( width != VI.getWidth(index) || height != VI.getHeight(index) )//|| fourcc != VI.getFourcc(index) ) + if (m_width != g_VI.getWidth(m_index) || m_height != g_VI.getHeight(m_index) )//|| fourcc != VI.getFourcc(index) ) { - int fps = static_cast(VI.getFPS(index)); - VI.stopDevice(index); - VI.setIdealFramerate(index, fps); - VI.setupDeviceFourcc(index, width, height, fourcc); + int fps = static_cast(g_VI.getFPS(m_index)); + g_VI.stopDevice(m_index); + g_VI.setIdealFramerate(m_index, fps); + g_VI.setupDeviceFourcc(m_index, m_width, m_height, m_fourcc); } - bool success = VI.isDeviceSetup(index); + bool success = g_VI.isDeviceSetup(m_index); if (success) { - widthSet = width; - heightSet = height; - width = height = fourcc = -1; + m_widthSet = m_width; + m_heightSet = m_height; + m_width = m_height = m_fourcc = -1; } return success; } @@ -3319,13 +3233,14 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value ) } // show video/camera filter dialog - if ( property_id == CV_CAP_PROP_SETTINGS ) { - VI.showSettingsWindow(index); + if (propIdx == CV_CAP_PROP_SETTINGS ) + { + g_VI.showSettingsWindow(m_index); return true; } //video Filter properties - switch( property_id ) + switch (propIdx) { case CV_CAP_PROP_BRIGHTNESS: case CV_CAP_PROP_CONTRAST: @@ -3337,11 +3252,11 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value ) case CV_CAP_PROP_WHITE_BALANCE_BLUE_U: case CV_CAP_PROP_BACKLIGHT: case CV_CAP_PROP_GAIN: - return VI.setVideoSettingFilter(index,VI.getVideoPropertyFromCV(property_id),(long)value); + return 
g_VI.setVideoSettingFilter(m_index, g_VI.getVideoPropertyFromCV(propIdx), (long)propVal); } //camera properties - switch( property_id ) + switch (propIdx) { case CV_CAP_PROP_PAN: case CV_CAP_PROP_TILT: @@ -3350,30 +3265,55 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value ) case CV_CAP_PROP_EXPOSURE: case CV_CAP_PROP_IRIS: case CV_CAP_PROP_FOCUS: - return VI.setVideoSettingCamera(index,VI.getCameraPropertyFromCV(property_id),(long)value); + return g_VI.setVideoSettingCamera(m_index, g_VI.getCameraPropertyFromCV(propIdx), (long)propVal); } return false; } - -CvCapture* cvCreateCameraCapture_DShow( int index ) +bool VideoCapture_DShow::grabFrame() { - CvCaptureCAM_DShow* capture = new CvCaptureCAM_DShow; + return true; +} +bool VideoCapture_DShow::retrieveFrame(int, OutputArray frame) +{ + frame.create(Size(g_VI.getWidth(m_index), g_VI.getHeight(m_index)), CV_8UC3); + cv::Mat mat = frame.getMat(); + return g_VI.getPixels(m_index, mat.ptr(), false, true ); +} +int VideoCapture_DShow::getCaptureDomain() +{ + return CV_CAP_DSHOW; +} +bool VideoCapture_DShow::isOpened() const +{ + return (-1 != m_index); +} - try - { - if( capture->open( index )) - return capture; - } - catch(...) - { - delete capture; - throw; - } +void VideoCapture_DShow::open(int index) +{ + close(); + int devices = g_VI.listDevices(true); + if (0 == devices) + return; + if (index < 0 || index > devices-1) + return; + g_VI.setupDevice(index); + if (!g_VI.isDeviceSetup(index)) + return; + m_index = index; +} + +void VideoCapture_DShow::close() +{ + if (m_index >= 0) + { + g_VI.stopDevice(m_index); + m_index = -1; + } + m_widthSet = m_heightSet = m_width = m_height = -1; +} - delete capture; - return 0; } #endif diff --git a/modules/videoio/src/cap_dshow.hpp b/modules/videoio/src/cap_dshow.hpp new file mode 100644 index 0000000000..2225145faa --- /dev/null +++ b/modules/videoio/src/cap_dshow.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2014, Itseez, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +//M*/ + +#ifndef _CAP_DSHOW_HPP_ +#define _CAP_DSHOW_HPP_ + +#include "precomp.hpp" + +#ifdef HAVE_DSHOW + +class videoInput; +namespace cv +{ + +class VideoCapture_DShow : public IVideoCapture +{ +public: + VideoCapture_DShow(int index); + virtual ~VideoCapture_DShow(); + + virtual double getProperty(int propIdx); + virtual bool setProperty(int propIdx, double propVal); + + virtual bool grabFrame(); + virtual bool retrieveFrame(int outputType, OutputArray frame); + virtual int getCaptureDomain(); + bool isOpened() const; +protected: + void open(int index); + void close(); + + int m_index, m_width, m_height, m_fourcc; + int m_widthSet, m_heightSet; + static videoInput g_VI; +}; + +} + +#endif //HAVE_DSHOW +#endif //_CAP_DSHOW_HPP_ \ No newline at end of file diff --git a/modules/highgui/src/cap_ffmpeg.cpp b/modules/videoio/src/cap_ffmpeg.cpp similarity index 100% rename from modules/highgui/src/cap_ffmpeg.cpp rename to modules/videoio/src/cap_ffmpeg.cpp diff --git a/modules/highgui/src/cap_ffmpeg_api.hpp b/modules/videoio/src/cap_ffmpeg_api.hpp similarity index 100% rename from modules/highgui/src/cap_ffmpeg_api.hpp rename to modules/videoio/src/cap_ffmpeg_api.hpp diff --git a/modules/highgui/src/cap_ffmpeg_impl.hpp b/modules/videoio/src/cap_ffmpeg_impl.hpp similarity index 100% rename from modules/highgui/src/cap_ffmpeg_impl.hpp rename to modules/videoio/src/cap_ffmpeg_impl.hpp diff --git a/modules/highgui/src/cap_giganetix.cpp b/modules/videoio/src/cap_giganetix.cpp similarity index 100% rename from modules/highgui/src/cap_giganetix.cpp rename to modules/videoio/src/cap_giganetix.cpp diff --git a/modules/highgui/src/cap_gstreamer.cpp b/modules/videoio/src/cap_gstreamer.cpp similarity index 100% rename from modules/highgui/src/cap_gstreamer.cpp rename to modules/videoio/src/cap_gstreamer.cpp diff --git a/modules/highgui/src/cap_images.cpp b/modules/videoio/src/cap_images.cpp similarity index 100% rename from modules/highgui/src/cap_images.cpp rename to modules/videoio/src/cap_images.cpp diff --git a/modules/highgui/src/cap_intelperc.cpp b/modules/videoio/src/cap_intelperc.cpp similarity index 100% rename from modules/highgui/src/cap_intelperc.cpp rename to modules/videoio/src/cap_intelperc.cpp diff --git a/modules/highgui/src/cap_intelperc.hpp b/modules/videoio/src/cap_intelperc.hpp similarity index 100% rename from modules/highgui/src/cap_intelperc.hpp rename to modules/videoio/src/cap_intelperc.hpp diff --git a/modules/highgui/src/cap_ios_abstract_camera.mm b/modules/videoio/src/cap_ios_abstract_camera.mm similarity index 99% rename from modules/highgui/src/cap_ios_abstract_camera.mm rename to modules/videoio/src/cap_ios_abstract_camera.mm index b40b3648de..08547663ca 100644 --- a/modules/highgui/src/cap_ios_abstract_camera.mm +++ b/modules/videoio/src/cap_ios_abstract_camera.mm @@ -30,7 +30,7 @@ */ -#import "opencv2/highgui/cap_ios.h" +#import "opencv2/videoio/cap_ios.h" #include "precomp.hpp" #pragma mark - Private Interface diff --git a/modules/highgui/src/cap_ios_photo_camera.mm b/modules/videoio/src/cap_ios_photo_camera.mm similarity index 99% rename from modules/highgui/src/cap_ios_photo_camera.mm rename to modules/videoio/src/cap_ios_photo_camera.mm index f05cfa5f87..c6c93a8cf0 100644 --- a/modules/highgui/src/cap_ios_photo_camera.mm +++ b/modules/videoio/src/cap_ios_photo_camera.mm @@ -29,7 +29,7 @@ */ -#import "opencv2/highgui/cap_ios.h" +#import "opencv2/videoio/cap_ios.h" #include "precomp.hpp" #pragma mark - Private Interface diff --git 
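The cap_dshow refactoring above turns the old CvCaptureCAM_DShow C-style capture into VideoCapture_DShow, a C++ backend built on the new IVideoCapture interface: all instances share one static videoInput object (g_VI), per-object state is reduced to the device index plus cached width/height/fourcc, and retrieveFrame() hands the pixels back as an 8-bit 3-channel Mat. The sketch below shows how the backend is expected to be reached from user code via the usual domain-plus-index convention; it is illustrative only (the main() scaffolding and the chosen resolution are assumptions, the CV_CAP_* constants are the ones handled in the code above).

    // Illustrative sketch only: driving the DirectShow backend declared above
    // (assumes a Windows build with HAVE_DSHOW enabled).
    #include "opencv2/videoio.hpp"
    #include "opencv2/videoio/videoio_c.h"   // CV_CAP_DSHOW, CV_CAP_PROP_* constants

    int main()
    {
        // Index 0 plus the CV_CAP_DSHOW domain offset; the videoio dispatcher is
        // expected to route this to VideoCapture_DShow::open(0).
        cv::VideoCapture cap(CV_CAP_DSHOW + 0);
        if (!cap.isOpened())
            return 1;

        // These map to VideoCapture_DShow::setProperty(); width and height are
        // cached in m_width/m_height and applied together when the device is
        // reconfigured.
        cap.set(CV_CAP_PROP_FRAME_WIDTH,  640);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

        cv::Mat frame;
        cap >> frame;     // grabFrame() + retrieveFrame() -> 8-bit, 3-channel BGR
        return frame.empty() ? 1 : 0;
    }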
a/modules/highgui/src/cap_ios_video_camera.mm b/modules/videoio/src/cap_ios_video_camera.mm similarity index 99% rename from modules/highgui/src/cap_ios_video_camera.mm rename to modules/videoio/src/cap_ios_video_camera.mm index 20973c3133..c094de79c1 100644 --- a/modules/highgui/src/cap_ios_video_camera.mm +++ b/modules/videoio/src/cap_ios_video_camera.mm @@ -29,7 +29,7 @@ * */ -#import "opencv2/highgui/cap_ios.h" +#import "opencv2/videoio/cap_ios.h" #include "precomp.hpp" #import diff --git a/modules/highgui/src/cap_libv4l.cpp b/modules/videoio/src/cap_libv4l.cpp similarity index 96% rename from modules/highgui/src/cap_libv4l.cpp rename to modules/videoio/src/cap_libv4l.cpp index e7aa5b5dfe..a3a02d9ef9 100644 --- a/modules/highgui/src/cap_libv4l.cpp +++ b/modules/videoio/src/cap_libv4l.cpp @@ -1,7 +1,7 @@ /* This is the contributed code: File: cvcap_v4l.cpp -Current Location: ../opencv-0.9.6/otherlibs/highgui +Current Location: ../opencv-0.9.6/otherlibs/videoio Original Version: 2003-03-12 Magnus Lundin lundin@mlu.mine.nu Original Comments: @@ -71,7 +71,7 @@ For Release: OpenCV-Linux Beta4 Opencv-0.9.6 [FD] I modified the following: - handle YUV420P, YUV420, and YUV411P palettes (for many webcams) without using floating-point - cvGrabFrame should not wait for the end of the first frame, and should return quickly - (see highgui doc) + (see videoio doc) - cvRetrieveFrame should in turn wait for the end of frame capture, and should not trigger the capture of the next frame (the user choses when to do it using GrabFrame) To get the old behavior, re-call cvRetrieveFrame just after cvGrabFrame. @@ -179,7 +179,7 @@ make & enjoy! Planning for future rewrite of this whole library (July/August 2010) 15th patch: May 12, 2010, Filipe Almeida filipe.almeida@ist.utl.pt -- Broken compile of library (include "_highgui.h") +- Broken compile of library (include "_videoio.h") */ /*M/////////////////////////////////////////////////////////////////////////////////////// @@ -654,7 +654,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) if ((capture->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) { /* Nope. 
*/ - fprintf( stderr, "HIGHGUI ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName); + fprintf( stderr, "VIDEOIO ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName); icvCloseCAM_V4L(capture); return -1; } @@ -673,7 +673,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) /* V4L2 have a status field from selected video mode */ if (-1 == xioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) { - fprintf (stderr, "HIGHGUI ERROR: V4L2: Aren't able to set channel number\n"); + fprintf (stderr, "VIDEOIO ERROR: V4L2: Aren't able to set channel number\n"); icvCloseCAM_V4L (capture); return -1; } @@ -684,7 +684,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { - fprintf( stderr, "HIGHGUI ERROR: V4L2: Could not obtain specifics of capture window.\n\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L2: Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); return -1; } @@ -698,12 +698,12 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->form.fmt.pix.height = capture->height; if (-1 == xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) { - fprintf(stderr, "HIGHGUI ERROR: libv4l unable to ioctl S_FMT\n"); + fprintf(stderr, "VIDEOIO ERROR: libv4l unable to ioctl S_FMT\n"); return -1; } if (V4L2_PIX_FMT_BGR24 != capture->form.fmt.pix.pixelformat) { - fprintf( stderr, "HIGHGUI ERROR: libv4l unable convert to requested pixfmt\n"); + fprintf( stderr, "VIDEOIO ERROR: libv4l unable convert to requested pixfmt\n"); return -1; } @@ -829,7 +829,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) if (detect_v4l == -1) { - fprintf (stderr, "HIGHGUI ERROR: V4L" + fprintf (stderr, "VIDEOIO ERROR: V4L" ": device %s: Unable to open for READ ONLY\n", deviceName); return -1; @@ -837,7 +837,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) if (detect_v4l <= 0) { - fprintf (stderr, "HIGHGUI ERROR: V4L" + fprintf (stderr, "VIDEOIO ERROR: V4L" ": device %s: Unable to query number of channels\n", deviceName); return -1; @@ -846,7 +846,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { if ((capture->capability.type & VID_TYPE_CAPTURE) == 0) { /* Nope. 
*/ - fprintf( stderr, "HIGHGUI ERROR: V4L: " + fprintf( stderr, "VIDEOIO ERROR: V4L: " "device %s is unable to capture video memory.\n",deviceName); icvCloseCAM_V4L(capture); return -1; @@ -884,7 +884,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { if(v4l1_ioctl(capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) == -1) { - fprintf( stderr, "HIGHGUI ERROR: V4L: " + fprintf( stderr, "VIDEOIO ERROR: V4L: " "Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); return -1; @@ -894,7 +894,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { if(v4l1_ioctl(capture->deviceHandle, VIDIOCGPICT, &capture->imageProperties) < 0) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Unable to determine size of incoming image\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Unable to determine size of incoming image\n"); icvCloseCAM_V4L(capture); return -1; } @@ -902,17 +902,17 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) capture->imageProperties.palette = VIDEO_PALETTE_RGB24; capture->imageProperties.depth = 24; if (v4l1_ioctl(capture->deviceHandle, VIDIOCSPICT, &capture->imageProperties) < 0) { - fprintf( stderr, "HIGHGUI ERROR: libv4l unable to ioctl VIDIOCSPICT\n\n"); + fprintf( stderr, "VIDEOIO ERROR: libv4l unable to ioctl VIDIOCSPICT\n\n"); icvCloseCAM_V4L(capture); return -1; } if (v4l1_ioctl(capture->deviceHandle, VIDIOCGPICT, &capture->imageProperties) < 0) { - fprintf( stderr, "HIGHGUI ERROR: libv4l unable to ioctl VIDIOCGPICT\n\n"); + fprintf( stderr, "VIDEOIO ERROR: libv4l unable to ioctl VIDIOCGPICT\n\n"); icvCloseCAM_V4L(capture); return -1; } if (capture->imageProperties.palette != VIDEO_PALETTE_RGB24) { - fprintf( stderr, "HIGHGUI ERROR: libv4l unable convert to requested pixfmt\n\n"); + fprintf( stderr, "VIDEOIO ERROR: libv4l unable convert to requested pixfmt\n\n"); icvCloseCAM_V4L(capture); return -1; } @@ -929,7 +929,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) capture->deviceHandle, 0); if (capture->memoryMap == MAP_FAILED) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno)); + fprintf( stderr, "VIDEOIO ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno)); icvCloseCAM_V4L(capture); return -1; } @@ -939,7 +939,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) capture->mmaps = (struct video_mmap *) (malloc(capture->memoryBuffer.frames * sizeof(struct video_mmap))); if (!capture->mmaps) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Could not memory map video frames.\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Could not memory map video frames.\n"); icvCloseCAM_V4L(capture); return -1; } @@ -972,14 +972,14 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index) //search index in indexList if ( (index>-1) && ! ((1 << index) & indexList) ) { - fprintf( stderr, "HIGHGUI ERROR: V4L: index %d is not correct!\n",index); + fprintf( stderr, "VIDEOIO ERROR: V4L: index %d is not correct!\n",index); return NULL; /* Did someone ask for not correct video source number? 
*/ } /* Allocate memory for this humongus CvCaptureCAM_V4L structure that contains ALL the handles for V4L processing */ CvCaptureCAM_V4L * capture = (CvCaptureCAM_V4L*)cvAlloc(sizeof(CvCaptureCAM_V4L)); if (!capture) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Could not allocate memory for capture process.\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Could not allocate memory for capture process.\n"); return NULL; } @@ -1161,7 +1161,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { capture->mmaps[capture->bufferIndex].format = capture->imageProperties.palette; if (v4l1_ioctl(capture->deviceHandle, VIDIOCMCAPTURE, &capture->mmaps[capture->bufferIndex]) == -1) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n"); return 0; } } @@ -1208,7 +1208,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { /* [FD] this really belongs here */ if (v4l1_ioctl(capture->deviceHandle, VIDIOCSYNC, &capture->mmaps[capture->bufferIndex].frame) == -1) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno)); + fprintf( stderr, "VIDEOIO ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno)); } } @@ -1266,7 +1266,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { break; default: fprintf( stderr, - "HIGHGUI ERROR: V4L: Cannot convert from palette %d to RGB\n", + "VIDEOIO ERROR: V4L: Cannot convert from palette %d to RGB\n", capture->imageProperties.palette); return 0; } @@ -1291,7 +1291,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, /* display an error message, and return an error code */ perror ("VIDIOC_G_FMT"); if (v4l1_ioctl (capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) < 0) { - fprintf (stderr, "HIGHGUI ERROR: V4L: Unable to determine size of incoming image\n"); + fprintf (stderr, " ERROR: V4L: Unable to determine size of incoming image\n"); icvCloseCAM_V4L(capture); return -1; } else { @@ -1333,7 +1333,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, /* all went well */ is_v4l2_device = 1; } else { - fprintf(stderr, "HIGHGUI ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno)); + fprintf(stderr, "VIDEOIO ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno)); } if (is_v4l2_device == 1) { @@ -1342,7 +1342,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, int v4l2_max = v4l2_get_ctrl_max(capture, capture->control.id); if ((v4l2_min == -1) && (v4l2_max == -1)) { - fprintf(stderr, "HIGHGUI ERROR: V4L2: Property %s(%u) not supported by device\n", name, property_id); + fprintf(stderr, "VIDEOIO ERROR: V4L2: Property %s(%u) not supported by device\n", name, property_id); return -1; } @@ -1367,11 +1367,11 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, retval = capture->imageProperties.hue; break; case CV_CAP_PROP_GAIN: - fprintf(stderr, "HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n"); return -1; break; case CV_CAP_PROP_EXPOSURE: - fprintf(stderr, "HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n"); return -1; break; } @@ -1440,7 +1440,7 @@ static int icvSetVideoSize( 
CvCaptureCAM_V4L* capture, int w, int h) { /* Get window info again, to get the real value */ if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { - fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); @@ -1530,14 +1530,14 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val v4l2_max = v4l2_get_ctrl_max(capture, capture->control.id); if ((v4l2_min == -1) && (v4l2_max == -1)) { - fprintf(stderr, "HIGHGUI ERROR: V4L: Property %s(%u) not supported by device\n", name, property_id); + fprintf(stderr, "VIDEOIO ERROR: V4L: Property %s(%u) not supported by device\n", name, property_id); return -1; } if(v4l2_ioctl(capture->deviceHandle, VIDIOC_G_CTRL, &capture->control) == 0) { /* all went well */ } else { - fprintf(stderr, "HIGHGUI ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno)); + fprintf(stderr, "VIDEOIO ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno)); } if (v4l2_max != 0) { @@ -1558,7 +1558,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val if (v4l2_ioctl(capture->deviceHandle, VIDIOC_S_CTRL, &c) != 0) { /* The driver may clamp the value or return ERANGE, ignored here */ if (errno != ERANGE) { - fprintf(stderr, "HIGHGUI ERROR: V4L2: Failed to set control \"%d\": %s (value %d)\n", c.id, strerror(errno), c.value); + fprintf(stderr, "VIDEOIO ERROR: V4L2: Failed to set control \"%d\": %s (value %d)\n", c.id, strerror(errno), c.value); is_v4l2 = 0; } else { return 0; @@ -1568,7 +1568,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val } if (is_v4l2 == 0) { /* use v4l1_ioctl */ - fprintf(stderr, "HIGHGUI WARNING: Setting property %u through v4l2 failed. Trying with v4l1.\n", c.id); + fprintf(stderr, "VIDEOIO WARNING: Setting property %u through v4l2 failed. 
Trying with v4l1.\n", c.id); int v4l_value; /* scale the value to the wanted integer one */ v4l_value = (int)(0xFFFF * value); @@ -1587,18 +1587,18 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val capture->imageProperties.hue = v4l_value; break; case CV_CAP_PROP_GAIN: - fprintf(stderr, "HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n"); return -1; case CV_CAP_PROP_EXPOSURE: - fprintf(stderr, "HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n"); return -1; default: - fprintf(stderr, "HIGHGUI ERROR: V4L: property #%d is not supported\n", property_id); + fprintf(stderr, "VIDEOIO ERROR: V4L: property #%d is not supported\n", property_id); return -1; } if (v4l1_ioctl(capture->deviceHandle, VIDIOCSPICT, &capture->imageProperties) < 0){ - fprintf(stderr, "HIGHGUI ERROR: V4L: Unable to set video informations\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set video informations\n"); icvCloseCAM_V4L(capture); return -1; } @@ -1643,7 +1643,7 @@ static int icvSetPropertyCAM_V4L(CvCaptureCAM_V4L* capture, int property_id, dou setfps.parm.capture.timeperframe.numerator = 1; setfps.parm.capture.timeperframe.denominator = value; if (xioctl (capture->deviceHandle, VIDIOC_S_PARM, &setfps) < 0){ - fprintf(stderr, "HIGHGUI ERROR: V4L: Unable to set camera FPS\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set camera FPS\n"); retval=0; } break; diff --git a/modules/highgui/src/cap_msmf.cpp b/modules/videoio/src/cap_msmf.cpp similarity index 100% rename from modules/highgui/src/cap_msmf.cpp rename to modules/videoio/src/cap_msmf.cpp diff --git a/modules/highgui/src/cap_msmf.hpp b/modules/videoio/src/cap_msmf.hpp similarity index 99% rename from modules/highgui/src/cap_msmf.hpp rename to modules/videoio/src/cap_msmf.hpp index c212ca910d..3ecd43cb57 100644 --- a/modules/highgui/src/cap_msmf.hpp +++ b/modules/videoio/src/cap_msmf.hpp @@ -2277,7 +2277,7 @@ protected: /* Be sure to declare webcam device capability in manifest For better media capture support, add the following snippet with correct module name to the project manifest - (highgui needs DLL activation class factoryentry points): + (videoio needs DLL activation class factoryentry points): diff --git a/modules/highgui/src/cap_openni.cpp b/modules/videoio/src/cap_openni.cpp similarity index 100% rename from modules/highgui/src/cap_openni.cpp rename to modules/videoio/src/cap_openni.cpp diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp new file mode 100644 index 0000000000..62aacb2faa --- /dev/null +++ b/modules/videoio/src/cap_openni2.cpp @@ -0,0 +1,921 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ +#include "precomp.hpp" +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" + +#ifdef HAVE_OPENNI2 + +#if defined TBB_INTERFACE_VERSION && TBB_INTERFACE_VERSION < 5000 +# undef HAVE_TBB +#endif + +#include + +#ifndef i386 +# define i386 0 +#endif +#ifndef __arm__ +# define __arm__ 0 +#endif +#ifndef _ARC +# define _ARC 0 +#endif +#ifndef __APPLE__ +# define __APPLE__ 0 +#endif + +#define CV_STREAM_TIMEOUT 2000 + +#define CV_DEPTH_STREAM 0 +#define CV_COLOR_STREAM 1 + +#define CV_NUM_STREAMS 2 + +#include "OpenNI.h" +#include "PS1080.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class CvCapture_OpenNI2 : public CvCapture +{ +public: + enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 }; + + static const int INVALID_PIXEL_VAL = 0; + static const int INVALID_COORDINATE_VAL = 0; + +#ifdef HAVE_TBB + static const int DEFAULT_MAX_BUFFER_SIZE = 8; +#else + static const int DEFAULT_MAX_BUFFER_SIZE = 2; +#endif + static const int DEFAULT_IS_CIRCLE_BUFFER = 0; + static const int DEFAULT_MAX_TIME_DURATION = 20; + + CvCapture_OpenNI2(int index = 0); + CvCapture_OpenNI2(const char * filename); + virtual ~CvCapture_OpenNI2(); + + virtual double getProperty(int propIdx); + virtual bool setProperty(int probIdx, double propVal); + virtual bool grabFrame(); + virtual IplImage* retrieveFrame(int outputType); + + bool isOpened() const; + +protected: + struct OutputMap + { + public: + cv::Mat mat; + IplImage* getIplImagePtr(); + private: + IplImage iplHeader; + }; + + static const int outputMapsTypesCount = 7; + + static openni::VideoMode defaultColorOutputMode(); + static openni::VideoMode defaultDepthOutputMode(); + + IplImage* retrieveDepthMap(); + IplImage* retrievePointCloudMap(); + IplImage* retrieveDisparityMap(); + IplImage* retrieveDisparityMap_32F(); + IplImage* retrieveValidDepthMask(); + IplImage* retrieveBGRImage(); + IplImage* retrieveGrayImage(); + + bool readCamerasParams(); + + double 
getDepthGeneratorProperty(int propIdx); + bool setDepthGeneratorProperty(int propIdx, double propVal); + double getImageGeneratorProperty(int propIdx); + bool setImageGeneratorProperty(int propIdx, double propVal); + double getCommonProperty(int propIdx); + bool setCommonProperty(int propIdx, double propVal); + + // OpenNI context + openni::Device device; + bool isContextOpened; + openni::Recorder recorder; + + // Data generators with its metadata + openni::VideoStream depth, color, **streams; + openni::VideoFrameRef depthFrame, colorFrame; + cv::Mat depthImage, colorImage; + + int maxBufferSize, maxTimeDuration; // for approx sync + bool isCircleBuffer; + //cv::Ptr approxSyncGrabber; + + // Cameras settings: + // TODO find in OpenNI function to convert z->disparity and remove fields "baseline" and depthFocalLength_VGA + // Distance between IR projector and IR camera (in meters) + double baseline; + // Focal length for the IR camera in VGA resolution (in pixels) + int depthFocalLength_VGA; + + // The value for shadow (occluded pixels) + int shadowValue; + // The value for pixels without a valid disparity measurement + int noSampleValue; + + int currentStream; + + std::vector outputMaps; +}; + +IplImage* CvCapture_OpenNI2::OutputMap::getIplImagePtr() +{ + if( mat.empty() ) + return 0; + + iplHeader = IplImage(mat); + return &iplHeader; +} + +bool CvCapture_OpenNI2::isOpened() const +{ + return isContextOpened; +} + +openni::VideoMode CvCapture_OpenNI2::defaultColorOutputMode() +{ + openni::VideoMode mode; + mode.setResolution(640, 480); + mode.setFps(30); + mode.setPixelFormat(openni::PIXEL_FORMAT_RGB888); + return mode; +} + +openni::VideoMode CvCapture_OpenNI2::defaultDepthOutputMode() +{ + openni::VideoMode mode; + mode.setResolution(640, 480); + mode.setFps(30); + mode.setPixelFormat(openni::PIXEL_FORMAT_DEPTH_1_MM); + return mode; +} + +CvCapture_OpenNI2::CvCapture_OpenNI2( int index ) +{ + const char* deviceURI = openni::ANY_DEVICE; + openni::Status status; + int deviceType = DEVICE_DEFAULT; + + noSampleValue = shadowValue = 0; + + isContextOpened = false; + maxBufferSize = DEFAULT_MAX_BUFFER_SIZE; + isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER; + maxTimeDuration = DEFAULT_MAX_TIME_DURATION; + + if( index >= 10 ) + { + deviceType = index / 10; + index %= 10; + } + + if( deviceType > DEVICE_MAX ) + return; + + // Initialize and configure the context. + status = openni::OpenNI::initialize(); + + if (status != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("Failed to initialize:", openni::OpenNI::getExtendedError())); + return; + } + + status = device.open(deviceURI); + if( status != openni::STATUS_OK ) + { + CV_Error(CV_StsError, cv::format("OpenCVKinect: Device open failed see: %s\n", openni::OpenNI::getExtendedError())); + openni::OpenNI::shutdown(); + return; + } + + //device.setDepthColorSyncEnabled(true); + + + status = depth.create(device, openni::SENSOR_DEPTH); + if (status == openni::STATUS_OK) + { + if (depth.isValid()) + { + CV_DbgAssert(depth.setVideoMode(defaultDepthOutputMode()) == openni::STATUS_OK); // xn::DepthGenerator supports VGA only! 
(Jan 2011) + } + + status = depth.start(); + if (status != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Couldn't start depth stream: %s\n", openni::OpenNI::getExtendedError())); + depth.destroy(); + return; + } + } + else + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Couldn't find depth stream:: %s\n", openni::OpenNI::getExtendedError())); + return; + } + // create a color object + status = color.create(device, openni::SENSOR_COLOR); + if (status == openni::STATUS_OK) + { + // Set map output mode. + if (color.isValid()) + { + CV_DbgAssert(color.setVideoMode(defaultColorOutputMode()) == openni::STATUS_OK); + } + status = color.start(); + if (status != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Couldn't start color stream: %s\n", openni::OpenNI::getExtendedError())); + color.destroy(); + return; + } + } + else + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Couldn't find color stream: %s\n", openni::OpenNI::getExtendedError())); + return; + } + + +// if( deviceType == DEVICE_ASUS_XTION ) +// { +// //ps/asus specific +// imageGenerator.SetIntProperty("InputFormat", 1 /*XN_IO_IMAGE_FORMAT_YUV422*/); +// imageGenerator.SetPixelFormat(XN_PIXEL_FORMAT_RGB24); +// depthGenerator.SetIntProperty("RegistrationType", 1 /*XN_PROCESSING_HARDWARE*/); +// } + + if( !readCamerasParams() ) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Could not read cameras parameters\n")); + return; + } + streams = new openni::VideoStream*[CV_NUM_STREAMS]; + streams[CV_DEPTH_STREAM] = &depth; + streams[CV_COLOR_STREAM] = &color; + + outputMaps.resize( outputMapsTypesCount ); + + isContextOpened = true; + + setProperty(CV_CAP_PROP_OPENNI_REGISTRATION, 1.0); +} + +CvCapture_OpenNI2::CvCapture_OpenNI2(const char * filename) +{ + openni::Status status; + + isContextOpened = false; + maxBufferSize = DEFAULT_MAX_BUFFER_SIZE; + isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER; + maxTimeDuration = DEFAULT_MAX_TIME_DURATION; + + // Initialize and configure the context. 
+ status = openni::OpenNI::initialize(); + + if (status != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("Failed to initialize:", openni::OpenNI::getExtendedError())); + return; + } + + // Open file + status = device.open(filename); + if( status != openni::STATUS_OK ) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Failed to open input file (%s): %s\n", filename, openni::OpenNI::getExtendedError())); + return; + } + + if( !readCamerasParams() ) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::CvCapture_OpenNI2 : Could not read cameras parameters\n")); + return; + } + + outputMaps.resize( outputMapsTypesCount ); + + isContextOpened = true; +} + +CvCapture_OpenNI2::~CvCapture_OpenNI2() +{ + this->depthFrame.release(); + this->colorFrame.release(); + this->depth.stop(); + this->color.stop(); + openni::OpenNI::shutdown(); +} + +bool CvCapture_OpenNI2::readCamerasParams() +{ + double pixelSize = 0; + if (depth.getProperty(XN_STREAM_PROPERTY_ZERO_PLANE_PIXEL_SIZE, &pixelSize) != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::readCamerasParams : Could not read pixel size!\n")); + return false; + } + + // pixel size @ VGA = pixel size @ SXGA x 2 + pixelSize *= 2.0; // in mm + + // focal length of IR camera in pixels for VGA resolution + int zeroPlanDistance; // in mm + if (depth.getProperty(XN_STREAM_PROPERTY_ZERO_PLANE_DISTANCE, &zeroPlanDistance) != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::readCamerasParams : Could not read virtual plane distance!\n")); + return false; + } + + if (depth.getProperty(XN_STREAM_PROPERTY_EMITTER_DCMOS_DISTANCE, &baseline) != openni::STATUS_OK) + { + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::readCamerasParams : Could not read base line!\n")); + return false; + } + + // baseline from cm -> mm + baseline *= 10; + + // focal length from mm -> pixels (valid for 640x480) + depthFocalLength_VGA = (int)((double)zeroPlanDistance / (double)pixelSize); + + return true; +} + +double CvCapture_OpenNI2::getProperty( int propIdx ) +{ + double propValue = 0; + + if( isOpened() ) + { + int purePropIdx = propIdx & ~CV_CAP_OPENNI_GENERATORS_MASK; + + if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_IMAGE_GENERATOR ) + { + propValue = getImageGeneratorProperty( purePropIdx ); + } + else if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_DEPTH_GENERATOR ) + { + propValue = getDepthGeneratorProperty( purePropIdx ); + } + else + { + propValue = getCommonProperty( purePropIdx ); + } + } + + return propValue; +} + +bool CvCapture_OpenNI2::setProperty( int propIdx, double propValue ) +{ + bool isSet = false; + if( isOpened() ) + { + int purePropIdx = propIdx & ~CV_CAP_OPENNI_GENERATORS_MASK; + + if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_IMAGE_GENERATOR ) + { + isSet = setImageGeneratorProperty( purePropIdx, propValue ); + } + else if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_DEPTH_GENERATOR ) + { + isSet = setDepthGeneratorProperty( purePropIdx, propValue ); + } + else + { + isSet = setCommonProperty( purePropIdx, propValue ); + } + } + + return isSet; +} + +double CvCapture_OpenNI2::getCommonProperty( int propIdx ) +{ + double propValue = 0; + + switch( propIdx ) + { + // There is a set of properties that correspond to depth generator by default + // (is they are pass without particular generator flag). 
Two reasons of this: + // 1) We can assume that depth generator is the main one for depth sensor. + // 2) In the initial vertions of OpenNI integration to OpenCV the value of + // flag CV_CAP_OPENNI_DEPTH_GENERATOR was 0 (it isn't zero now). + case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT : + case CV_CAP_PROP_FRAME_WIDTH : + case CV_CAP_PROP_FRAME_HEIGHT : + case CV_CAP_PROP_FPS : + case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH : + case CV_CAP_PROP_OPENNI_BASELINE : + case CV_CAP_PROP_OPENNI_FOCAL_LENGTH : + case CV_CAP_PROP_OPENNI_REGISTRATION : + propValue = getDepthGeneratorProperty( propIdx ); + break; + case CV_CAP_PROP_OPENNI2_SYNC : + propValue = device.getDepthColorSyncEnabled(); + case CV_CAP_PROP_OPENNI2_MIRROR: + { + bool isMirroring = color.getMirroringEnabled() && depth.getMirroringEnabled(); + propValue = isMirroring ? 1.0 : 0.0; + break; + } + default : + CV_Error( CV_StsBadArg, cv::format("Such parameter (propIdx=%d) isn't supported for getting.\n", propIdx) ); + } + + return propValue; +} + +bool CvCapture_OpenNI2::setCommonProperty( int propIdx, double propValue ) +{ + bool isSet = false; + + switch( propIdx ) + { + case CV_CAP_PROP_OPENNI2_MIRROR: + { + bool mirror = propValue > 0.0 ? true : false; + isSet = color.setMirroringEnabled(mirror) == openni::STATUS_OK; + isSet = depth.setMirroringEnabled(mirror) == openni::STATUS_OK; + } + break; + // There is a set of properties that correspond to depth generator by default + // (is they are pass without particular generator flag). + case CV_CAP_PROP_OPENNI_REGISTRATION: + isSet = setDepthGeneratorProperty( propIdx, propValue ); + break; + case CV_CAP_PROP_OPENNI2_SYNC: + isSet = device.setDepthColorSyncEnabled(propValue > 0.0) == openni::STATUS_OK; + break; + default: + CV_Error( CV_StsBadArg, cv::format("Such parameter (propIdx=%d) isn't supported for setting.\n", propIdx) ); + } + + return isSet; +} + +double CvCapture_OpenNI2::getDepthGeneratorProperty( int propIdx ) +{ + double propValue = 0; + if( !depth.isValid() ) + return propValue; + + openni::VideoMode mode; + + switch( propIdx ) + { + case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT : + CV_DbgAssert(depth.isValid()); + propValue = 1.; + break; + case CV_CAP_PROP_FRAME_WIDTH : + propValue = depth.getVideoMode().getResolutionX(); + break; + case CV_CAP_PROP_FRAME_HEIGHT : + propValue = depth.getVideoMode().getResolutionY(); + break; + case CV_CAP_PROP_FPS : + mode = depth.getVideoMode(); + propValue = mode.getFps(); + break; + case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH : + propValue = depth.getMaxPixelValue(); + break; + case CV_CAP_PROP_OPENNI_BASELINE : + propValue = baseline; + break; + case CV_CAP_PROP_OPENNI_FOCAL_LENGTH : + propValue = (double)depthFocalLength_VGA; + break; + case CV_CAP_PROP_OPENNI_REGISTRATION : + propValue = device.getImageRegistrationMode(); + break; + case CV_CAP_PROP_POS_MSEC : + propValue = (double)depthFrame.getTimestamp(); + break; + case CV_CAP_PROP_POS_FRAMES : + propValue = depthFrame.getFrameIndex(); + break; + default : + CV_Error( CV_StsBadArg, cv::format("Depth generator does not support such parameter (propIdx=%d) for getting.\n", propIdx) ); + } + + return propValue; +} + +bool CvCapture_OpenNI2::setDepthGeneratorProperty( int propIdx, double propValue ) +{ + bool isSet = false; + + CV_Assert( depth.isValid() ); + + switch( propIdx ) + { + case CV_CAP_PROP_OPENNI_REGISTRATION: + { + if( propValue < 1.0 ) // "on" + { + // if there isn't image generator (i.e. 
ASUS XtionPro doesn't have it) + // then the property isn't avaliable + if ( color.isValid() ) + { + openni::ImageRegistrationMode mode = propValue < 1.0 ? openni::IMAGE_REGISTRATION_OFF : openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR; + if( !device.getImageRegistrationMode() == mode ) + { + if (device.isImageRegistrationModeSupported(mode)) + { + openni::Status status = device.setImageRegistrationMode(mode); + if( status != openni::STATUS_OK ) + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::setDepthGeneratorProperty : %s\n", openni::OpenNI::getExtendedError())); + else + isSet = true; + } + else + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::setDepthGeneratorProperty : Unsupported viewpoint.\n")); + } + else + isSet = true; + } + } + else // "off" + { + openni::Status status = device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_OFF); + if( status != openni::STATUS_OK ) + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::setDepthGeneratorProperty : %s\n", openni::OpenNI::getExtendedError())); + else + isSet = true; + } + } + break; + default: + CV_Error( CV_StsBadArg, cv::format("Depth generator does not support such parameter (propIdx=%d) for setting.\n", propIdx) ); + } + + return isSet; +} + +double CvCapture_OpenNI2::getImageGeneratorProperty( int propIdx ) +{ + double propValue = 0.; + if( !color.isValid() ) + return propValue; + + openni::VideoMode mode; + switch( propIdx ) + { + case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT : + CV_DbgAssert( color.isValid() ); + propValue = 1.; + break; + case CV_CAP_PROP_FRAME_WIDTH : + propValue = color.getVideoMode().getResolutionX(); + break; + case CV_CAP_PROP_FRAME_HEIGHT : + propValue = color.getVideoMode().getResolutionY(); + break; + case CV_CAP_PROP_FPS : + propValue = color.getVideoMode().getFps(); + break; + case CV_CAP_PROP_POS_MSEC : + propValue = (double)colorFrame.getTimestamp(); + break; + case CV_CAP_PROP_POS_FRAMES : + propValue = (double)colorFrame.getFrameIndex(); + break; + default : + CV_Error( CV_StsBadArg, cv::format("Image generator does not support such parameter (propIdx=%d) for getting.\n", propIdx) ); + } + + return propValue; +} + +bool CvCapture_OpenNI2::setImageGeneratorProperty(int propIdx, double propValue) +{ + bool isSet = false; + if( !color.isValid() ) + return isSet; + + switch( propIdx ) + { + case CV_CAP_PROP_OPENNI_OUTPUT_MODE : + { + openni::VideoMode mode; + + switch( cvRound(propValue) ) + { + case CV_CAP_OPENNI_VGA_30HZ : + mode.setResolution(640,480); + mode.setFps(30); + break; + case CV_CAP_OPENNI_SXGA_15HZ : + mode.setResolution(1280, 960); + mode.setFps(15); + break; + case CV_CAP_OPENNI_SXGA_30HZ : + mode.setResolution(1280, 960); + mode.setFps(30); + break; + case CV_CAP_OPENNI_QVGA_30HZ : + mode.setResolution(320, 240); + mode.setFps(30); + break; + case CV_CAP_OPENNI_QVGA_60HZ : + mode.setResolution(320, 240); + mode.setFps(60); + break; + default : + CV_Error( CV_StsBadArg, "Unsupported image generator output mode.\n"); + } + + openni::Status status = color.setVideoMode( mode ); + if( status != openni::STATUS_OK ) + CV_Error(CV_StsError, cv::format("CvCapture_OpenNI2::setImageGeneratorProperty : %s\n", openni::OpenNI::getExtendedError())); + else + isSet = true; + break; + } + default: + CV_Error( CV_StsBadArg, cv::format("Image generator does not support such parameter (propIdx=%d) for setting.\n", propIdx) ); + } + + return isSet; +} + +bool CvCapture_OpenNI2::grabFrame() +{ + if( !isOpened() ) + return false; + + bool isGrabbed = false; + + openni::Status status = 
openni::OpenNI::waitForAnyStream(streams, CV_NUM_STREAMS, ¤tStream, CV_STREAM_TIMEOUT); + if( status != openni::STATUS_OK ) + return false; + + if( depth.isValid() ) + depth.readFrame(&depthFrame); + if (color.isValid()) + color.readFrame(&colorFrame); + isGrabbed = true; + + return isGrabbed; +} + +inline void getDepthMapFromMetaData(const openni::VideoFrameRef& depthMetaData, cv::Mat& depthMap, int noSampleValue, int shadowValue) +{ + depthMap.create(depthMetaData.getHeight(), depthMetaData.getWidth(), CV_16UC1); + depthMap.data = (uchar*)depthMetaData.getData(); + + cv::Mat badMask = (depthMap == (double)noSampleValue) | (depthMap == (double)shadowValue) | (depthMap == 0); + + // mask the pixels with invalid depth + depthMap.setTo( cv::Scalar::all( CvCapture_OpenNI2::INVALID_PIXEL_VAL ), badMask ); +} + +IplImage* CvCapture_OpenNI2::retrieveDepthMap() +{ + if( !depth.isValid() ) + return 0; + + getDepthMapFromMetaData( depthFrame, outputMaps[CV_CAP_OPENNI_DEPTH_MAP].mat, noSampleValue, shadowValue ); + + return outputMaps[CV_CAP_OPENNI_DEPTH_MAP].getIplImagePtr(); +} + +IplImage* CvCapture_OpenNI2::retrievePointCloudMap() +{ + if( !depthFrame.isValid() ) + return 0; + + cv::Mat depthImg; + getDepthMapFromMetaData(depthFrame, depthImg, noSampleValue, shadowValue); + + const int badPoint = INVALID_PIXEL_VAL; + const float badCoord = INVALID_COORDINATE_VAL; + int cols = depthFrame.getWidth(), rows = depthFrame.getHeight(); + cv::Mat pointCloud_XYZ( rows, cols, CV_32FC3, cv::Scalar::all(badPoint) ); + + float worldX, worldY, worldZ; + for( int y = 0; y < rows; y++ ) + { + for (int x = 0; x < cols; x++) + { + openni::CoordinateConverter::convertDepthToWorld(depth, x, y, depthImg.at(y, x), &worldX, &worldY, &worldZ); + + if (depthImg.at(y, x) == badPoint) // not valid + pointCloud_XYZ.at(y, x) = cv::Point3f(badCoord, badCoord, badCoord); + else + { + pointCloud_XYZ.at(y, x) = cv::Point3f(worldX*0.001f, worldY*0.001f, worldZ*0.001f); // from mm to meters + } + } + } + + outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].mat = pointCloud_XYZ; + + return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr(); +} + +static void computeDisparity_32F( const openni::VideoFrameRef& depthMetaData, cv::Mat& disp, double baseline, int F, int noSampleValue, int shadowValue) +{ + cv::Mat depth; + getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue ); + CV_Assert( depth.type() == CV_16UC1 ); + + // disparity = baseline * F / z; + + float mult = (float)(baseline /*mm*/ * F /*pixels*/); + + disp.create( depth.size(), CV_32FC1); + disp = cv::Scalar::all( CvCapture_OpenNI2::INVALID_PIXEL_VAL ); + for( int y = 0; y < disp.rows; y++ ) + { + for( int x = 0; x < disp.cols; x++ ) + { + unsigned short curDepth = depth.at(y,x); + if( curDepth != CvCapture_OpenNI2::INVALID_PIXEL_VAL ) + disp.at(y,x) = mult / curDepth; + } + } +} + +IplImage* CvCapture_OpenNI2::retrieveDisparityMap() +{ + if (!depthFrame.isValid()) + return 0; + + cv::Mat disp32; + computeDisparity_32F(depthFrame, disp32, baseline, depthFocalLength_VGA, noSampleValue, shadowValue); + + disp32.convertTo( outputMaps[CV_CAP_OPENNI_DISPARITY_MAP].mat, CV_8UC1 ); + + return outputMaps[CV_CAP_OPENNI_DISPARITY_MAP].getIplImagePtr(); +} + +IplImage* CvCapture_OpenNI2::retrieveDisparityMap_32F() +{ + if (!depthFrame.isValid()) + return 0; + + computeDisparity_32F(depthFrame, outputMaps[CV_CAP_OPENNI_DISPARITY_MAP_32F].mat, baseline, depthFocalLength_VGA, noSampleValue, shadowValue); + + return 
outputMaps[CV_CAP_OPENNI_DISPARITY_MAP_32F].getIplImagePtr(); +} + +IplImage* CvCapture_OpenNI2::retrieveValidDepthMask() +{ + if (!depthFrame.isValid()) + return 0; + + cv::Mat depth; + getDepthMapFromMetaData(depthFrame, depth, noSampleValue, shadowValue); + + outputMaps[CV_CAP_OPENNI_VALID_DEPTH_MASK].mat = depth != CvCapture_OpenNI2::INVALID_PIXEL_VAL; + + return outputMaps[CV_CAP_OPENNI_VALID_DEPTH_MASK].getIplImagePtr(); +} + +inline void getBGRImageFromMetaData( const openni::VideoFrameRef& imageMetaData, cv::Mat& bgrImage ) +{ + cv::Mat bufferImage; + if( imageMetaData.getVideoMode().getPixelFormat() != openni::PIXEL_FORMAT_RGB888 ) + CV_Error( CV_StsUnsupportedFormat, "Unsupported format of grabbed image\n" ); + + bgrImage.create(imageMetaData.getHeight(), imageMetaData.getWidth(), CV_8UC3); + bufferImage.create(imageMetaData.getHeight(), imageMetaData.getWidth(), CV_8UC3); + bufferImage.data = (uchar*)imageMetaData.getData(); + + cv::cvtColor(bufferImage, bgrImage, cv::COLOR_RGB2BGR); +} + +IplImage* CvCapture_OpenNI2::retrieveBGRImage() +{ + if( !color.isValid() ) + return 0; + + getBGRImageFromMetaData( colorFrame, outputMaps[CV_CAP_OPENNI_BGR_IMAGE].mat ); + + return outputMaps[CV_CAP_OPENNI_BGR_IMAGE].getIplImagePtr(); +} + +IplImage* CvCapture_OpenNI2::retrieveGrayImage() +{ + if (!colorFrame.isValid()) + return 0; + + CV_Assert(colorFrame.getVideoMode().getPixelFormat() == openni::PIXEL_FORMAT_RGB888); // RGB + + cv::Mat rgbImage; + getBGRImageFromMetaData(colorFrame, rgbImage); + cv::cvtColor( rgbImage, outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].mat, CV_BGR2GRAY ); + + return outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].getIplImagePtr(); +} + +IplImage* CvCapture_OpenNI2::retrieveFrame( int outputType ) +{ + IplImage* image = 0; + CV_Assert( outputType < outputMapsTypesCount && outputType >= 0); + + if( outputType == CV_CAP_OPENNI_DEPTH_MAP ) + { + image = retrieveDepthMap(); + } + else if( outputType == CV_CAP_OPENNI_POINT_CLOUD_MAP ) + { + image = retrievePointCloudMap(); + } + else if( outputType == CV_CAP_OPENNI_DISPARITY_MAP ) + { + image = retrieveDisparityMap(); + } + else if( outputType == CV_CAP_OPENNI_DISPARITY_MAP_32F ) + { + image = retrieveDisparityMap_32F(); + } + else if( outputType == CV_CAP_OPENNI_VALID_DEPTH_MASK ) + { + image = retrieveValidDepthMask(); + } + else if( outputType == CV_CAP_OPENNI_BGR_IMAGE ) + { + image = retrieveBGRImage(); + } + else if( outputType == CV_CAP_OPENNI_GRAY_IMAGE ) + { + image = retrieveGrayImage(); + } + + return image; +} + +CvCapture* cvCreateCameraCapture_OpenNI( int index ) +{ + CvCapture_OpenNI2* capture = new CvCapture_OpenNI2( index ); + + if( capture->isOpened() ) + return capture; + + delete capture; + return 0; +} + +CvCapture* cvCreateFileCapture_OpenNI( const char* filename ) +{ + CvCapture_OpenNI2* capture = new CvCapture_OpenNI2( filename ); + + if( capture->isOpened() ) + return capture; + + delete capture; + return 0; +} + +#endif diff --git a/modules/highgui/src/cap_pvapi.cpp b/modules/videoio/src/cap_pvapi.cpp similarity index 100% rename from modules/highgui/src/cap_pvapi.cpp rename to modules/videoio/src/cap_pvapi.cpp diff --git a/modules/highgui/src/cap_qt.cpp b/modules/videoio/src/cap_qt.cpp similarity index 100% rename from modules/highgui/src/cap_qt.cpp rename to modules/videoio/src/cap_qt.cpp diff --git a/modules/highgui/src/cap_qtkit.mm b/modules/videoio/src/cap_qtkit.mm similarity index 100% rename from modules/highgui/src/cap_qtkit.mm rename to modules/videoio/src/cap_qtkit.mm diff --git 
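readCamerasParams() and computeDisparity_32F() above implement the depth-to-disparity conversion disparity = baseline * F / z, with the baseline converted from centimetres to millimetres and the VGA focal length derived from the zero-plane distance and the doubled SXGA pixel size. A small worked example, using typical Kinect-class numbers purely as an assumption (they are not read from any device):

    // Worked numeric example of the conversion used in computeDisparity_32F above.
    // The device numbers are rough, typical Kinect-class values (an assumption for
    // illustration only), not values queried from a real sensor.
    #include <cstdio>

    int main()
    {
        double baseline = 75.0;   // mm; XN_STREAM_PROPERTY_EMITTER_DCMOS_DISTANCE (cm) * 10
        int    F        = 575;    // px; zeroPlaneDistance / (2 * pixelSize@SXGA), see readCamerasParams()
        unsigned short z = 2000;  // mm; one CV_16UC1 depth sample, i.e. a point 2 m away

        float disparity = (float)(baseline * F) / z;            // disparity = baseline * F / z
        std::printf("disparity at 2 m: %.1f px\n", disparity);  // ~21.6 px
        return 0;
    }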
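The factories cvCreateCameraCapture_OpenNI()/cvCreateFileCapture_OpenNI() at the end of cap_openni2.cpp are what the videoio dispatcher calls for the OpenNI2 domain. Below is a minimal usage sketch, assuming the build has HAVE_OPENNI2, that the CV_CAP_OPENNI2 domain constant introduced alongside this backend is available, and that a depth sensor is attached; the retrieve flags and property ids are the ones handled by the class above, everything else is illustrative.

    #include "opencv2/videoio.hpp"
    #include "opencv2/videoio/videoio_c.h"   // CV_CAP_OPENNI2 and CV_CAP_OPENNI_* constants

    int main()
    {
        cv::VideoCapture sensor(CV_CAP_OPENNI2);   // expected to reach CvCapture_OpenNI2(index = 0)
        if (!sensor.isOpened())
            return 1;

        // Properties without a generator flag go to the depth generator by default;
        // prefix with CV_CAP_OPENNI_IMAGE_GENERATOR to query the color stream instead.
        double depthW = sensor.get(CV_CAP_PROP_FRAME_WIDTH);
        double colorW = sensor.get(CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_WIDTH);
        (void)depthW; (void)colorW;

        // Depth-to-color registration, handled by setDepthGeneratorProperty() above.
        sensor.set(CV_CAP_PROP_OPENNI_REGISTRATION, 1.0);

        cv::Mat depth, bgr;
        if (sensor.grab())                                      // waitForAnyStream + readFrame
        {
            sensor.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);    // CV_16UC1, millimetres
            sensor.retrieve(bgr,   CV_CAP_OPENNI_BGR_IMAGE);    // CV_8UC3
        }
        return 0;
    }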
a/modules/highgui/src/cap_unicap.cpp b/modules/videoio/src/cap_unicap.cpp similarity index 100% rename from modules/highgui/src/cap_unicap.cpp rename to modules/videoio/src/cap_unicap.cpp diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/videoio/src/cap_v4l.cpp similarity index 97% rename from modules/highgui/src/cap_v4l.cpp rename to modules/videoio/src/cap_v4l.cpp index c9fca05819..efa9a8b44c 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/videoio/src/cap_v4l.cpp @@ -1,7 +1,7 @@ /* This is the contributed code: File: cvcap_v4l.cpp -Current Location: ../opencv-0.9.6/otherlibs/highgui +Current Location: ../opencv-0.9.6/otherlibs/videoio Original Version: 2003-03-12 Magnus Lundin lundin@mlu.mine.nu Original Comments: @@ -71,7 +71,7 @@ For Release: OpenCV-Linux Beta4 Opencv-0.9.6 [FD] I modified the following: - handle YUV420P, YUV420, and YUV411P palettes (for many webcams) without using floating-point - cvGrabFrame should not wait for the end of the first frame, and should return quickly - (see highgui doc) + (see videoio doc) - cvRetrieveFrame should in turn wait for the end of frame capture, and should not trigger the capture of the next frame (the user choses when to do it using GrabFrame) To get the old behavior, re-call cvRetrieveFrame just after cvGrabFrame. @@ -590,7 +590,7 @@ static int autosetup_capture_mode_v4l2(CvCaptureCAM_V4L* capture) } else { - fprintf(stderr, "HIGHGUI ERROR: V4L2: Pixel format of incoming image is unsupported by OpenCV\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L2: Pixel format of incoming image is unsupported by OpenCV\n"); icvCloseCAM_V4L(capture); return -1; } @@ -607,7 +607,7 @@ static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture) { if(ioctl(capture->deviceHandle, VIDIOCGPICT, &capture->imageProperties) < 0) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Unable to determine size of incoming image\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Unable to determine size of incoming image\n"); icvCloseCAM_V4L(capture); return -1; } @@ -627,7 +627,7 @@ static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture) //printf("negotiated palette YUV420P\n"); } else { - fprintf(stderr, "HIGHGUI ERROR: V4L: Pixel format of incoming image is unsupported by OpenCV\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L: Pixel format of incoming image is unsupported by OpenCV\n"); icvCloseCAM_V4L(capture); return -1; } @@ -829,7 +829,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) if ((capture->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) { /* Nope. 
*/ - fprintf( stderr, "HIGHGUI ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName); + fprintf( stderr, "VIDEOIO ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName); icvCloseCAM_V4L(capture); return -1; } @@ -848,7 +848,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) /* V4L2 have a status field from selected video mode */ if (-1 == ioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp)) { - fprintf (stderr, "HIGHGUI ERROR: V4L2: Aren't able to set channel number\n"); + fprintf (stderr, "VIDEOIO ERROR: V4L2: Aren't able to set channel number\n"); icvCloseCAM_V4L (capture); return -1; } @@ -859,7 +859,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName) capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { - fprintf( stderr, "HIGHGUI ERROR: V4L2: Could not obtain specifics of capture window.\n\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L2: Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); return -1; } @@ -990,7 +990,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) if ((detect_v4l == -1) ) { - fprintf (stderr, "HIGHGUI ERROR: V4L" + fprintf (stderr, "VIDEOIO ERROR: V4L" ": device %s: Unable to open for READ ONLY\n", deviceName); return -1; @@ -999,7 +999,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) if ((detect_v4l <= 0) ) { - fprintf (stderr, "HIGHGUI ERROR: V4L" + fprintf (stderr, "VIDEOIO ERROR: V4L" ": device %s: Unable to query number of channels\n", deviceName); return -1; @@ -1008,7 +1008,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { if ((capture->capability.type & VID_TYPE_CAPTURE) == 0) { /* Nope. */ - fprintf( stderr, "HIGHGUI ERROR: V4L: " + fprintf( stderr, "VIDEOIO ERROR: V4L: " "device %s is unable to capture video memory.\n",deviceName); icvCloseCAM_V4L(capture); return -1; @@ -1047,7 +1047,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) { if(ioctl(capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) == -1) { - fprintf( stderr, "HIGHGUI ERROR: V4L: " + fprintf( stderr, "VIDEOIO ERROR: V4L: " "Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); return -1; @@ -1072,7 +1072,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) capture->deviceHandle, 0); if (capture->memoryMap == MAP_FAILED) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno)); + fprintf( stderr, "VIDEOIO ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno)); icvCloseCAM_V4L(capture); } @@ -1081,7 +1081,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName) capture->mmaps = (struct video_mmap *) (malloc(capture->memoryBuffer.frames * sizeof(struct video_mmap))); if (!capture->mmaps) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Could not memory map video frames.\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Could not memory map video frames.\n"); icvCloseCAM_V4L(capture); return -1; } @@ -1116,14 +1116,14 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index) //search index in indexList if ( (index>-1) && ! ((1 << index) & indexList) ) { - fprintf( stderr, "HIGHGUI ERROR: V4L: index %d is not correct!\n",index); + fprintf( stderr, "VIDEOIO ERROR: V4L: index %d is not correct!\n",index); return NULL; /* Did someone ask for not correct video source number? 
*/ } /* Allocate memory for this humongus CvCaptureCAM_V4L structure that contains ALL the handles for V4L processing */ CvCaptureCAM_V4L * capture = (CvCaptureCAM_V4L*)cvAlloc(sizeof(CvCaptureCAM_V4L)); if (!capture) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Could not allocate memory for capture process.\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Could not allocate memory for capture process.\n"); return NULL; } /* Select camera, or rather, V4L video source */ @@ -1317,7 +1317,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) { capture->mmaps[capture->bufferIndex].format = capture->imageProperties.palette; if (ioctl(capture->deviceHandle, VIDIOCMCAPTURE, &capture->mmaps[capture->bufferIndex]) == -1) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n"); + fprintf( stderr, "VIDEOIO ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n"); return 0; } } @@ -2098,7 +2098,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { /* [FD] this really belongs here */ if (ioctl(capture->deviceHandle, VIDIOCSYNC, &capture->mmaps[capture->bufferIndex].frame) == -1) { - fprintf( stderr, "HIGHGUI ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno)); + fprintf( stderr, "VIDEOIO ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno)); } } @@ -2255,7 +2255,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) { break; default: fprintf( stderr, - "HIGHGUI ERROR: V4L: Cannot convert from palette %d to RGB\n", + "VIDEOIO ERROR: V4L: Cannot convert from palette %d to RGB\n", capture->imageProperties.palette); return 0; @@ -2326,7 +2326,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, break; default: fprintf(stderr, - "HIGHGUI ERROR: V4L2: getting property #%d is not supported\n", + "VIDEOIO ERROR: V4L2: getting property #%d is not supported\n", property_id); return -1; } @@ -2334,7 +2334,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL, &capture->control)) { - fprintf( stderr, "HIGHGUI ERROR: V4L2: "); + fprintf( stderr, "VIDEOIO ERROR: V4L2: "); switch (property_id) { case CV_CAP_PROP_BRIGHTNESS: fprintf (stderr, "Brightness"); @@ -2405,7 +2405,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, if (ioctl (capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) < 0) { fprintf (stderr, - "HIGHGUI ERROR: V4L: " + "VIDEOIO ERROR: V4L: " "Unable to determine size of incoming image\n"); icvCloseCAM_V4L(capture); return -1; @@ -2432,17 +2432,17 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, break; case CV_CAP_PROP_GAIN: fprintf(stderr, - "HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n"); + "VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n"); return -1; break; case CV_CAP_PROP_EXPOSURE: fprintf(stderr, - "HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n"); + "VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n"); return -1; break; default: fprintf(stderr, - "HIGHGUI ERROR: V4L: getting property #%d is not supported\n", + "VIDEOIO ERROR: V4L: getting property #%d is not supported\n", property_id); } @@ -2470,7 +2470,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { capture->cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (ioctl (capture->deviceHandle, VIDIOC_CROPCAP, &capture->cropcap) < 0) { - fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: 
VIDIOC_CROPCAP\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: VIDIOC_CROPCAP\n"); } else { CLEAR (capture->crop); @@ -2479,7 +2479,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { /* set the crop area, but don't exit if the device don't support croping */ if (ioctl (capture->deviceHandle, VIDIOC_S_CROP, &capture->crop) < 0) { - fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_S_CROP\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: VIDIOC_S_CROP\n"); } } @@ -2519,7 +2519,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) { /* Get window info again, to get the real value */ if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) { - fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n"); + fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n"); icvCloseCAM_V4L(capture); @@ -2611,7 +2611,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, break; default: fprintf(stderr, - "HIGHGUI ERROR: V4L2: setting property #%d is not supported\n", + "VIDEOIO ERROR: V4L2: setting property #%d is not supported\n", property_id); return -1; } @@ -2678,7 +2678,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, break; default: fprintf(stderr, - "HIGHGUI ERROR: V4L2: setting property #%d is not supported\n", + "VIDEOIO ERROR: V4L2: setting property #%d is not supported\n", property_id); return -1; } @@ -2720,15 +2720,15 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, break; case CV_CAP_PROP_GAIN: fprintf(stderr, - "HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n"); + "VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n"); return -1; case CV_CAP_PROP_EXPOSURE: fprintf(stderr, - "HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n"); + "VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n"); return -1; default: fprintf(stderr, - "HIGHGUI ERROR: V4L: property #%d is not supported\n", + "VIDEOIO ERROR: V4L: property #%d is not supported\n", property_id); return -1; } @@ -2737,7 +2737,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, < 0) { fprintf(stderr, - "HIGHGUI ERROR: V4L: Unable to set video informations\n"); + "VIDEOIO ERROR: V4L: Unable to set video informations\n"); icvCloseCAM_V4L(capture); return -1; } @@ -2786,7 +2786,7 @@ static int icvSetPropertyCAM_V4L( CvCaptureCAM_V4L* capture, break; default: fprintf(stderr, - "HIGHGUI ERROR: V4L: setting property #%d is not supported\n", + "VIDEOIO ERROR: V4L: setting property #%d is not supported\n", property_id); } diff --git a/modules/highgui/src/cap_vfw.cpp b/modules/videoio/src/cap_vfw.cpp similarity index 100% rename from modules/highgui/src/cap_vfw.cpp rename to modules/videoio/src/cap_vfw.cpp diff --git a/modules/highgui/src/cap_ximea.cpp b/modules/videoio/src/cap_ximea.cpp similarity index 100% rename from modules/highgui/src/cap_ximea.cpp rename to modules/videoio/src/cap_ximea.cpp diff --git a/modules/highgui/src/cap_xine.cpp b/modules/videoio/src/cap_xine.cpp similarity index 100% rename from modules/highgui/src/cap_xine.cpp rename to modules/videoio/src/cap_xine.cpp diff --git a/modules/highgui/src/ffmpeg_codecs.hpp b/modules/videoio/src/ffmpeg_codecs.hpp similarity index 100% rename from modules/highgui/src/ffmpeg_codecs.hpp rename to modules/videoio/src/ffmpeg_codecs.hpp diff --git a/modules/videoio/src/precomp.hpp b/modules/videoio/src/precomp.hpp new file mode 100644 index 0000000000..13c57023d7 --- /dev/null +++ 
b/modules/videoio/src/precomp.hpp @@ -0,0 +1,180 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __VIDEOIO_H_ +#define __VIDEOIO_H_ + +#include "opencv2/videoio.hpp" + +#include "opencv2/core/utility.hpp" +#include "opencv2/core/private.hpp" + +#include "opencv2/imgcodecs.hpp" + +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgcodecs/imgcodecs_c.h" +#include "opencv2/videoio/videoio_c.h" + +#include +#include +#include +#include +#include +#include + +#if defined WIN32 || defined WINCE + #if !defined _WIN32_WINNT + #ifdef HAVE_MSMF + #define _WIN32_WINNT 0x0600 // Windows Vista + #else + #define _WIN32_WINNT 0x0500 // Windows 2000 + #endif + #endif + + #include + #undef small + #undef min + #undef max + #undef abs +#endif + +#ifdef HAVE_TEGRA_OPTIMIZATION +#include "opencv2/videoio/videoio_tegra.hpp" +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +/***************************** CvCapture structure ******************************/ + +struct CvCapture +{ + virtual ~CvCapture() {} + virtual double getProperty(int) { return 0; } + virtual bool setProperty(int, double) { return 0; } + virtual bool grabFrame() { return true; } + virtual IplImage* retrieveFrame(int) { return 0; } + virtual int getCaptureDomain() { return CV_CAP_ANY; } // Return the type of the capture object: CV_CAP_VFW, etc... 
+}; + +/*************************** CvVideoWriter structure ****************************/ + +struct CvVideoWriter +{ + virtual ~CvVideoWriter() {} + virtual bool writeFrame(const IplImage*) { return false; } +}; + +CvCapture * cvCreateCameraCapture_V4L( int index ); +CvCapture * cvCreateCameraCapture_DC1394( int index ); +CvCapture * cvCreateCameraCapture_DC1394_2( int index ); +CvCapture* cvCreateCameraCapture_MIL( int index ); +CvCapture* cvCreateCameraCapture_Giganetix( int index ); +CvCapture * cvCreateCameraCapture_CMU( int index ); +CV_IMPL CvCapture * cvCreateCameraCapture_TYZX( int index ); +CvCapture* cvCreateFileCapture_Win32( const char* filename ); +CvCapture* cvCreateCameraCapture_VFW( int index ); +CvCapture* cvCreateFileCapture_VFW( const char* filename ); +CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); +CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); +CvCapture* cvCreateCameraCapture_DShow( int index ); +CvCapture* cvCreateCameraCapture_MSMF( int index ); +CvCapture* cvCreateFileCapture_MSMF (const char* filename); +CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); +CvCapture* cvCreateCameraCapture_OpenNI( int index ); +CvCapture* cvCreateFileCapture_OpenNI( const char* filename ); +CvCapture* cvCreateCameraCapture_Android( int index ); +CvCapture* cvCreateCameraCapture_XIMEA( int index ); +CvCapture* cvCreateCameraCapture_AVFoundation(int index); + +CvCapture* cvCreateFileCapture_Images(const char* filename); +CvVideoWriter* cvCreateVideoWriter_Images(const char* filename); + +CvCapture* cvCreateFileCapture_XINE (const char* filename); + + +#define CV_CAP_GSTREAMER_1394 0 +#define CV_CAP_GSTREAMER_V4L 1 +#define CV_CAP_GSTREAMER_V4L2 2 +#define CV_CAP_GSTREAMER_FILE 3 + +CvCapture* cvCreateCapture_GStreamer(int type, const char *filename); +CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename); + + +CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); + +CvCapture * cvCreateFileCapture_QT (const char * filename); +CvCapture * cvCreateCameraCapture_QT (const int index); + +CvVideoWriter* cvCreateVideoWriter_QT ( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); + +CvCapture* cvCreateFileCapture_AVFoundation (const char * filename); +CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); + + +CvCapture * cvCreateCameraCapture_Unicap (const int index); +CvCapture * cvCreateCameraCapture_PvAPI (const int index); +CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc, + double fps, CvSize frameSize, int is_color ); + +namespace cv +{ + class IVideoCapture + { + public: + virtual ~IVideoCapture() {} + virtual double getProperty(int) { return 0; } + virtual bool setProperty(int, double) { return 0; } + virtual bool grabFrame() = 0; + virtual bool retrieveFrame(int, cv::OutputArray) = 0; + virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc... 
+ }; +}; + +#endif /* __VIDEOIO_H_ */ diff --git a/modules/highgui/test/test_ffmpeg.cpp b/modules/videoio/test/test_ffmpeg.cpp similarity index 98% rename from modules/highgui/test/test_ffmpeg.cpp rename to modules/videoio/test/test_ffmpeg.cpp index 61fc3d49a4..2f95cb21d1 100644 --- a/modules/highgui/test/test_ffmpeg.cpp +++ b/modules/videoio/test/test_ffmpeg.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/videoio.hpp" using namespace cv; @@ -145,7 +145,7 @@ public: } }; -TEST(Highgui_Video, ffmpeg_writebig) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); } +TEST(Videoio_Video, ffmpeg_writebig) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); } class CV_FFmpegReadImageTest : public cvtest::BaseTest { @@ -174,7 +174,7 @@ public: } }; -TEST(Highgui_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); } +TEST(Videoio_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); } #endif @@ -360,7 +360,7 @@ private: bool ReadImageAndTest::next; -TEST(Highgui_Video_parallel_writers_and_readers, accuracy) +TEST(Videoio_Video_parallel_writers_and_readers, accuracy) { const unsigned int threadsCount = 4; cvtest::TS* ts = cvtest::TS::ptr(); diff --git a/modules/highgui/test/test_fourcc.cpp b/modules/videoio/test/test_fourcc.cpp similarity index 99% rename from modules/highgui/test/test_fourcc.cpp rename to modules/videoio/test/test_fourcc.cpp index 82f699b098..15bf7118ac 100644 --- a/modules/highgui/test/test_fourcc.cpp +++ b/modules/videoio/test/test_fourcc.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/videoio.hpp" #undef DEFINE_GUID #define DEFINE_GUID(n, fourcc, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) fourcc, @@ -101,7 +101,7 @@ DEFINE_GUID(PIN_CATEGORY_PREVIEW,0xfb6c4282,0x0353,0x11d1,0x90,0x5f,0x00,0x00,0x 0}; -TEST(Highgui_dshow, fourcc_conversion) +TEST(Videoio_dshow, fourcc_conversion) { for(int i = 0; allfourcc[i]; ++i) { diff --git a/modules/highgui/test/test_framecount.cpp b/modules/videoio/test/test_framecount.cpp similarity index 97% rename from modules/highgui/test/test_framecount.cpp rename to modules/videoio/test/test_framecount.cpp index 30f6e67ce6..94ddbf800a 100644 --- a/modules/highgui/test/test_framecount.cpp +++ b/modules/videoio/test/test_framecount.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui/highgui_c.h" +#include "opencv2/videoio/videoio_c.h" #include using namespace cv; @@ -110,5 +110,5 @@ void CV_FramecountTest::run(int) } } #if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined HAVE_FFMPEG -TEST(Highgui_Video, framecount) {CV_FramecountTest test; test.safe_run();} +TEST(Videoio_Video, framecount) {CV_FramecountTest test; test.safe_run();} #endif diff --git a/modules/videoio/test/test_main.cpp b/modules/videoio/test/test_main.cpp new file mode 100644 index 0000000000..3ef2a376ea --- /dev/null +++ b/modules/videoio/test/test_main.cpp @@ -0,0 +1,3 @@ +#include "test_precomp.hpp" + +CV_TEST_MAIN("videoio") diff --git a/modules/highgui/test/test_positioning.cpp b/modules/videoio/test/test_positioning.cpp similarity index 97% rename from modules/highgui/test/test_positioning.cpp rename to modules/videoio/test/test_positioning.cpp index 993a76cb43..398a160a25 100644 --- a/modules/highgui/test/test_positioning.cpp +++ b/modules/videoio/test/test_positioning.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui/highgui_c.h" +#include "opencv2/videoio/videoio_c.h" #include 
using namespace cv; @@ -218,6 +218,6 @@ void CV_VideoRandomPositioningTest::run(int) } #if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined HAVE_FFMPEG -TEST (Highgui_Video, seek_progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); } -TEST (Highgui_Video, seek_random) { CV_VideoRandomPositioningTest test; test.safe_run(); } +TEST (Videoio_Video, seek_progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); } +TEST (Videoio_Video, seek_random) { CV_VideoRandomPositioningTest test; test.safe_run(); } #endif diff --git a/modules/videoio/test/test_precomp.hpp b/modules/videoio/test/test_precomp.hpp new file mode 100644 index 0000000000..0dd9caa819 --- /dev/null +++ b/modules/videoio/test/test_precomp.hpp @@ -0,0 +1,91 @@ +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wmissing-declarations" +# if defined __clang__ || defined __APPLE__ +# pragma GCC diagnostic ignored "-Wmissing-prototypes" +# pragma GCC diagnostic ignored "-Wextra" +# endif +#endif + +#ifndef __OPENCV_TEST_PRECOMP_HPP__ +#define __OPENCV_TEST_PRECOMP_HPP__ + +#include +#include "opencv2/ts.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/imgproc/imgproc_c.h" + +#include "opencv2/core/private.hpp" + +#if defined(HAVE_DSHOW) || \ + defined(HAVE_TYZX) || \ + defined(HAVE_VFW) || \ + defined(HAVE_LIBV4L) || \ + (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_DC1394_2) || \ + defined(HAVE_DC1394) || \ + defined(HAVE_CMU1394) || \ + defined(HAVE_MIL) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_UNICAP) || \ + defined(HAVE_PVAPI) || \ + defined(HAVE_OPENNI) || \ + defined(HAVE_XIMEA) || \ + defined(HAVE_AVFOUNDATION) || \ + defined(HAVE_GIGE_API) || \ + defined(HAVE_INTELPERC) || \ + (0) + //defined(HAVE_ANDROID_NATIVE_CAMERA) || - enable after #1193 +# define BUILD_WITH_CAMERA_SUPPORT 1 +#else +# define BUILD_WITH_CAMERA_SUPPORT 0 +#endif + +#if defined(HAVE_XINE) || \ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_AVFOUNDATION) || \ + /*defined(HAVE_OPENNI) || too specialized */ \ + defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1 +#else +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 +#endif + +#if /*defined(HAVE_XINE) || */\ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_AVFOUNDATION) || \ + defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) +# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1 +#else +# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0 +#endif + +namespace cvtest +{ + +string fourccToString(int fourcc); + +struct VideoFormat +{ + VideoFormat() { fourcc = -1; } + VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {} + bool empty() const { return ext.empty(); } + + string ext; + int fourcc; +}; + +extern const VideoFormat g_specific_fmt_list[]; + +} + +#endif diff --git a/modules/highgui/test/test_video_io.cpp b/modules/videoio/test/test_video_io.cpp similarity index 95% rename from modules/highgui/test/test_video_io.cpp rename to modules/videoio/test/test_video_io.cpp index f380e0d26b..b3d13e7c2f 100644 --- a/modules/highgui/test/test_video_io.cpp +++ b/modules/videoio/test/test_video_io.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui/highgui_c.h" +#include "opencv2/videoio/videoio_c.h" using namespace cv; using namespace std; @@ -99,7 +99,7 @@ 
const VideoFormat g_specific_fmt_list[] = } -class CV_HighGuiTest : public cvtest::BaseTest +class CV_VideoIOTest : public cvtest::BaseTest { protected: void ImageTest (const string& dir); @@ -107,12 +107,12 @@ protected: void SpecificImageTest (const string& dir); void SpecificVideoTest (const string& dir, const cvtest::VideoFormat& fmt); - CV_HighGuiTest() {} - ~CV_HighGuiTest() {} + CV_VideoIOTest() {} + ~CV_VideoIOTest() {} virtual void run(int) = 0; }; -class CV_ImageTest : public CV_HighGuiTest +class CV_ImageTest : public CV_VideoIOTest { public: CV_ImageTest() {} @@ -120,7 +120,7 @@ public: void run(int); }; -class CV_SpecificImageTest : public CV_HighGuiTest +class CV_SpecificImageTest : public CV_VideoIOTest { public: CV_SpecificImageTest() {} @@ -128,7 +128,7 @@ public: void run(int); }; -class CV_VideoTest : public CV_HighGuiTest +class CV_VideoTest : public CV_VideoIOTest { public: CV_VideoTest() {} @@ -136,7 +136,7 @@ public: void run(int); }; -class CV_SpecificVideoTest : public CV_HighGuiTest +class CV_SpecificVideoTest : public CV_VideoIOTest { public: CV_SpecificVideoTest() {} @@ -145,7 +145,7 @@ public: }; -void CV_HighGuiTest::ImageTest(const string& dir) +void CV_VideoIOTest::ImageTest(const string& dir) { string _name = dir + string("../cv/shared/baboon.png"); ts->printf(ts->LOG, "reading image : %s\n", _name.c_str()); @@ -251,7 +251,7 @@ void CV_HighGuiTest::ImageTest(const string& dir) } -void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt) +void CV_VideoIOTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt) { string src_file = dir + "../cv/shared/video_for_test.avi"; string tmp_name = cv::tempfile((cvtest::fourccToString(fmt.fourcc) + "." + fmt.ext).c_str()); @@ -337,7 +337,7 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt ts->printf(ts->LOG, "end test function : ImagesVideo \n"); } -void CV_HighGuiTest::SpecificImageTest(const string& dir) +void CV_VideoIOTest::SpecificImageTest(const string& dir) { const size_t IMAGE_COUNT = 10; @@ -423,7 +423,7 @@ void CV_HighGuiTest::SpecificImageTest(const string& dir) } -void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFormat& fmt) +void CV_VideoIOTest::SpecificVideoTest(const string& dir, const cvtest::VideoFormat& fmt) { string ext = fmt.ext; int fourcc = fmt.fourcc; @@ -568,12 +568,12 @@ void CV_SpecificVideoTest::run(int) } #ifdef HAVE_JPEG -TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); } +TEST(Videoio_Image, regression) { CV_ImageTest test; test.safe_run(); } #endif #if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && !defined(__APPLE__) -TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); } -TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); } +TEST(Videoio_Video, regression) { CV_VideoTest test; test.safe_run(); } +TEST(Videoio_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); } #endif -TEST(Highgui_Image, write_read) { CV_SpecificImageTest test; test.safe_run(); } +TEST(Videoio_Image, write_read) { CV_SpecificImageTest test; test.safe_run(); } diff --git a/modules/highgui/test/test_video_pos.cpp b/modules/videoio/test/test_video_pos.cpp similarity index 93% rename from modules/highgui/test/test_video_pos.cpp rename to modules/videoio/test/test_video_pos.cpp index c8fe4050da..cd21bc2e23 100644 --- a/modules/highgui/test/test_video_pos.cpp +++ b/modules/videoio/test/test_video_pos.cpp @@ -41,7 +41,7 @@ //M*/ 
#include "test_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/videoio.hpp" using namespace cv; using namespace std; @@ -114,7 +114,7 @@ public: cap.set(CAP_PROP_POS_FRAMES, 0); int N = (int)cap.get(CAP_PROP_FRAME_COUNT); - // See the same hack in CV_HighGuiTest::SpecificVideoTest for explanation. + // See the same hack in CV_VideoIOTest::SpecificVideoTest for explanation. int allowed_extra_frames = 0; if (fmt.fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && fmt.ext == "mkv") allowed_extra_frames = 1; @@ -148,8 +148,6 @@ public: idx1, idx); ts->printf(ts->LOG, "Saving both frames ...\n"); ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT); - // imwrite("opencv_test_highgui_postest_actual.png", img); - // imwrite("opencv_test_highgui_postest_expected.png", img0); return; } @@ -167,8 +165,6 @@ public: ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err); ts->printf(ts->LOG, "Saving both frames ...\n"); ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT); - // imwrite("opencv_test_highgui_postest_actual.png", img); - // imwrite("opencv_test_highgui_postest_expected.png", img0); return; } } @@ -179,5 +175,5 @@ public: }; #if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && defined HAVE_FFMPEG -TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); } +TEST(Videoio_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); } #endif diff --git a/modules/videostab/CMakeLists.txt b/modules/videostab/CMakeLists.txt index de82e22239..e252bdbf53 100644 --- a/modules/videostab/CMakeLists.txt +++ b/modules/videostab/CMakeLists.txt @@ -1,3 +1,3 @@ set(the_description "Video stabilization") ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video opencv_photo opencv_calib3d - OPTIONAL opencv_cuda opencv_cudawarping opencv_cudaoptflow opencv_highgui) + OPTIONAL opencv_cuda opencv_cudawarping opencv_cudaoptflow opencv_videoio) diff --git a/modules/videostab/src/frame_source.cpp b/modules/videostab/src/frame_source.cpp index 7ca4b73de7..9db9d52a05 100644 --- a/modules/videostab/src/frame_source.cpp +++ b/modules/videostab/src/frame_source.cpp @@ -45,8 +45,8 @@ #include "opencv2/videostab/ring_buffer.hpp" #include "opencv2/opencv_modules.hpp" -#ifdef HAVE_OPENCV_HIGHGUI -# include "opencv2/highgui.hpp" +#ifdef HAVE_OPENCV_VIDEOIO +# include "opencv2/videoio.hpp" #endif namespace cv @@ -64,7 +64,7 @@ public: virtual void reset() { -#ifdef HAVE_OPENCV_HIGHGUI +#ifdef HAVE_OPENCV_VIDEOIO vc.release(); vc.open(path_); if (!vc.isOpened()) @@ -77,13 +77,13 @@ public: virtual Mat nextFrame() { Mat frame; -#ifdef HAVE_OPENCV_HIGHGUI +#ifdef HAVE_OPENCV_VIDEOIO vc >> frame; #endif return volatileFrame_ ? 
frame : frame.clone(); } -#ifdef HAVE_OPENCV_HIGHGUI +#ifdef HAVE_OPENCV_VIDEOIO int width() {return static_cast(vc.get(CAP_PROP_FRAME_WIDTH));} int height() {return static_cast(vc.get(CAP_PROP_FRAME_HEIGHT));} int count() {return static_cast(vc.get(CAP_PROP_FRAME_COUNT));} @@ -98,7 +98,7 @@ public: private: String path_; bool volatileFrame_; -#ifdef HAVE_OPENCV_HIGHGUI +#ifdef HAVE_OPENCV_VIDEOIO VideoCapture vc; #endif }; diff --git a/modules/world/CMakeLists.txt b/modules/world/CMakeLists.txt index 6a84c1b6ae..33a9304c87 100644 --- a/modules/world/CMakeLists.txt +++ b/modules/world/CMakeLists.txt @@ -106,11 +106,11 @@ macro(ios_include_3party_libs) ocv_list_filterout(objlist jmemansi) # <<= dirty fix endmacro() -if(IOS AND WITH_PNG) +if( (IOS OR APPLE) AND WITH_PNG) ios_include_3party_libs(zlib libpng) endif() -if(IOS AND WITH_JPEG) +if( (IOS OR APPLE) AND WITH_JPEG) ios_include_3party_libs(libjpeg) endif() diff --git a/platforms/osx/Info.plist.in b/platforms/osx/Info.plist.in new file mode 100644 index 0000000000..b2a3baf524 --- /dev/null +++ b/platforms/osx/Info.plist.in @@ -0,0 +1,18 @@ + + + + + CFBundleName + OpenCV + CFBundleIdentifier + org.opencv + CFBundleVersion + ${OPENCV_LIBVERSION} + CFBundleShortVersionString + ${OPENCV_LIBVERSION} + CFBundleSignature + ???? + CFBundlePackageType + FMWK + + diff --git a/platforms/osx/build_framework.py b/platforms/osx/build_framework.py new file mode 100755 index 0000000000..456ea95725 --- /dev/null +++ b/platforms/osx/build_framework.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python +""" +The script builds OpenCV.framework for iOS. +The built framework is universal, it can be used to build app and run it on either iOS simulator or real device. + +Usage: + ./build_framework.py + +By cmake conventions (and especially if you work with OpenCV repository), +the output dir should not be a subdirectory of OpenCV source tree. + +Script will create , if it's missing, and a few its subdirectories: + + + build/ + iPhoneOS-*/ + [cmake-generated build tree for an iOS device target] + iPhoneSimulator/ + [cmake-generated build tree for iOS simulator] + opencv2.framework/ + [the framework content] + +The script should handle minor OpenCV updates efficiently +- it does not recompile the library from scratch each time. +However, opencv2.framework directory is erased and recreated on each run. 
+""" + +import glob, re, os, os.path, shutil, string, sys + +def build_opencv(srcroot, buildroot, target, arch): + "builds OpenCV for device or simulator" + + builddir = os.path.join(buildroot, target + '-' + arch) + if not os.path.isdir(builddir): + os.makedirs(builddir) + currdir = os.getcwd() + os.chdir(builddir) + # for some reason, if you do not specify CMAKE_BUILD_TYPE, it puts libs to "RELEASE" rather than "Release" + cmakeargs = ("-GXcode " + + "-DCMAKE_BUILD_TYPE=Release " + + "-DBUILD_SHARED_LIBS=OFF " + + "-DBUILD_DOCS=OFF " + + "-DBUILD_EXAMPLES=OFF " + + "-DBUILD_TESTS=OFF " + + "-DBUILD_PERF_TESTS=OFF " + + "-DBUILD_opencv_apps=OFF " + + "-DBUILD_opencv_world=ON " + + "-DBUILD_opencv_matlab=OFF " + + "-DWITH_TIFF=OFF -DBUILD_TIFF=OFF " + + "-DWITH_JASPER=OFF -DBUILD_JASPER=OFF " + + "-DWITH_WEBP=OFF -DBUILD_WEBP=OFF " + + "-DWITH_OPENEXR=OFF -DBUILD_OPENEXR=OFF " + + "-DWITH_IPP=OFF -DWITH_IPP_A=OFF " + + "-DCMAKE_C_FLAGS=\"-Wno-implicit-function-declaration\" " + + "-DCMAKE_INSTALL_PREFIX=install") + # if cmake cache exists, just rerun cmake to update OpenCV.xproj if necessary + if os.path.isfile(os.path.join(builddir, "CMakeCache.txt")): + os.system("cmake %s ." % (cmakeargs,)) + else: + os.system("cmake %s %s" % (cmakeargs, srcroot)) + + for wlib in [builddir + "/modules/world/UninstalledProducts/libopencv_world.a", + builddir + "/lib/Release/libopencv_world.a"]: + if os.path.isfile(wlib): + os.remove(wlib) + + os.system("xcodebuild -parallelizeTargets ARCHS=%s -jobs 2 -sdk %s -configuration Release -target ALL_BUILD" % (arch, target.lower())) + os.system("xcodebuild ARCHS=%s -sdk %s -configuration Release -target install install" % (arch, target.lower())) + os.chdir(currdir) + +def put_framework_together(srcroot, dstroot): + "constructs the framework directory after all the targets are built" + + # find the list of targets (basically, ["iPhoneOS", "iPhoneSimulator"]) + targetlist = glob.glob(os.path.join(dstroot, "build", "*")) + targetlist = [os.path.basename(t) for t in targetlist] + + # set the current dir to the dst root + currdir = os.getcwd() + framework_dir = dstroot + "/opencv2.framework" + if os.path.isdir(framework_dir): + shutil.rmtree(framework_dir) + os.makedirs(framework_dir) + os.chdir(framework_dir) + + # form the directory tree + dstdir = "Versions/A" + os.makedirs(dstdir + "/Resources") + + tdir0 = "../build/" + targetlist[0] + # copy headers + shutil.copytree(tdir0 + "/install/include/opencv2", dstdir + "/Headers") + + # make universal static lib + wlist = " ".join(["../build/" + t + "/lib/Release/libopencv_world.a" for t in targetlist]) + os.system("lipo -create " + wlist + " -o " + dstdir + "/opencv2") + + # copy Info.plist + shutil.copyfile(tdir0 + "/osx/Info.plist", dstdir + "/Resources/Info.plist") + + # make symbolic links + os.symlink("A", "Versions/Current") + os.symlink("Versions/Current/Headers", "Headers") + os.symlink("Versions/Current/Resources", "Resources") + os.symlink("Versions/Current/opencv2", "opencv2") + + +def build_framework(srcroot, dstroot): + "main function to do all the work" + + targets = ["MacOSX", "MacOSX" ] + archs = ["x86_64", "i386" ] + for i in range(len(targets)): + build_opencv(srcroot, os.path.join(dstroot, "build"), targets[i], archs[i]) + + put_framework_together(srcroot, dstroot) + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print "Usage:\n\t./build_framework.py \n\n" + sys.exit(0) + + build_framework(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")), os.path.abspath(sys.argv[1])) 
diff --git a/samples/android/CMakeLists.txt b/samples/android/CMakeLists.txt index 0dc4a3cd69..1ca60fbb91 100644 --- a/samples/android/CMakeLists.txt +++ b/samples/android/CMakeLists.txt @@ -19,9 +19,9 @@ add_subdirectory(native-activity) # hello-android sample if(HAVE_opencv_highgui) - ocv_include_modules_recurse(opencv_highgui opencv_core) + ocv_include_modules_recurse(opencv_imgcodecs opencv_videoio opencv_highgui opencv_core) add_executable(hello-android hello-android/main.cpp) - target_link_libraries(hello-android ${OPENCV_LINKER_LIBS} opencv_highgui opencv_core) + target_link_libraries(hello-android ${OPENCV_LINKER_LIBS} opencv_imgcodecs opencv_videoio opencv_highgui opencv_core) set_target_properties(hello-android PROPERTIES OUTPUT_NAME hello-android RUNTIME_OUTPUT_DIRECTORY "${EXECUTABLE_OUTPUT_PATH}") add_dependencies(opencv_android_examples hello-android) endif() diff --git a/samples/android/hello-android/main.cpp b/samples/android/hello-android/main.cpp index cd02e1742a..3e9f762455 100644 --- a/samples/android/hello-android/main.cpp +++ b/samples/android/hello-android/main.cpp @@ -1,4 +1,5 @@ #include +#include #include using namespace cv; diff --git a/samples/android/native-activity/CMakeLists.txt b/samples/android/native-activity/CMakeLists.txt index 1f6d97439a..0cc0571ed9 100644 --- a/samples/android/native-activity/CMakeLists.txt +++ b/samples/android/native-activity/CMakeLists.txt @@ -3,7 +3,7 @@ set(sample example-native-activity) if(BUILD_FAT_JAVA_LIB) set(native_deps opencv_java) else() - set(native_deps opencv_highgui opencv_imgproc) + set(native_deps opencv_videoio opencv_imgcodecs opencv_highgui opencv_imgproc) endif() add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 9 ${ANDROID_SDK_TARGET} NATIVE_DEPS ${native_deps}) diff --git a/samples/cpp/3calibration.cpp b/samples/cpp/3calibration.cpp index 55644e9db4..53254050c0 100644 --- a/samples/cpp/3calibration.cpp +++ b/samples/cpp/3calibration.cpp @@ -4,6 +4,7 @@ #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index e597c0c387..f22483cf6e 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -4,8 +4,9 @@ # ---------------------------------------------------------------------------- SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann - opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree - opencv_features2d opencv_calib3d opencv_stitching opencv_videostab opencv_shape) + opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video + opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d + opencv_stitching opencv_videostab opencv_shape) ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS}) diff --git a/samples/cpp/bagofwords_classification.cpp b/samples/cpp/bagofwords_classification.cpp index 5601aa53ac..ef4f3c73c6 100644 --- a/samples/cpp/bagofwords_classification.cpp +++ b/samples/cpp/bagofwords_classification.cpp @@ -1,4 +1,5 @@ #include "opencv2/opencv_modules.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/features2d/features2d.hpp" diff --git a/samples/cpp/bgfg_gmg.cpp b/samples/cpp/bgfg_gmg.cpp index 226eea4635..a70bec9ee0 100644 --- a/samples/cpp/bgfg_gmg.cpp +++ 
b/samples/cpp/bgfg_gmg.cpp @@ -6,6 +6,7 @@ */ #include "opencv2/video.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include #include diff --git a/samples/cpp/bgfg_segm.cpp b/samples/cpp/bgfg_segm.cpp index a3d02009a7..31c7da05f7 100644 --- a/samples/cpp/bgfg_segm.cpp +++ b/samples/cpp/bgfg_segm.cpp @@ -2,6 +2,7 @@ #include #include "opencv2/imgproc.hpp" #include "opencv2/video/background_segm.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp index bb7c396b43..9a71715b3d 100644 --- a/samples/cpp/calibration.cpp +++ b/samples/cpp/calibration.cpp @@ -2,6 +2,8 @@ #include #include "opencv2/imgproc.hpp" #include "opencv2/calib3d.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/camshiftdemo.cpp b/samples/cpp/camshiftdemo.cpp index 6439cef78e..6400f1ec7d 100644 --- a/samples/cpp/camshiftdemo.cpp +++ b/samples/cpp/camshiftdemo.cpp @@ -1,6 +1,7 @@ #include #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/cloning_demo.cpp b/samples/cpp/cloning_demo.cpp index 24d9b7facf..be5da04bea 100644 --- a/samples/cpp/cloning_demo.cpp +++ b/samples/cpp/cloning_demo.cpp @@ -23,6 +23,7 @@ #include "opencv2/photo.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/core.hpp" #include diff --git a/samples/cpp/cloning_gui.cpp b/samples/cpp/cloning_gui.cpp index 2457b12154..db6cfcfde9 100644 --- a/samples/cpp/cloning_gui.cpp +++ b/samples/cpp/cloning_gui.cpp @@ -33,6 +33,7 @@ #include #include "opencv2/photo.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/core.hpp" #include diff --git a/samples/cpp/connected_components.cpp b/samples/cpp/connected_components.cpp index 50c677d372..32bd0dc053 100644 --- a/samples/cpp/connected_components.cpp +++ b/samples/cpp/connected_components.cpp @@ -1,5 +1,6 @@ #include #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/create_mask.cpp b/samples/cpp/create_mask.cpp index 6da64d738f..42225ebc6c 100644 --- a/samples/cpp/create_mask.cpp +++ b/samples/cpp/create_mask.cpp @@ -10,6 +10,7 @@ */ #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/core.hpp" #include diff --git a/samples/cpp/dbt_face_detection.cpp b/samples/cpp/dbt_face_detection.cpp index a66b90282d..d7409bf5b5 100644 --- a/samples/cpp/dbt_face_detection.cpp +++ b/samples/cpp/dbt_face_detection.cpp @@ -2,6 +2,7 @@ #include // Gaussian Blur #include // Basic OpenCV structures (cv::Mat, Scalar) +#include #include // OpenCV window I/O #include #include diff --git a/samples/cpp/demhist.cpp b/samples/cpp/demhist.cpp index ba60115df0..033b63d82c 100644 --- a/samples/cpp/demhist.cpp +++ b/samples/cpp/demhist.cpp @@ -1,5 +1,6 @@ #include "opencv2/core/utility.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/descriptor_extractor_matcher.cpp b/samples/cpp/descriptor_extractor_matcher.cpp index fe3d4d212e..6f25ac48d7 100644 --- a/samples/cpp/descriptor_extractor_matcher.cpp +++ b/samples/cpp/descriptor_extractor_matcher.cpp @@ -1,3 +1,4 @@ +#include 
"opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/dft.cpp b/samples/cpp/dft.cpp index 4708defabb..c4034e896c 100644 --- a/samples/cpp/dft.cpp +++ b/samples/cpp/dft.cpp @@ -1,6 +1,7 @@ #include "opencv2/core.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/distrans.cpp b/samples/cpp/distrans.cpp index 706f506637..8f0ae57b55 100644 --- a/samples/cpp/distrans.cpp +++ b/samples/cpp/distrans.cpp @@ -1,5 +1,6 @@ #include #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/edge.cpp b/samples/cpp/edge.cpp index 262833c672..a4c0f5d0bb 100644 --- a/samples/cpp/edge.cpp +++ b/samples/cpp/edge.cpp @@ -1,5 +1,6 @@ #include "opencv2/core/utility.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/facedetect.cpp b/samples/cpp/facedetect.cpp index 4c5250664d..0c747d394b 100644 --- a/samples/cpp/facedetect.cpp +++ b/samples/cpp/facedetect.cpp @@ -1,8 +1,11 @@ #include "opencv2/objdetect.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/videoio/videoio_c.h" #include "opencv2/highgui/highgui_c.h" #include diff --git a/samples/cpp/fback.cpp b/samples/cpp/fback.cpp index 5293fd1f7f..476cfe6548 100644 --- a/samples/cpp/fback.cpp +++ b/samples/cpp/fback.cpp @@ -1,5 +1,6 @@ #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/ffilldemo.cpp b/samples/cpp/ffilldemo.cpp index 1cdce9bb5c..46d08186a3 100644 --- a/samples/cpp/ffilldemo.cpp +++ b/samples/cpp/ffilldemo.cpp @@ -1,4 +1,6 @@ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/fitellipse.cpp b/samples/cpp/fitellipse.cpp index c42f8f3c3e..1e9a85857a 100644 --- a/samples/cpp/fitellipse.cpp +++ b/samples/cpp/fitellipse.cpp @@ -15,6 +15,7 @@ * ********************************************************************************/ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include using namespace cv; diff --git a/samples/cpp/grabcut.cpp b/samples/cpp/grabcut.cpp index d6cf201f4b..110e0ff770 100644 --- a/samples/cpp/grabcut.cpp +++ b/samples/cpp/grabcut.cpp @@ -1,3 +1,4 @@ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/houghcircles.cpp b/samples/cpp/houghcircles.cpp index 12f1c57677..2a20b62a76 100644 --- a/samples/cpp/houghcircles.cpp +++ b/samples/cpp/houghcircles.cpp @@ -1,3 +1,4 @@ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/houghlines.cpp b/samples/cpp/houghlines.cpp index 2fdabe8397..ad472a47cc 100644 --- a/samples/cpp/houghlines.cpp +++ b/samples/cpp/houghlines.cpp @@ -1,3 +1,4 @@ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git 
a/samples/cpp/image.cpp b/samples/cpp/image.cpp index 4d7a5715a3..f63d3b77ec 100644 --- a/samples/cpp/image.cpp +++ b/samples/cpp/image.cpp @@ -22,6 +22,7 @@ static void help() #ifdef DEMO_MIXED_API_USE # include +# include #endif int main( int argc, char** argv ) diff --git a/samples/cpp/image_alignment.cpp b/samples/cpp/image_alignment.cpp index 56e8209dd9..85fcc919de 100644 --- a/samples/cpp/image_alignment.cpp +++ b/samples/cpp/image_alignment.cpp @@ -13,6 +13,7 @@ * Authors: G. Evangelidis, INRIA, Grenoble, France * M. Asbach, Fraunhofer IAIS, St. Augustin, Germany */ +#include #include #include #include diff --git a/samples/cpp/image_sequence.cpp b/samples/cpp/image_sequence.cpp index a68017d56f..b87b3043e2 100644 --- a/samples/cpp/image_sequence.cpp +++ b/samples/cpp/image_sequence.cpp @@ -1,4 +1,5 @@ #include +#include #include #include diff --git a/samples/cpp/imagelist_creator.cpp b/samples/cpp/imagelist_creator.cpp index 96114a9c4c..f2abb11c9a 100644 --- a/samples/cpp/imagelist_creator.cpp +++ b/samples/cpp/imagelist_creator.cpp @@ -2,6 +2,7 @@ */ #include "opencv2/core/core.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/inpaint.cpp b/samples/cpp/inpaint.cpp index 7690595cc2..7d17d0bf9f 100644 --- a/samples/cpp/inpaint.cpp +++ b/samples/cpp/inpaint.cpp @@ -1,3 +1,4 @@ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/photo/photo.hpp" diff --git a/samples/cpp/intelperc_capture.cpp b/samples/cpp/intelperc_capture.cpp index 40349e0fbf..5726ccdf4f 100644 --- a/samples/cpp/intelperc_capture.cpp +++ b/samples/cpp/intelperc_capture.cpp @@ -1,6 +1,7 @@ // testOpenCVCam.cpp : Defines the entry point for the console application. 
// +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/laplace.cpp b/samples/cpp/laplace.cpp index 45264e3107..bac432ca50 100644 --- a/samples/cpp/laplace.cpp +++ b/samples/cpp/laplace.cpp @@ -1,3 +1,4 @@ +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/linemod.cpp b/samples/cpp/linemod.cpp index 4d11da36b5..f13bac2196 100644 --- a/samples/cpp/linemod.cpp +++ b/samples/cpp/linemod.cpp @@ -3,6 +3,7 @@ #include // cvFindContours #include #include +#include #include #include #include diff --git a/samples/cpp/lkdemo.cpp b/samples/cpp/lkdemo.cpp index c665cfdfe2..82c1fac2d0 100644 --- a/samples/cpp/lkdemo.cpp +++ b/samples/cpp/lkdemo.cpp @@ -1,5 +1,6 @@ #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/lsd_lines.cpp b/samples/cpp/lsd_lines.cpp index 92452a99c8..82842b2548 100644 --- a/samples/cpp/lsd_lines.cpp +++ b/samples/cpp/lsd_lines.cpp @@ -4,6 +4,7 @@ #include "opencv2/core/core.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" using namespace std; diff --git a/samples/cpp/morphology2.cpp b/samples/cpp/morphology2.cpp index e4ec8162c2..1bb4ec55e3 100644 --- a/samples/cpp/morphology2.cpp +++ b/samples/cpp/morphology2.cpp @@ -1,4 +1,5 @@ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/motempl.cpp b/samples/cpp/motempl.cpp index 76b69ade7b..66449981e7 100644 --- a/samples/cpp/motempl.cpp +++ b/samples/cpp/motempl.cpp @@ -1,5 +1,6 @@ #include "opencv2/video/tracking_c.h" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/videoio/videoio_c.h" #include "opencv2/highgui/highgui_c.h" #include #include diff --git a/samples/cpp/npr_demo.cpp b/samples/cpp/npr_demo.cpp index 5579ca269f..ff6a3fce60 100644 --- a/samples/cpp/npr_demo.cpp +++ b/samples/cpp/npr_demo.cpp @@ -17,6 +17,7 @@ #include #include "opencv2/photo.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/core.hpp" #include diff --git a/samples/cpp/openni_capture.cpp b/samples/cpp/openni_capture.cpp index 802b474207..76b092298a 100644 --- a/samples/cpp/openni_capture.cpp +++ b/samples/cpp/openni_capture.cpp @@ -1,3 +1,4 @@ +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/pca.cpp b/samples/cpp/pca.cpp index d4272736c2..db5fb3802f 100644 --- a/samples/cpp/pca.cpp +++ b/samples/cpp/pca.cpp @@ -43,6 +43,7 @@ #include #include +#include "opencv2/imgcodecs.hpp" #include using namespace cv; diff --git a/samples/cpp/phase_corr.cpp b/samples/cpp/phase_corr.cpp index cfee809412..5e8685fcfa 100644 --- a/samples/cpp/phase_corr.cpp +++ b/samples/cpp/phase_corr.cpp @@ -1,4 +1,5 @@ #include "opencv2/core/core.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/polar_transforms.cpp b/samples/cpp/polar_transforms.cpp index 8ce4831637..3e2810e74b 100644 --- a/samples/cpp/polar_transforms.cpp +++ b/samples/cpp/polar_transforms.cpp @@ -1,4 +1,5 @@ #include "opencv2/imgproc/imgproc_c.h" +#include 
"opencv2/videoio/videoio_c.h" #include "opencv2/highgui/highgui_c.h" #include diff --git a/samples/cpp/segment_objects.cpp b/samples/cpp/segment_objects.cpp index 852fa15be8..32c1f39768 100644 --- a/samples/cpp/segment_objects.cpp +++ b/samples/cpp/segment_objects.cpp @@ -1,4 +1,5 @@ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/video/background_segm.hpp" #include diff --git a/samples/cpp/select3dobj.cpp b/samples/cpp/select3dobj.cpp index 32d30552f5..7df95d17c5 100644 --- a/samples/cpp/select3dobj.cpp +++ b/samples/cpp/select3dobj.cpp @@ -12,6 +12,8 @@ #include #include "opencv2/imgproc.hpp" #include "opencv2/calib3d.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/shape_example.cpp b/samples/cpp/shape_example.cpp index 42630033eb..42bcffdb96 100644 --- a/samples/cpp/shape_example.cpp +++ b/samples/cpp/shape_example.cpp @@ -3,6 +3,7 @@ */ #include "opencv2/shape.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include diff --git a/samples/cpp/shape_transformation.cpp b/samples/cpp/shape_transformation.cpp index 5dac0a5116..62e5554101 100644 --- a/samples/cpp/shape_transformation.cpp +++ b/samples/cpp/shape_transformation.cpp @@ -3,6 +3,7 @@ */ #include "opencv2/shape.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/features2d/features2d.hpp" diff --git a/samples/cpp/simpleflow_demo.cpp b/samples/cpp/simpleflow_demo.cpp index c9cde783da..cc84249ec6 100644 --- a/samples/cpp/simpleflow_demo.cpp +++ b/samples/cpp/simpleflow_demo.cpp @@ -1,6 +1,7 @@ #include #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/cpp/smiledetect.cpp b/samples/cpp/smiledetect.cpp index 61f990cd2d..305cdad8d7 100644 --- a/samples/cpp/smiledetect.cpp +++ b/samples/cpp/smiledetect.cpp @@ -3,6 +3,7 @@ #include "opencv2/imgproc.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/videoio/videoio_c.h" #include "opencv2/highgui/highgui_c.h" #include diff --git a/samples/cpp/squares.cpp b/samples/cpp/squares.cpp index 392075d79f..23d2ad0f13 100644 --- a/samples/cpp/squares.cpp +++ b/samples/cpp/squares.cpp @@ -4,6 +4,7 @@ #include "opencv2/core/core.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/starter_imagelist.cpp b/samples/cpp/starter_imagelist.cpp index fe89579301..a576030b95 100644 --- a/samples/cpp/starter_imagelist.cpp +++ b/samples/cpp/starter_imagelist.cpp @@ -8,6 +8,7 @@ * that was generated with imagelist_creator.cpp * easy as CV_PI right? */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/starter_video.cpp b/samples/cpp/starter_video.cpp index 1a3d5b0c9a..34f23d376c 100644 --- a/samples/cpp/starter_video.cpp +++ b/samples/cpp/starter_video.cpp @@ -11,6 +11,8 @@ * easy as CV_PI right? 
*/ +#include +#include #include #include diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp index a989e2a011..f042d638e5 100644 --- a/samples/cpp/stereo_calib.cpp +++ b/samples/cpp/stereo_calib.cpp @@ -23,6 +23,7 @@ ************************************************** */ #include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" diff --git a/samples/cpp/stereo_match.cpp b/samples/cpp/stereo_match.cpp index 92ebc485ad..aaeea6d810 100644 --- a/samples/cpp/stereo_match.cpp +++ b/samples/cpp/stereo_match.cpp @@ -9,6 +9,7 @@ #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/core/utility.hpp" diff --git a/samples/cpp/stitching.cpp b/samples/cpp/stitching.cpp index 602edfe86e..5b4437ac9e 100644 --- a/samples/cpp/stitching.cpp +++ b/samples/cpp/stitching.cpp @@ -42,6 +42,7 @@ #include #include +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/stitching.hpp" diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp index df0a9abc84..3ba8744586 100644 --- a/samples/cpp/stitching_detailed.cpp +++ b/samples/cpp/stitching_detailed.cpp @@ -46,6 +46,7 @@ #include #include "opencv2/opencv_modules.hpp" #include +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/stitching/detail/autocalib.hpp" #include "opencv2/stitching/detail/blenders.hpp" @@ -527,7 +528,7 @@ int main(int argc, char* argv[]) { vector rmats; for (size_t i = 0; i < cameras.size(); ++i) - rmats.push_back(cameras[i].R); + rmats.push_back(cameras[i].R.clone()); waveCorrect(rmats, wave_correct); for (size_t i = 0; i < cameras.size(); ++i) cameras[i].R = rmats[i]; diff --git a/samples/cpp/textdetection.cpp b/samples/cpp/textdetection.cpp index f1b4ce2650..8f853250fe 100644 --- a/samples/cpp/textdetection.cpp +++ b/samples/cpp/textdetection.cpp @@ -10,6 +10,7 @@ #include "opencv2/opencv.hpp" #include "opencv2/objdetect.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" diff --git a/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp b/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp index eb858a4728..475675b417 100644 --- a/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp +++ b/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/tutorial_code/HighGUI/BasicLinearTransformsTrackbar.cpp b/samples/cpp/tutorial_code/HighGUI/BasicLinearTransformsTrackbar.cpp index 88c1dd4968..213850f995 100644 --- a/samples/cpp/tutorial_code/HighGUI/BasicLinearTransformsTrackbar.cpp +++ b/samples/cpp/tutorial_code/HighGUI/BasicLinearTransformsTrackbar.cpp @@ -5,6 +5,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" using namespace cv; diff --git a/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp b/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp index bee1c5b200..4c5bf9f586 100644 --- a/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp +++ b/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp @@ -5,6 +5,7 @@ #include // Basic OpenCV structures 
(cv::Mat, Scalar) #include // Gaussian Blur +#include #include // OpenCV window I/O using namespace std; diff --git a/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp b/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp index d3b8e44303..9218cf2d31 100644 --- a/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp +++ b/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp @@ -2,7 +2,7 @@ #include // for strings #include // Basic OpenCV structures (cv::Mat) -#include // Video write +#include // Video write using namespace std; using namespace cv; diff --git a/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp index 38a5839bb2..3edfa8c3df 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp index c9e29a4aeb..e106cb461e 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp index 86d2f2e154..ce0e911237 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp index 28181c2d31..ff7b369e2c 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp index 577c8a8b9c..766ca3bf42 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/imgproc/imgproc.hpp" #include #include diff --git a/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp index 424a38e93a..122e19bebc 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git 
a/samples/cpp/tutorial_code/ImgProc/AddingImages.cpp b/samples/cpp/tutorial_code/ImgProc/AddingImages.cpp index 4e974275c6..c32a6a6fd6 100644 --- a/samples/cpp/tutorial_code/ImgProc/AddingImages.cpp +++ b/samples/cpp/tutorial_code/ImgProc/AddingImages.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp b/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp index fee56bf0c2..9ffe1563de 100644 --- a/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp +++ b/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp b/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp index 13a96a1f51..6d4e6eba55 100644 --- a/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp +++ b/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp b/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp index a963bf83e6..96eb26ee0a 100644 --- a/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp +++ b/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp b/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp index fc98d1c21a..4751a5b53f 100644 --- a/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp +++ b/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp b/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp index 8513bcf76a..238f921e92 100644 --- a/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp +++ b/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp @@ -7,6 +7,7 @@ #include #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/features2d/features2d.hpp" diff --git a/samples/cpp/tutorial_code/ImgProc/Threshold.cpp b/samples/cpp/tutorial_code/ImgProc/Threshold.cpp index 96d5686a8d..0944f6cd3b 100644 --- a/samples/cpp/tutorial_code/ImgProc/Threshold.cpp +++ b/samples/cpp/tutorial_code/ImgProc/Threshold.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp index 7851c9f58b..f5611b01f6 100644 --- a/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp index 0a2a2f5fa4..00184a3f87 100644 --- 
a/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp index 29648ceca5..5fc36a2f7f 100644 --- a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp index 6400c43794..2d9b7b6454 100644 --- a/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp index f6dff102d8..b6ccdf5a4c 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp index 86b6a2cb60..49727e9cf0 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp @@ -4,6 +4,7 @@ * @author Ana Huaman */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp index f8c97c4117..2647a4d049 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp b/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp index 0a441ccdfc..17cffc0c05 100644 --- a/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp b/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp index 86db4d8ed4..cf1577f917 100644 --- a/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp @@ -5,6 +5,7 @@ */ #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp index 5eb2d92a7e..cd29b1c2e7 100644 --- 
a/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp index d481d03898..a9c22e60fc 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp index e1f44712b7..c6fd379328 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp index 499eb452e2..0b354291a9 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp index 9ca230f875..c3a5cbf60a 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp index 31c7004787..c5dceab48b 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp @@ -3,6 +3,7 @@ * @brief Demo code for detecting corners using OpenCV built-in functions * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp index e048f057b2..4314a97e26 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp index 5957a95d6d..775e566ce7 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp +++ 
b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp index b45d60a083..cff59f5390 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp @@ -4,6 +4,7 @@ * @author OpenCV team */ +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include diff --git a/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp b/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp index 13fde6f857..34e2504c6e 100644 --- a/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp +++ b/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #ifndef _CRT_SECURE_NO_WARNINGS diff --git a/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp b/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp index 64c650bb82..f4493bb56c 100644 --- a/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp +++ b/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp @@ -8,6 +8,7 @@ #include #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/core/core.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" using namespace cv; diff --git a/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp b/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp index de7a32ca6b..581a968246 100644 --- a/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp +++ b/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp @@ -1,5 +1,6 @@ #include "opencv2/core/core.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp b/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp index f257d6ed6e..ca40fc76d9 100644 --- a/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp +++ b/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp @@ -1,5 +1,6 @@ #include #include +#include "opencv2/imgcodecs.hpp" #include #include #include diff --git a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp index e13f2b6960..1b1c1bc6b3 100644 --- a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp +++ b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp @@ -3,6 +3,7 @@ #include #include +#include "opencv2/imgcodecs.hpp" #include #include @@ -24,6 +25,7 @@ static void help( char* progName) #ifdef DEMO_MIXED_API_USE # include +# include #endif int main( int argc, char** argv ) diff --git a/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp 
b/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp index dea929960c..93e22ceb65 100644 --- a/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp +++ b/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp b/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp index 7ccebcdd5f..e861a5f108 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp @@ -8,6 +8,7 @@ #include #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/nonfree/features2d.hpp" diff --git a/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp b/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp index 47bc3ecfe3..f3d4df88fb 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp @@ -8,6 +8,7 @@ #include #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/nonfree/features2d.hpp" diff --git a/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp b/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp index 89b60d4baa..140136d3a6 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp @@ -8,6 +8,7 @@ #include #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/nonfree/features2d.hpp" diff --git a/samples/cpp/tutorial_code/features2D/SURF_detector.cpp b/samples/cpp/tutorial_code/features2D/SURF_detector.cpp index a1288a8c85..2a14bdc079 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_detector.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_detector.cpp @@ -8,6 +8,7 @@ #include #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/nonfree/features2d.hpp" diff --git a/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp b/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp index 501f87c484..515f7a38b1 100644 --- a/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp +++ b/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp @@ -4,7 +4,7 @@ #include // Basic OpenCV structures #include #include // Image processing methods for the CPU -#include // Read images +#include // Read images // CUDA structures and methods #include diff --git a/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp b/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp index 847cd8f6dc..67403708a1 100644 --- a/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp +++ b/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp @@ -1,4 +1,5 @@ #include +#include #include #include diff --git a/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/introduction_windows_vs.cpp 
b/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/introduction_windows_vs.cpp index 8db86d8de8..c3343fb86b 100644 --- a/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/introduction_windows_vs.cpp +++ b/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/introduction_windows_vs.cpp @@ -1,4 +1,5 @@ #include +#include #include #include diff --git a/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp b/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp index 1c8dbd24a6..2b4a97d54d 100644 --- a/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp +++ b/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp @@ -1,4 +1,5 @@ #include +#include "opencv2/imgcodecs.hpp" #include #include diff --git a/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp b/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp index 4f6daecaa9..bfab746cdf 100644 --- a/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp +++ b/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp @@ -1,5 +1,6 @@ #include #include +#include "opencv2/imgcodecs.hpp" #include #include diff --git a/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp b/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp index e7dc3e98fc..fbd76e4f08 100644 --- a/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp +++ b/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp @@ -1,4 +1,5 @@ #include "opencv2/objdetect.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" diff --git a/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp b/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp index 3264858398..d28fa2bba6 100644 --- a/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp +++ b/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp @@ -4,6 +4,7 @@ * @brief A simplified version of facedetect.cpp, show how to load a cascade classifier and how to find objects (Face + eyes) in a video stream - Using LBP here */ #include "opencv2/objdetect.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" diff --git a/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp b/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp index aa84b15d4e..6ff6687678 100644 --- a/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp +++ b/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp @@ -1,4 +1,5 @@ #include +#include "opencv2/imgcodecs.hpp" #include #include #include diff --git a/samples/cpp/tutorial_code/video/bg_sub.cpp b/samples/cpp/tutorial_code/video/bg_sub.cpp index 24805a1edc..ace8220587 100644 --- a/samples/cpp/tutorial_code/video/bg_sub.cpp +++ b/samples/cpp/tutorial_code/video/bg_sub.cpp @@ -5,6 +5,8 @@ */ //opencv +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include #include //C diff --git a/samples/cpp/tvl1_optical_flow.cpp b/samples/cpp/tvl1_optical_flow.cpp index f5e71863c2..dee9cf6e33 100644 --- a/samples/cpp/tvl1_optical_flow.cpp +++ b/samples/cpp/tvl1_optical_flow.cpp @@ -3,6 +3,7 @@ #include #include "opencv2/video.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" using namespace cv; diff --git a/samples/cpp/ufacedetect.cpp b/samples/cpp/ufacedetect.cpp index 5e13a8211e..a4a027157a 100644 --- a/samples/cpp/ufacedetect.cpp +++ 
b/samples/cpp/ufacedetect.cpp @@ -1,4 +1,6 @@ #include "opencv2/objdetect.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/core/utility.hpp" diff --git a/samples/cpp/video_homography.cpp b/samples/cpp/video_homography.cpp index cefdc891ab..1b12fa04d9 100644 --- a/samples/cpp/video_homography.cpp +++ b/samples/cpp/video_homography.cpp @@ -6,6 +6,7 @@ */ #include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/features2d/features2d.hpp" diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp index 675d483f3d..261badd45f 100644 --- a/samples/cpp/videostab.cpp +++ b/samples/cpp/videostab.cpp @@ -7,6 +7,7 @@ #include #include "opencv2/video.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/videostab.hpp" #include "opencv2/opencv_modules.hpp" diff --git a/samples/cpp/watershed.cpp b/samples/cpp/watershed.cpp index cd47505045..e57c5aa1cf 100644 --- a/samples/cpp/watershed.cpp +++ b/samples/cpp/watershed.cpp @@ -1,5 +1,6 @@ #include #include "opencv2/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include diff --git a/samples/directx/CMakeLists.txt b/samples/directx/CMakeLists.txt index 0bd7de941e..1083894100 100644 --- a/samples/directx/CMakeLists.txt +++ b/samples/directx/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_highgui) +SET(OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui) ocv_check_dependencies(${OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS}) diff --git a/samples/directx/d3d_base.inl.hpp b/samples/directx/d3d_base.inl.hpp index 0ef5c00a00..0ba44eca15 100644 --- a/samples/directx/d3d_base.inl.hpp +++ b/samples/directx/d3d_base.inl.hpp @@ -8,6 +8,7 @@ #include "opencv2/imgproc/types_c.h" // cvtColor #include "opencv2/highgui.hpp" // imread #include "opencv2/core/directx.hpp" +#include "opencv2/imgcodecs.hpp" #include #include diff --git a/samples/gpu/CMakeLists.txt b/samples/gpu/CMakeLists.txt index ca5243aa0b..849e3109df 100644 --- a/samples/gpu/CMakeLists.txt +++ b/samples/gpu/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui +SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_cuda opencv_nonfree opencv_softcascade opencv_superres diff --git a/samples/java/clojure/simple-sample/src/simple_sample/core.clj b/samples/java/clojure/simple-sample/src/simple_sample/core.clj index 86a1b31130..fa25727b92 100644 --- a/samples/java/clojure/simple-sample/src/simple_sample/core.clj +++ b/samples/java/clojure/simple-sample/src/simple_sample/core.clj @@ -4,13 +4,13 @@ (ns simple-sample.core (:import [org.opencv.core Point Rect Mat CvType Size Scalar] - org.opencv.highgui.Highgui + org.opencv.imgcodecs.Imgcodecs org.opencv.imgproc.Imgproc)) (defn -main [& args] - (let [lena (Highgui/imread "resources/images/lena.png") + (let [lena (Imgcodecs/imread "resources/images/lena.png") blurred (Mat. 512 512 CvType/CV_8UC3)] (print "Blurring...") (Imgproc/GaussianBlur lena blurred (Size. 
5 5) 3 3) - (Highgui/imwrite "resources/images/blurred.png" blurred) + (Imgcodecs/imwrite "resources/images/blurred.png" blurred) (println "done!"))) diff --git a/samples/java/sbt/src/main/java/DetectFaceDemo.java b/samples/java/sbt/src/main/java/DetectFaceDemo.java index 07b4202e6a..686df02f93 100644 --- a/samples/java/sbt/src/main/java/DetectFaceDemo.java +++ b/samples/java/sbt/src/main/java/DetectFaceDemo.java @@ -4,7 +4,7 @@ import org.opencv.core.MatOfRect; import org.opencv.core.Point; import org.opencv.core.Rect; import org.opencv.core.Scalar; -import org.opencv.highgui.Highgui; +import org.opencv.imgcodecs.Imgcodecs; import org.opencv.objdetect.CascadeClassifier; /* @@ -19,7 +19,7 @@ public class DetectFaceDemo { // directory. CascadeClassifier faceDetector = new CascadeClassifier(getClass() .getResource("/lbpcascade_frontalface.xml").getPath()); - Mat image = Highgui.imread(getClass().getResource( + Mat image = Imgcodecs.imread(getClass().getResource( "/AverageMaleFace.jpg").getPath()); // Detect faces in the image. @@ -39,6 +39,6 @@ public class DetectFaceDemo { // Save the visualized detection. String filename = "faceDetection.png"; System.out.println(String.format("Writing %s", filename)); - Highgui.imwrite(filename, image); + Imgcodecs.imwrite(filename, image); } } diff --git a/samples/java/sbt/src/main/scala/ScalaCorrespondenceMatchingDemo.scala b/samples/java/sbt/src/main/scala/ScalaCorrespondenceMatchingDemo.scala index ea50200c9a..e8259827a3 100644 --- a/samples/java/sbt/src/main/scala/ScalaCorrespondenceMatchingDemo.scala +++ b/samples/java/sbt/src/main/scala/ScalaCorrespondenceMatchingDemo.scala @@ -1,4 +1,4 @@ -import org.opencv.highgui.Highgui +import org.opencv.imgcodecs.Imgcodecs import org.opencv.features2d.DescriptorExtractor import org.opencv.features2d.Features2d import org.opencv.core.MatOfKeyPoint @@ -45,8 +45,8 @@ object ScalaCorrespondenceMatchingDemo { } // Load the images from the |resources| directory. - val leftImage = Highgui.imread(getClass.getResource("/img1.png").getPath) - val rightImage = Highgui.imread(getClass.getResource("/img2.png").getPath) + val leftImage = Imgcodecs.imread(getClass.getResource("/img1.png").getPath) + val rightImage = Imgcodecs.imread(getClass.getResource("/img2.png").getPath) // Detect KeyPoints and extract descriptors. val (leftKeyPoints, leftDescriptors) = detectAndExtract(leftImage) @@ -64,6 +64,6 @@ object ScalaCorrespondenceMatchingDemo { Features2d.drawMatches(leftImage, leftKeyPoints, rightImage, rightKeyPoints, dmatches, correspondenceImage) val filename = "scalaCorrespondences.png" println(s"Writing ${filename}") - assert(Highgui.imwrite(filename, correspondenceImage)) + assert(Imgcodecs.imwrite(filename, correspondenceImage)) } } diff --git a/samples/java/sbt/src/main/scala/ScalaDetectFaceDemo.scala b/samples/java/sbt/src/main/scala/ScalaDetectFaceDemo.scala index 85fdeb0d7f..479a7ec7fb 100644 --- a/samples/java/sbt/src/main/scala/ScalaDetectFaceDemo.scala +++ b/samples/java/sbt/src/main/scala/ScalaDetectFaceDemo.scala @@ -2,7 +2,7 @@ import org.opencv.core.Core import org.opencv.core.MatOfRect import org.opencv.core.Point import org.opencv.core.Scalar -import org.opencv.highgui.Highgui +import org.opencv.imgcodecs.Imgcodecs import org.opencv.objdetect.CascadeClassifier import reflect._ @@ -16,7 +16,7 @@ object ScalaDetectFaceDemo { // Create a face detector from the cascade file in the resources directory. 
val faceDetector = new CascadeClassifier(getClass.getResource("/lbpcascade_frontalface.xml").getPath) - val image = Highgui.imread(getClass.getResource("/AverageMaleFace.jpg").getPath) + val image = Imgcodecs.imread(getClass.getResource("/AverageMaleFace.jpg").getPath) // Detect faces in the image. // MatOfRect is a special container class for Rect. @@ -38,6 +38,6 @@ object ScalaDetectFaceDemo { // Save the visualized detection. val filename = "scalaFaceDetection.png" println(s"Writing ${filename}") - assert(Highgui.imwrite(filename, image)) + assert(Imgcodecs.imwrite(filename, image)) } } diff --git a/samples/tapi/CMakeLists.txt b/samples/tapi/CMakeLists.txt index e1fc8552c6..cf88f3a98f 100644 --- a/samples/tapi/CMakeLists.txt +++ b/samples/tapi/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(OPENCV_TAPI_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_video opencv_highgui opencv_objdetect opencv_features2d opencv_calib3d opencv_nonfree opencv_flann) +SET(OPENCV_TAPI_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_video opencv_imgcodecs opencv_videoio opencv_highgui opencv_objdetect opencv_features2d opencv_calib3d opencv_nonfree opencv_flann) ocv_check_dependencies(${OPENCV_TAPI_SAMPLES_REQUIRED_DEPS}) diff --git a/samples/tapi/bgfg_segm.cpp b/samples/tapi/bgfg_segm.cpp index 2fa12bba9d..b9facd6324 100644 --- a/samples/tapi/bgfg_segm.cpp +++ b/samples/tapi/bgfg_segm.cpp @@ -4,6 +4,7 @@ #include "opencv2/core.hpp" #include "opencv2/core/ocl.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui.hpp" #include "opencv2/video.hpp" diff --git a/samples/tapi/camshift.cpp b/samples/tapi/camshift.cpp index 22c65bf698..c0f1d8fb69 100644 --- a/samples/tapi/camshift.cpp +++ b/samples/tapi/camshift.cpp @@ -2,6 +2,7 @@ #include "opencv2/core/ocl.hpp" #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/videoio/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include diff --git a/samples/tapi/clahe.cpp b/samples/tapi/clahe.cpp index a28f2ab05d..905ea1f1ae 100644 --- a/samples/tapi/clahe.cpp +++ b/samples/tapi/clahe.cpp @@ -3,6 +3,8 @@ #include "opencv2/core/ocl.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui/highgui.hpp" using namespace cv; diff --git a/samples/tapi/hog.cpp b/samples/tapi/hog.cpp index ee537b310e..389e1e5bef 100644 --- a/samples/tapi/hog.cpp +++ b/samples/tapi/hog.cpp @@ -6,6 +6,9 @@ #include #include #include +#include "opencv2/imgcodecs.hpp" +#include +#include #include #include #include diff --git a/samples/tapi/pyrlk_optical_flow.cpp b/samples/tapi/pyrlk_optical_flow.cpp index d4b77294be..9cdbd7c5bf 100644 --- a/samples/tapi/pyrlk_optical_flow.cpp +++ b/samples/tapi/pyrlk_optical_flow.cpp @@ -3,7 +3,9 @@ #include #include "opencv2/core/utility.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/highgui.hpp" #include "opencv2/core/ocl.hpp" #include "opencv2/video/video.hpp" diff --git a/samples/tapi/squares.cpp b/samples/tapi/squares.cpp index 402702e497..87a3894b93 100644 --- a/samples/tapi/squares.cpp +++ b/samples/tapi/squares.cpp @@ -6,6 +6,7 @@ #include "opencv2/core/ocl.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui/highgui.hpp" #include #include diff --git a/samples/tapi/surf_matcher.cpp 
b/samples/tapi/surf_matcher.cpp index 9066bfd3fb..2aca96f1bc 100644 --- a/samples/tapi/surf_matcher.cpp +++ b/samples/tapi/surf_matcher.cpp @@ -3,6 +3,7 @@ #include "opencv2/core/core.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/core/ocl.hpp" +#include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/features2d.hpp" #include "opencv2/calib3d.hpp" diff --git a/samples/tapi/tvl1_optical_flow.cpp b/samples/tapi/tvl1_optical_flow.cpp index 436ba715c8..f7bebacbeb 100644 --- a/samples/tapi/tvl1_optical_flow.cpp +++ b/samples/tapi/tvl1_optical_flow.cpp @@ -4,6 +4,8 @@ #include "opencv2/core/ocl.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/video/video.hpp"
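
Editor's note (not part of the patch): every hunk above follows the same migration pattern — image file reading/writing moves from the old highgui module to the new imgcodecs module, capture and video-writer classes move to the new videoio module, and highgui keeps only the window/GUI functions. As a minimal sketch of what a sample looks like after this split (the file name "input.png", the output name, and camera index 0 are placeholders chosen here for illustration, not taken from any sample in this patch):

// Post-split usage sketch: imgcodecs for image file I/O, videoio for capture,
// highgui only for windows. Assumes the OpenCV 3.x module layout introduced
// by this patch; "input.png", "output.png" and camera index 0 are placeholders.
#include "opencv2/imgcodecs.hpp"   // cv::imread, cv::imwrite
#include "opencv2/videoio.hpp"     // cv::VideoCapture
#include "opencv2/highgui.hpp"     // cv::imshow, cv::waitKey
#include "opencv2/imgproc.hpp"     // cv::cvtColor
#include <iostream>

int main()
{
    // imgcodecs: read an image from disk and write a processed copy back.
    cv::Mat img = cv::imread("input.png", cv::IMREAD_COLOR);
    if (img.empty())
    {
        std::cout << "Could not read input.png" << std::endl;
        return -1;
    }
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::imwrite("output.png", gray);

    // videoio: grab one frame from the default camera, if available.
    cv::VideoCapture cap(0);
    if (cap.isOpened())
    {
        cv::Mat frame;
        cap >> frame;
        if (!frame.empty())
        {
            // highgui: display only.
            cv::imshow("frame", frame);
            cv::waitKey(0);
        }
    }
    return 0;
}

The same split is reflected in the non-C++ hunks above: the Java/Scala/Clojure samples replace org.opencv.highgui.Highgui with org.opencv.imgcodecs.Imgcodecs for imread/imwrite, and the sample CMakeLists add opencv_imgcodecs and opencv_videoio alongside opencv_highgui in their dependency lists.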