Merge branch 4.x

Alexander Alekhin, 2021-10-15 15:59:36 +00:00
commit 7ba26ada12
537 changed files with 39768 additions and 10712 deletions


@ -1778,30 +1778,30 @@ TegraCvtColor_Invoker(bgrx2hsvf, bgrx2hsv, src_data + static_cast<size_t>(range.
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_CVT2PYUVTOBGR(src_data, src_step, dst_data, dst_step, dst_width, dst_height, dcn, swapBlue, uIdx) \
#define TEGRA_CVT2PYUVTOBGR_EX(y_data, y_step, uv_data, uv_step, dst_data, dst_step, dst_width, dst_height, dcn, swapBlue, uIdx) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
dcn == 3 ? \
uIdx == 0 ? \
(swapBlue ? \
CAROTENE_NS::yuv420i2rgb(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step) : \
CAROTENE_NS::yuv420i2bgr(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step)), \
CV_HAL_ERROR_OK : \
uIdx == 1 ? \
(swapBlue ? \
CAROTENE_NS::yuv420sp2rgb(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step) : \
CAROTENE_NS::yuv420sp2bgr(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step)), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED : \
@ -1809,29 +1809,32 @@ TegraCvtColor_Invoker(bgrx2hsvf, bgrx2hsv, src_data + static_cast<size_t>(range.
uIdx == 0 ? \
(swapBlue ? \
CAROTENE_NS::yuv420i2rgbx(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step) : \
CAROTENE_NS::yuv420i2bgrx(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step)), \
CV_HAL_ERROR_OK : \
uIdx == 1 ? \
(swapBlue ? \
CAROTENE_NS::yuv420sp2rgbx(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step) : \
CAROTENE_NS::yuv420sp2bgrx(CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, \
src_data + src_step * dst_height, src_step, \
y_data, y_step, \
uv_data, uv_step, \
dst_data, dst_step)), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_CVT2PYUVTOBGR(src_data, src_step, dst_data, dst_step, dst_width, dst_height, dcn, swapBlue, uIdx) \
TEGRA_CVT2PYUVTOBGR_EX(src_data, src_step, src_data + src_step * dst_height, src_step, dst_data, dst_step, \
dst_width, dst_height, dcn, swapBlue, uIdx);
#undef cv_hal_cvtBGRtoBGR
#define cv_hal_cvtBGRtoBGR TEGRA_CVTBGRTOBGR
@ -1847,6 +1850,8 @@ TegraCvtColor_Invoker(bgrx2hsvf, bgrx2hsv, src_data + static_cast<size_t>(range.
#define cv_hal_cvtBGRtoHSV TEGRA_CVTBGRTOHSV
#undef cv_hal_cvtTwoPlaneYUVtoBGR
#define cv_hal_cvtTwoPlaneYUVtoBGR TEGRA_CVT2PYUVTOBGR
#undef cv_hal_cvtTwoPlaneYUVtoBGREx
#define cv_hal_cvtTwoPlaneYUVtoBGREx TEGRA_CVT2PYUVTOBGR_EX
#endif // OPENCV_IMGPROC_HAL_INTERFACE_H
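This hunk splits the Tegra two-plane YUV hook into an `_EX` variant taking separate Y and UV plane pointers, while the old `TEGRA_CVT2PYUVTOBGR` macro becomes a wrapper that assumes the packed NV12/NV21 layout (UV plane at `src_data + src_step * dst_height`). A minimal sketch of that packed layout through OpenCV's public Python API (not the HAL macros themselves; the buffer values here are arbitrary):

    import numpy as np
    import cv2 as cv

    h, w = 4, 4                                # even sizes, as the converters require
    yuv = np.empty((h * 3 // 2, w), np.uint8)  # NV12: Y plane, then interleaved UV
    yuv[:h, :] = 128                           # Y plane (rows 0..h-1)
    yuv[h:, :] = 128                           # UV half-plane starts at row h,
                                               # i.e. at src_data + src_step * dst_height
    bgr = cv.cvtColor(yuv, cv.COLOR_YUV2BGR_NV12)
    print(bgr.shape)                           # (4, 4, 3)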


@ -1,8 +1,8 @@
# Binaries branch name: ffmpeg/master_20210608
# Binaries were created for OpenCV: eaa9228a4fdfb9c2465aea65a50ce2d16b55dce0
ocv_update(FFMPEG_BINARIES_COMMIT "213fcd5d4897319a83207406036c4a5957fba010")
ocv_update(FFMPEG_FILE_HASH_BIN32 "bab661341c30862fa88627130219c0a5")
ocv_update(FFMPEG_FILE_HASH_BIN64 "ac99f9767a83103c31709628af685924")
# Binaries branch name: ffmpeg/master_20211005
# Binaries were created for OpenCV: 672399c751c431bbe52818b33fd3ca17b51e0e16
ocv_update(FFMPEG_BINARIES_COMMIT "40b4666d1aa374205fd61373496e15d92ecd5313")
ocv_update(FFMPEG_FILE_HASH_BIN32 "c2f9a897d464a2dce2286f8067ad9d90")
ocv_update(FFMPEG_FILE_HASH_BIN64 "878a4e8fe5a4d68f18c9cdde543b9ead")
ocv_update(FFMPEG_FILE_HASH_CMAKE "8862c87496e2e8c375965e1277dee1c7")
function(download_win_ffmpeg script_var)


@ -32,7 +32,9 @@ endif()
# Define the library target:
# ----------------------------------------------------------------------------------
add_definitions(-DWEBP_USE_THREAD)
if(NOT OPENCV_DISABLE_THREAD_SUPPORT)
add_definitions(-DWEBP_USE_THREAD)
endif()
add_library(${WEBP_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
if(ANDROID)


@ -923,6 +923,11 @@ int ovx_hal_cvtGraytoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep,
}
int ovx_hal_cvtTwoPlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx)
{
return ovx_hal_cvtTwoPlaneYUVtoBGREx(a, astep, a + h * astep, astep, b, bstep, w, h, bcn, swapBlue, uIdx);
}
int ovx_hal_cvtTwoPlaneYUVtoBGREx(const uchar * a, size_t astep, const uchar * b, size_t bstep, uchar * c, size_t cstep, int w, int h, int bcn, bool swapBlue, int uIdx)
{
if (skipSmallImages<VX_KERNEL_COLOR_CONVERT>(w, h))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
@ -933,8 +938,7 @@ int ovx_hal_cvtTwoPlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t
if (w & 1 || h & 1) // It's not described in spec but sample implementation unable to convert odd sized images
return CV_HAL_ERROR_NOT_IMPLEMENTED;
refineStep(w, h, uIdx ? VX_DF_IMAGE_NV21 : VX_DF_IMAGE_NV12, astep);
refineStep(w, h, bcn == 3 ? VX_DF_IMAGE_RGB : VX_DF_IMAGE_RGBX, bstep);
try
{
ivx::Context ctx = getOpenVXHALContext();
@ -943,8 +947,8 @@ int ovx_hal_cvtTwoPlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t
std::vector<void *> ptrs;
addr.push_back(ivx::Image::createAddressing(w, h, 1, (vx_int32)astep));
ptrs.push_back((void*)a);
addr.push_back(ivx::Image::createAddressing(w / 2, h / 2, 2, (vx_int32)astep));
ptrs.push_back((void*)(a + h * astep));
addr.push_back(ivx::Image::createAddressing(w / 2, h / 2, 2, (vx_int32)bstep));
ptrs.push_back((void*)b);
vxImage
ia = ivx::Image::createFromHandle(ctx, uIdx ? VX_DF_IMAGE_NV21 : VX_DF_IMAGE_NV12, addr, ptrs);
@ -952,7 +956,7 @@ int ovx_hal_cvtTwoPlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t
return CV_HAL_ERROR_NOT_IMPLEMENTED; // OpenCV store NV12/NV21 as RANGE_RESTRICTED while OpenVX expect RANGE_FULL
vxImage
ib = ivx::Image::createFromHandle(ctx, bcn == 3 ? VX_DF_IMAGE_RGB : VX_DF_IMAGE_RGBX,
ivx::Image::createAddressing(w, h, bcn, (vx_int32)bstep), b);
ivx::Image::createAddressing(w, h, bcn, (vx_int32)cstep), c);
ivx::IVX_CHECK_STATUS(vxuColorConvert(ctx, ia, ib));
}
catch (ivx::RuntimeError & e)


@ -49,6 +49,7 @@ int ovx_hal_morph(cvhalFilter2D *filter_context, uchar *a, size_t astep, uchar *
int ovx_hal_cvtBGRtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int depth, int acn, int bcn, bool swapBlue);
int ovx_hal_cvtGraytoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int depth, int bcn);
int ovx_hal_cvtTwoPlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx);
int ovx_hal_cvtTwoPlaneYUVtoBGREx(const uchar * a, size_t astep, const uchar * b, size_t bstep, uchar * c, size_t cstep, int w, int h, int bcn, bool swapBlue, int uIdx);
int ovx_hal_cvtThreePlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx);
int ovx_hal_cvtBGRtoThreePlaneYUV(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int acn, bool swapBlue, int uIdx);
int ovx_hal_cvtOnePlaneYUVtoBGR(const uchar * a, size_t astep, uchar * b, size_t bstep, int w, int h, int bcn, bool swapBlue, int uIdx, int ycn);
@ -130,6 +131,8 @@ int ovx_hal_integral(int depth, int sdepth, int, const uchar * a, size_t astep,
#define cv_hal_cvtGraytoBGR ovx_hal_cvtGraytoBGR
#undef cv_hal_cvtTwoPlaneYUVtoBGR
#define cv_hal_cvtTwoPlaneYUVtoBGR ovx_hal_cvtTwoPlaneYUVtoBGR
#undef cv_hal_cvtTwoPlaneYUVtoBGREx
#define cv_hal_cvtTwoPlaneYUVtoBGREx ovx_hal_cvtTwoPlaneYUVtoBGREx
#undef cv_hal_cvtThreePlaneYUVtoBGR
#define cv_hal_cvtThreePlaneYUVtoBGR ovx_hal_cvtThreePlaneYUVtoBGR
#undef cv_hal_cvtBGRtoThreePlaneYUV

3rdparty/readme.txt (vendored)

@ -31,7 +31,7 @@ libpng Portable Network Graphics library.
libtiff Tag Image File Format (TIFF) Software
Copyright (c) 1988-1997 Sam Leffler
Copyright (c) 1991-1997 Silicon Graphics, Inc.
See libtiff home page http://www.remotesensing.org/libtiff/
See libtiff home page http://www.libtiff.org/
for details and links to the source code
WITH_TIFF CMake option must be ON to add libtiff & zlib support to imgcodecs.
@ -51,7 +51,9 @@ jasper JasPer is a collection of software
Copyright (c) 1999-2000 The University of British Columbia
Copyright (c) 2001-2003 Michael David Adams
The JasPer license can be found in libjasper.
See JasPer official GitHub repository
https://github.com/jasper-software/jasper.git
for details and links to source code
------------------------------------------------------------------------------------
openexr OpenEXR is a high dynamic-range (HDR) image file format developed
by Industrial Light & Magic for use in computer imaging applications.


@ -513,6 +513,8 @@ OCV_OPTION(OPENCV_GENERATE_SETUPVARS "Generate setup_vars* scripts" ON IF (NOT
OCV_OPTION(ENABLE_CONFIG_VERIFICATION "Fail build if actual configuration doesn't match requested (WITH_XXX != HAVE_XXX)" OFF)
OCV_OPTION(OPENCV_ENABLE_MEMALIGN "Enable posix_memalign or memalign usage" ON)
OCV_OPTION(OPENCV_DISABLE_FILESYSTEM_SUPPORT "Disable filesystem support" OFF)
OCV_OPTION(OPENCV_DISABLE_THREAD_SUPPORT "Build the library without multi-threaded code." OFF)
OCV_OPTION(OPENCV_SEMIHOSTING "Build the library for semihosting target (Arm). See https://developer.arm.com/documentation/100863/latest." OFF)
OCV_OPTION(ENABLE_PYLINT "Add target with Pylint checks" (BUILD_DOCS OR BUILD_EXAMPLES) IF (NOT CMAKE_CROSSCOMPILING AND NOT APPLE_FRAMEWORK) )
OCV_OPTION(ENABLE_FLAKE8 "Add target with Python flake8 checker" (BUILD_DOCS OR BUILD_EXAMPLES) IF (NOT CMAKE_CROSSCOMPILING AND NOT APPLE_FRAMEWORK) )
@ -662,7 +664,7 @@ if(UNIX)
elseif(EMSCRIPTEN)
# no need to link to system libs with emscripten
elseif(QNXNTO)
# no need to link to system libs with QNX
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m)
else()
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m pthread rt)
endif()
@ -670,6 +672,11 @@ if(UNIX)
set(HAVE_PTHREAD 1)
endif()
# Ensure that libpthread is not listed as one of the libraries to pass to the linker.
if (OPENCV_DISABLE_THREAD_SUPPORT)
list(REMOVE_ITEM OPENCV_LINKER_LIBS pthread)
endif()
if(OPENCV_ENABLE_MEMALIGN)
CHECK_SYMBOL_EXISTS(posix_memalign stdlib.h HAVE_POSIX_MEMALIGN)
CHECK_INCLUDE_FILE(malloc.h HAVE_MALLOC_H)
@ -914,7 +921,7 @@ add_subdirectory(include)
ocv_add_modules_compiler_options()
# OpenCV modules
add_subdirectory(modules)
ocv_register_modules()
# Generate targets for documentation
add_subdirectory(doc)
@ -1244,15 +1251,20 @@ endif(WIN32)
# ========================== GUI ==========================
status("")
status(" GUI: ")
status(" GUI: " "${OPENCV_HIGHGUI_BUILTIN_BACKEND}")
if(WITH_QT OR HAVE_QT)
if(HAVE_QT5)
status(" QT:" "YES (ver ${Qt5Core_VERSION_STRING})")
status(" QT OpenGL support:" HAVE_QT_OPENGL THEN "YES (${Qt5OpenGL_LIBRARIES} ${Qt5OpenGL_VERSION_STRING})" ELSE NO)
elseif(HAVE_QT)
if(HAVE_QT)
status(" QT:" "YES (ver ${QT_VERSION_MAJOR}.${QT_VERSION_MINOR}.${QT_VERSION_PATCH} ${QT_EDITION})")
status(" QT OpenGL support:" HAVE_QT_OPENGL THEN "YES (${QT_QTOPENGL_LIBRARY})" ELSE NO)
if(HAVE_QT_OPENGL)
if(Qt${QT_VERSION_MAJOR}OpenGL_LIBRARIES)
status(" QT OpenGL support:" HAVE_QT_OPENGL THEN "YES (${Qt${QT_VERSION_MAJOR}OpenGL_LIBRARIES} ${Qt${QT_VERSION_MAJOR}OpenGL_VERSION_STRING})" ELSE NO)
else()
status(" QT OpenGL support:" HAVE_QT_OPENGL THEN "YES (${QT_QTOPENGL_LIBRARY})" ELSE NO)
endif()
else()
status(" QT OpenGL support:" "NO")
endif()
else()
status(" QT:" "NO")
endif()
@ -1470,6 +1482,15 @@ ocv_build_features_string(parallel_status EXCLUSIVE
ELSE "none")
status("")
status(" Parallel framework:" "${parallel_status}")
if (OPENCV_DISABLE_THREAD_SUPPORT)
status("" "Multi thread code explicitly disabled with OPENCV_DISABLE_THREAD_SUPPORT.")
if(HAVE_PTHREADS_PF OR HAVE_HPX OR HAVE_OPENMP OR HAVE_GCD OR HAVE_CONCURRENCY)
message(FATAL_ERROR "Not all parallel frameworks have been disabled (using ${parallel_status}).")
endif()
if(HAVE_PTHREAD)
message(FATAL_ERROR "Thread execution might be in use in some component.")
endif()
endif()
if(CV_TRACE OR OPENCV_TRACE)
ocv_build_features_string(trace_status EXCLUSIVE
@ -1521,6 +1542,7 @@ if(WITH_INF_ENGINE OR INF_ENGINE_TARGET)
if(INF_ENGINE_TARGET)
list(GET INF_ENGINE_TARGET 0 ie_target)
set(__msg "YES (${INF_ENGINE_RELEASE} / ${INF_ENGINE_VERSION})")
ocv_get_imported_target(ie_target "${ie_target}")
get_target_property(_lib ${ie_target} IMPORTED_LOCATION)
get_target_property(_lib_imp_rel ${ie_target} IMPORTED_IMPLIB_RELEASE)
get_target_property(_lib_imp_dbg ${ie_target} IMPORTED_IMPLIB_DEBUG)
@ -1544,7 +1566,7 @@ if(WITH_INF_ENGINE OR INF_ENGINE_TARGET)
endif()
if(WITH_NGRAPH OR HAVE_NGRAPH)
if(HAVE_NGRAPH)
set(__target ngraph::ngraph)
ocv_get_imported_target(__target ngraph::ngraph)
set(__msg "YES (${ngraph_VERSION})")
get_target_property(_lib ${__target} IMPORTED_LOCATION)
get_target_property(_lib_imp_rel ${__target} IMPORTED_IMPLIB_RELEASE)


@ -1,9 +1,10 @@
/*************************************************
USAGE:
./model_diagnostics -m <onnx file location>
./model_diagnostics -m <model file location>
**************************************************/
#include <opencv2/dnn.hpp>
#include <opencv2/core/utils/filesystem.hpp>
#include <opencv2/dnn/utils/debug_utils.hpp>
#include <iostream>
@ -32,7 +33,7 @@ static std::string checkFileExists(const std::string& fileName)
}
std::string diagnosticKeys =
"{ model m | | Path to the model .onnx file. }"
"{ model m | | Path to the model file. }"
"{ config c | | Path to the model configuration file. }"
"{ framework f | | [Optional] Name of the model framework. }";
@ -41,7 +42,7 @@ std::string diagnosticKeys =
int main( int argc, const char** argv )
{
CommandLineParser argParser(argc, argv, diagnosticKeys);
argParser.about("Use this tool to run the diagnostics of provided ONNX model"
argParser.about("Use this tool to run the diagnostics of provided ONNX/TF model"
"to obtain the information about its support (supported layers).");
if (argc == 1)
@ -57,6 +58,7 @@ int main( int argc, const char** argv )
CV_Assert(!model.empty());
enableModelDiagnostics(true);
skipModelImport(true);
redirectError(diagnosticsErrorCallback, NULL);
Net ocvNet = readNet(model, config, frameworkId);


@ -178,8 +178,17 @@ if(CV_GCC OR CV_CLANG)
add_extra_compiler_option(-Wno-long-long)
endif()
# We need pthread's
if(UNIX AND NOT ANDROID AND NOT (APPLE AND CV_CLANG)) # TODO
# We need pthread's, unless we have explicitly disabled multi-thread execution.
if(NOT OPENCV_DISABLE_THREAD_SUPPORT
AND (
(UNIX
AND NOT ANDROID
AND NOT (APPLE AND CV_CLANG)
AND NOT EMSCRIPTEN
)
OR (EMSCRIPTEN AND WITH_PTHREADS_PF) # https://github.com/opencv/opencv/issues/20285
)
) # TODO
add_extra_compiler_option(-pthread)
endif()
@ -398,6 +407,9 @@ if(MSVC)
endif()
endif()
# Enable [[attribute]] syntax checking to prevent silent failure: "attribute is ignored in this syntactic position"
add_extra_compiler_option("/w15240")
if(NOT ENABLE_NOISY_WARNINGS)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127) # conditional expression is constant
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4251) # class 'std::XXX' needs to have dll-interface to be used by clients of YYY


@ -112,7 +112,7 @@ if(CUDA_FOUND)
if(CUDA_GENERATION)
if(NOT ";${_generations};" MATCHES ";${CUDA_GENERATION};")
string(REPLACE ";" ", " _generations "${_generations}")
message(FATAL_ERROR "ERROR: ${_generations} Generations are suppered.")
message(FATAL_ERROR "ERROR: ${_generations} Generations are supported.")
endif()
unset(CUDA_ARCH_BIN CACHE)
unset(CUDA_ARCH_PTX CACHE)


@ -171,6 +171,8 @@ elseif(MSVC)
set(OpenCV_RUNTIME vc15)
elseif(MSVC_VERSION MATCHES "^192[0-9]$")
set(OpenCV_RUNTIME vc16)
elseif(MSVC_VERSION MATCHES "^193[0-9]$")
set(OpenCV_RUNTIME vc17)
else()
message(WARNING "OpenCV does not recognize MSVC_VERSION \"${MSVC_VERSION}\". Cannot set OpenCV_RUNTIME")
endif()


@ -9,9 +9,14 @@ set(HALIDE_ROOT_DIR "${HALIDE_ROOT_DIR}" CACHE PATH "Halide root directory")
if(NOT HAVE_HALIDE)
find_package(Halide QUIET) # Try CMake-based config files
if(Halide_FOUND)
set(HALIDE_INCLUDE_DIRS "${Halide_INCLUDE_DIRS}" CACHE PATH "Halide include directories" FORCE)
set(HALIDE_LIBRARIES "${Halide_LIBRARIES}" CACHE PATH "Halide libraries" FORCE)
set(HAVE_HALIDE TRUE)
if(TARGET Halide::Halide) # modern Halide scripts defines imported target
set(HALIDE_INCLUDE_DIRS "")
set(HALIDE_LIBRARIES "Halide::Halide")
set(HAVE_HALIDE TRUE)
else()
# using HALIDE_INCLUDE_DIRS / Halide_LIBRARIES
set(HAVE_HALIDE TRUE)
endif()
endif()
endif()
@ -28,18 +33,15 @@ if(NOT HAVE_HALIDE AND HALIDE_ROOT_DIR)
)
if(HALIDE_LIBRARY AND HALIDE_INCLUDE_DIR)
# TODO try_compile
set(HALIDE_INCLUDE_DIRS "${HALIDE_INCLUDE_DIR}" CACHE PATH "Halide include directories" FORCE)
set(HALIDE_LIBRARIES "${HALIDE_LIBRARY}" CACHE PATH "Halide libraries" FORCE)
set(HALIDE_INCLUDE_DIRS "${HALIDE_INCLUDE_DIR}")
set(HALIDE_LIBRARIES "${HALIDE_LIBRARY}")
set(HAVE_HALIDE TRUE)
endif()
if(NOT HAVE_HALIDE)
ocv_clear_vars(HALIDE_LIBRARIES HALIDE_INCLUDE_DIRS CACHE)
endif()
endif()
if(HAVE_HALIDE)
include_directories(${HALIDE_INCLUDE_DIRS})
if(HALIDE_INCLUDE_DIRS)
include_directories(${HALIDE_INCLUDE_DIRS})
endif()
list(APPEND OPENCV_LINKER_LIBS ${HALIDE_LIBRARIES})
else()
ocv_clear_vars(HALIDE_INCLUDE_DIRS HALIDE_LIBRARIES)
endif()


@ -99,6 +99,20 @@ if(InferenceEngine_FOUND)
message(STATUS "Detected InferenceEngine: cmake package (${InferenceEngine_VERSION})")
endif()
if(DEFINED InferenceEngine_VERSION)
message(STATUS "InferenceEngine: ${InferenceEngine_VERSION}")
if(NOT INF_ENGINE_RELEASE AND NOT (InferenceEngine_VERSION VERSION_LESS "2021.4"))
math(EXPR INF_ENGINE_RELEASE_INIT "${InferenceEngine_VERSION_MAJOR} * 1000000 + ${InferenceEngine_VERSION_MINOR} * 10000 + ${InferenceEngine_VERSION_PATCH} * 100")
endif()
endif()
if(NOT INF_ENGINE_RELEASE AND NOT INF_ENGINE_RELEASE_INIT)
message(STATUS "WARNING: InferenceEngine version has not been set, 2021.4.1 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
set(INF_ENGINE_RELEASE_INIT "2021040100")
elseif(DEFINED INF_ENGINE_RELEASE)
set(INF_ENGINE_RELEASE_INIT "${INF_ENGINE_RELEASE}")
endif()
set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE_INIT}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
find_path(ie_custom_inc "inference_engine.hpp" PATHS "${INF_ENGINE_INCLUDE_DIRS}" NO_DEFAULT_PATH)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
@ -134,16 +148,8 @@ endif()
# Add more features to the target
if(INF_ENGINE_TARGET)
if(InferenceEngine_VERSION VERSION_GREATER_EQUAL "2021.4")
math(EXPR INF_ENGINE_RELEASE "${InferenceEngine_VERSION_MAJOR} * 1000000 + ${InferenceEngine_VERSION_MINOR} * 10000 + ${InferenceEngine_VERSION_PATCH} * 100")
endif()
if(NOT INF_ENGINE_RELEASE)
message(WARNING "InferenceEngine version has not been set, 2021.3 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
set(INF_ENGINE_RELEASE "2021030000")
endif()
set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
)
endif()
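The `INF_ENGINE_RELEASE` value above uses the YYYYAABBCC encoding computed by the `math(EXPR ...)` line: major * 1000000 + minor * 10000 + patch * 100. A throwaway script just to illustrate the arithmetic behind the 2021.4.1 default:

    major, minor, patch = 2021, 4, 1
    print(major * 1000000 + minor * 10000 + patch * 100)  # 2021040100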


@ -1,34 +1,34 @@
# VTK 9.0
if(NOT VTK_FOUND)
find_package(VTK 9 QUIET NAMES vtk COMPONENTS
FiltersExtraction
FiltersSources
FiltersTexture
IOExport
IOGeometry
IOPLY
InteractionStyle
RenderingCore
RenderingLOD
RenderingOpenGL2
NO_MODULE)
endif()
# VTK 6.x components
if(NOT VTK_FOUND)
find_package(VTK QUIET COMPONENTS vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport NO_MODULE)
IF(VTK_FOUND)
IF(VTK_RENDERING_BACKEND) #in vtk 7, the rendering backend is exported as a var.
find_package(VTK QUIET COMPONENTS vtkRendering${VTK_RENDERING_BACKEND} vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport vtkIOGeometry NO_MODULE)
ELSE(VTK_RENDERING_BACKEND)
find_package(VTK QUIET COMPONENTS vtkRenderingOpenGL vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport NO_MODULE)
ENDIF(VTK_RENDERING_BACKEND)
ENDIF(VTK_FOUND)
endif()
# VTK 5.x components
if(NOT VTK_FOUND)
find_package(VTK QUIET COMPONENTS vtkCommon NO_MODULE)
find_package(VTK QUIET NAMES vtk VTK)
if(VTK_FOUND)
if(NOT (VTK_VERSION VERSION_LESS "9.0.0") AND (VTK_VERSION VERSION_LESS "10.0.0")) # VTK 9.x
find_package(VTK 9 QUIET NAMES vtk COMPONENTS
FiltersExtraction
FiltersSources
FiltersTexture
IOExport
IOGeometry
IOPLY
InteractionStyle
RenderingCore
RenderingLOD
RenderingOpenGL2
NO_MODULE)
elseif(VTK_VERSION VERSION_GREATER "5") # VTK 6.x components
find_package(VTK QUIET COMPONENTS vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport NO_MODULE)
IF(VTK_FOUND)
IF(VTK_RENDERING_BACKEND) #in vtk 7, the rendering backend is exported as a var.
find_package(VTK QUIET COMPONENTS vtkRendering${VTK_RENDERING_BACKEND} vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport vtkIOGeometry NO_MODULE)
ELSE(VTK_RENDERING_BACKEND)
find_package(VTK QUIET COMPONENTS vtkRenderingOpenGL vtkInteractionStyle vtkRenderingLOD vtkIOPLY vtkFiltersTexture vtkRenderingFreeType vtkIOExport NO_MODULE)
ENDIF(VTK_RENDERING_BACKEND)
ENDIF(VTK_FOUND)
elseif(VTK_VERSION VERSION_EQUAL "5") # VTK 5.x components
find_package(VTK QUIET COMPONENTS vtkCommon NO_MODULE)
else()
set(VTK_FOUND FALSE)
endif()
endif()
endif()
if(NOT VTK_FOUND)
@ -44,32 +44,15 @@ if(VTK_VERSION VERSION_LESS "5.8.0")
endif()
# Different Qt versions can't be linked together
if(HAVE_QT5 AND VTK_VERSION VERSION_LESS "6.0.0")
if(VTK_USE_QT)
message(STATUS "VTK support is disabled. Incompatible combination: OpenCV + Qt5 and VTK ver.${VTK_VERSION} + Qt4")
endif()
endif()
# Different Qt versions can't be linked together. VTK 6.0.0 doesn't provide a way to get Qt version it was linked with
if(HAVE_QT5 AND VTK_VERSION VERSION_EQUAL "6.0.0" AND NOT DEFINED FORCE_VTK)
message(STATUS "VTK support is disabled. Possible incompatible combination: OpenCV+Qt5, and VTK ver.${VTK_VERSION} with Qt4")
message(STATUS "If it is known that VTK was compiled without Qt4, please define '-DFORCE_VTK=TRUE' flag in CMake")
if((HAVE_QT AND VTK_USE_QT)
AND NOT DEFINED FORCE_VTK # deprecated
AND NOT DEFINED OPENCV_FORCE_VTK
)
message(STATUS "VTK support is disabled. Possible incompatible combination: OpenCV+Qt, and VTK ver.${VTK_VERSION} with Qt")
message(STATUS "If it is known that VTK was compiled without Qt4, please define '-DOPENCV_FORCE_VTK=TRUE' flag in CMake")
return()
endif()
# Different Qt versions can't be linked together
if(HAVE_QT AND VTK_VERSION VERSION_GREATER "6.0.0" AND NOT ${VTK_QT_VERSION} STREQUAL "")
if(HAVE_QT5 AND ${VTK_QT_VERSION} EQUAL "4")
message(STATUS "VTK support is disabled. Incompatible combination: OpenCV + Qt5 and VTK ver.${VTK_VERSION} + Qt4")
return()
endif()
if(NOT HAVE_QT5 AND ${VTK_QT_VERSION} EQUAL "5")
message(STATUS "VTK support is disabled. Incompatible combination: OpenCV + Qt4 and VTK ver.${VTK_VERSION} + Qt5")
return()
endif()
endif()
try_compile(VTK_COMPILE_STATUS
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/vtk_test.cpp"


@ -17,7 +17,7 @@ else()
endif()
# --- Concurrency ---
if(MSVC AND NOT HAVE_TBB)
if(MSVC AND NOT HAVE_TBB AND NOT OPENCV_DISABLE_THREAD_SUPPORT)
set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/concurrencytest.cpp")
file(WRITE "${_fname}" "#if _MSC_VER < 1600\n#error\n#endif\nint main() { return 0; }\n")
try_compile(HAVE_CONCURRENCY "${CMAKE_BINARY_DIR}" "${_fname}")


@ -2,34 +2,52 @@
# Detect 3rd-party GUI libraries
# ----------------------------------------------------------------------------
#--- Win32 UI ---
ocv_clear_vars(HAVE_WIN32UI)
if(WITH_WIN32UI)
try_compile(HAVE_WIN32UI
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/win32uitest.cpp"
CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=user32;gdi32")
endif()
# --- QT4/5 ---
ocv_clear_vars(HAVE_QT HAVE_QT5)
if(WITH_QT)
if(NOT WITH_QT EQUAL 4)
find_package(Qt5 COMPONENTS Core Gui Widgets Test Concurrent REQUIRED NO_MODULE)
if(Qt5_FOUND)
set(HAVE_QT5 ON)
set(HAVE_QT ON)
find_package(Qt5 COMPONENTS OpenGL QUIET)
if(Qt5OpenGL_FOUND)
set(QT_QTOPENGL_FOUND ON)
endif()
endif()
endif()
if(NOT HAVE_QT)
find_package(Qt4 REQUIRED QtCore QtGui QtTest)
if(QT4_FOUND)
set(HAVE_QT TRUE)
macro(ocv_find_package_Qt4)
find_package(Qt4 COMPONENTS QtCore QtGui QtTest ${ARGN})
if(QT4_FOUND)
set(QT_FOUND 1)
ocv_assert(QT_VERSION_MAJOR EQUAL 4)
endif()
endmacro()
macro(ocv_find_package_Qt OCV_QT_VER)
find_package(Qt${OCV_QT_VER} COMPONENTS Core Gui Widgets Test Concurrent ${ARGN} NO_MODULE)
if(Qt${OCV_QT_VER}_FOUND)
set(QT_FOUND 1)
set(QT_VERSION "${Qt${OCV_QT_VER}_VERSION}")
set(QT_VERSION_MAJOR "${Qt${OCV_QT_VER}_VERSION_MAJOR}")
set(QT_VERSION_MINOR "${Qt${OCV_QT_VER}_VERSION_MINOR}")
set(QT_VERSION_PATCH "${Qt${OCV_QT_VER}_VERSION_PATCH}")
set(QT_VERSION_TWEAK "${Qt${OCV_QT_VER}_VERSION_TWEAK}")
set(QT_VERSION_COUNT "${Qt${OCV_QT_VER}_VERSION_COUNT}")
endif()
endmacro()
if(WITH_QT)
if(NOT WITH_QT GREATER 0)
# BUG: Qt5Config.cmake script can't handle components properly: find_package(QT NAMES Qt6 Qt5 REQUIRED NO_MODULE COMPONENTS Core Gui Widgets Test Concurrent)
ocv_find_package_Qt(6 QUIET)
if(NOT QT_FOUND)
ocv_find_package_Qt(5 QUIET)
endif()
if(NOT QT_FOUND)
ocv_find_package_Qt4(QUIET)
endif()
elseif(WITH_QT EQUAL 4)
ocv_find_package_Qt4(REQUIRED)
else() # WITH_QT=<major version>
ocv_find_package_Qt("${WITH_QT}" REQUIRED)
endif()
if(QT_FOUND)
set(HAVE_QT ON)
if(QT_VERSION_MAJOR GREATER 4)
find_package(Qt${QT_VERSION_MAJOR} COMPONENTS OpenGL QUIET)
if(Qt${QT_VERSION_MAJOR}OpenGL_FOUND)
set(QT_QTOPENGL_FOUND ON) # HAVE_QT_OPENGL is defined below
endif()
endif()
endif()
endif()


@ -254,7 +254,7 @@ function(_glob_locations out_paths out_names)
list(LENGTH paths before)
get_filename_component(path "${path}" ABSOLUTE)
# Either module itself
if(NOT path STREQUAL CMAKE_CURRENT_SOURCE_DIR AND EXISTS "${path}/CMakeLists.txt")
if(NOT path STREQUAL "${OpenCV_SOURCE_DIR}/modules" AND EXISTS "${path}/CMakeLists.txt")
get_filename_component(name "${path}" NAME)
list(APPEND paths "${path}")
list(APPEND names "${name}")
@ -296,7 +296,7 @@ macro(_add_modules_1 paths names)
list(GET ${names} ${i} __name)
#message(STATUS "First pass: ${__name} => ${__path}")
include("${__path}/cmake/init.cmake" OPTIONAL)
add_subdirectory("${__path}" "${CMAKE_CURRENT_BINARY_DIR}/.firstpass/${__name}")
add_subdirectory("${__path}" "${OpenCV_BINARY_DIR}/modules/.firstpass/${__name}")
endforeach()
endif()
endmacro()
@ -316,7 +316,7 @@ macro(_add_modules_2)
endif()
string(REGEX REPLACE "^opencv_" "" name "${m}")
#message(STATUS "Second pass: ${name} => ${OPENCV_MODULE_${m}_LOCATION}")
add_subdirectory("${OPENCV_MODULE_${m}_LOCATION}" "${CMAKE_CURRENT_BINARY_DIR}/${name}")
add_subdirectory("${OPENCV_MODULE_${m}_LOCATION}" "${OpenCV_BINARY_DIR}/modules/${name}")
endif()
ocv_cmake_hook(POST_MODULES_CREATE_${the_module})
endforeach()
@ -369,7 +369,6 @@ macro(ocv_glob_modules main_root)
__ocv_resolve_dependencies()
# create modules
set(OPENCV_INITIAL_PASS OFF PARENT_SCOPE)
set(OPENCV_INITIAL_PASS OFF)
ocv_cmake_hook(PRE_MODULES_CREATE)
_add_modules_2(${OPENCV_MODULES_BUILD})
@ -377,6 +376,37 @@ macro(ocv_glob_modules main_root)
endmacro()
# called by root CMakeLists.txt
macro(ocv_register_modules)
if(NOT OPENCV_MODULES_PATH)
set(OPENCV_MODULES_PATH "${OpenCV_SOURCE_DIR}/modules")
endif()
ocv_glob_modules(${OPENCV_MODULES_PATH} ${OPENCV_EXTRA_MODULES_PATH})
# build lists of modules to be documented
set(OPENCV_MODULES_MAIN "")
set(OPENCV_MODULES_EXTRA "")
foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MODULES_DISABLED_AUTO} ${OPENCV_MODULES_DISABLED_FORCE})
string(REGEX REPLACE "^opencv_" "" mod "${mod}")
if("${OPENCV_MODULE_opencv_${mod}_LOCATION}" STREQUAL "${OpenCV_SOURCE_DIR}/modules/${mod}")
list(APPEND OPENCV_MODULES_MAIN ${mod})
else()
list(APPEND OPENCV_MODULES_EXTRA ${mod})
endif()
endforeach()
ocv_list_sort(OPENCV_MODULES_MAIN)
ocv_list_sort(OPENCV_MODULES_EXTRA)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video 3d stereo features2d calib objdetect dnn ml flann photo stitching)
list(REMOVE_ITEM OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES})
set(OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES} ${OPENCV_MODULES_MAIN})
set(OPENCV_MODULES_MAIN ${OPENCV_MODULES_MAIN} CACHE INTERNAL "List of main modules" FORCE)
set(OPENCV_MODULES_EXTRA ${OPENCV_MODULES_EXTRA} CACHE INTERNAL "List of extra modules" FORCE)
endmacro()
# disables OpenCV module with missing dependencies
function(__ocv_module_turn_off the_module)
list(REMOVE_ITEM OPENCV_MODULES_DISABLED_AUTO "${the_module}")
@ -877,6 +907,7 @@ macro(ocv_create_module)
endmacro()
macro(_ocv_create_module)
add_definitions(-D__OPENCV_BUILD=1)
ocv_compiler_optimization_process_sources(OPENCV_MODULE_${the_module}_SOURCES OPENCV_MODULE_${the_module}_DEPS_EXT ${the_module})
set(__module_headers ${OPENCV_MODULE_${the_module}_HEADERS})


@ -1481,8 +1481,8 @@ function(ocv_target_link_libraries target)
if(NOT LINK_PENDING STREQUAL "")
__ocv_push_target_link_libraries(${LINK_MODE} ${LINK_PENDING})
set(LINK_PENDING "")
set(LINK_MODE "${dep}")
endif()
set(LINK_MODE "${dep}")
else()
if(BUILD_opencv_world)
if(OPENCV_MODULE_${dep}_IS_PART_OF_WORLD)
@ -1619,6 +1619,18 @@ function(ocv_add_external_target name inc link def)
endif()
endfunction()
# Returns the first non-interface target
function(ocv_get_imported_target imported interface)
set(__result "${interface}")
get_target_property(__type "${__result}" TYPE)
if(__type STREQUAL "INTERFACE_LIBRARY")
get_target_property(__libs "${__result}" INTERFACE_LINK_LIBRARIES)
list(GET __libs 0 __interface)
ocv_get_imported_target(__result "${__interface}")
endif()
set(${imported} "${__result}" PARENT_SCOPE)
endfunction()
macro(ocv_get_libname var_name)
get_filename_component(__libname "${ARGN}" NAME)
@ -1999,3 +2011,10 @@ function(ocv_blob2hdr blob_filename hdr_filename cpp_variable)
file(WRITE "${hdr_filename}" "${source}")
endfunction()
#
# Include configuration override settings
#
include("${CMAKE_CURRENT_LIST_DIR}/vars/EnableModeVars.cmake")


@ -9,7 +9,7 @@
int test()
{
const float src[] = { 0.0f, 0.0f, 0.0f, 0.0f };
vfloat32m1_t val = vle32_v_f32m1((const float*)(src));
vfloat32m1_t val = vle32_v_f32m1((const float*)(src), 4);
return (int)vfmv_f_s_f32m1_f32(val);
}
#else


@ -137,6 +137,20 @@ elseif(MSVC)
set(OpenCV_RUNTIME vc14) # selecting previous compatible runtime version
endif()
endif()
elseif(MSVC_VERSION MATCHES "^193[0-9]$")
set(OpenCV_RUNTIME vc17)
check_one_config(has_VS2022)
if(NOT has_VS2022)
set(OpenCV_RUNTIME vc16)
check_one_config(has_VS2019)
if(NOT has_VS2019)
set(OpenCV_RUNTIME vc15) # selecting previous compatible runtime version
check_one_config(has_VS2017)
if(NOT has_VS2017)
set(OpenCV_RUNTIME vc14) # selecting previous compatible runtime version
endif()
endif()
endif()
endif()
elseif(MINGW)
set(OpenCV_RUNTIME mingw)


@ -121,9 +121,6 @@
/* TIFF codec */
#cmakedefine HAVE_TIFF
/* Win32 UI */
#cmakedefine HAVE_WIN32UI
/* Define if your processor stores words with the most significant byte
first (like Motorola and SPARC, unlike Intel and VAX). */
#cmakedefine WORDS_BIGENDIAN


@ -0,0 +1,21 @@
set(__OCV_MODE_VARS_DIR "${CMAKE_CURRENT_LIST_DIR}")
macro(ocv_change_mode_var)
set(__var "${ARGV0}")
set(__mode "${ARGV1}")
set(__value "${ARGV2}")
if(__mode STREQUAL "MODIFIED_ACCESS" AND __value)
if(NOT __applied_mode_${__var})
include("${__OCV_MODE_VARS_DIR}/${__var}.cmake")
set(__applied_mode_${__var} 1)
else()
#message("Mode is already applied: ${__var}")
endif()
endif()
endmacro()
variable_watch(OPENCV_DISABLE_THREAD_SUPPORT ocv_change_mode_var)
set(OPENCV_DISABLE_THREAD_SUPPORT "${OPENCV_DISABLE_THREAD_SUPPORT}")
variable_watch(OPENCV_SEMIHOSTING ocv_change_mode_var)
set(OPENCV_SEMIHOSTING "${OPENCV_SEMIHOSTING}")


@ -0,0 +1,28 @@
# Force removal of code conditionally compiled with `#if
# HAVE_PTHREAD`.
ocv_update(HAVE_PTHREAD 0)
# These components are disabled because they require
# multi-threaded execution.
ocv_update(WITH_PROTOBUF OFF)
ocv_update(WITH_GSTREAMER OFF)
ocv_update(WITH_IPP OFF)
ocv_update(WITH_ITT OFF)
ocv_update(WITH_OPENCL OFF)
ocv_update(WITH_VA OFF)
ocv_update(WITH_VA_INTEL OFF)
# Disable bindings
ocv_update(BUILD_opencv_python2 OFF)
ocv_update(BUILD_opencv_python3 OFF)
ocv_update(BUILD_JAVA OFF)
ocv_update(BUILD_opencv_java OFF)
# These modules require `#include
# <[thread|mutex|condition_variable|future]>` and linkage into
# `libpthread` to work.
ocv_update(BUILD_opencv_objdetect OFF)
ocv_update(BUILD_opencv_gapi OFF)
ocv_update(BUILD_opencv_dnn OFF)
set(OPJ_USE_THREAD "OFF" CACHE INTERNAL "")


@ -0,0 +1,10 @@
set(CV_TRACE OFF)
# These third-party libraries are incompatible with the semihosting
# toolchain.
set(WITH_JPEG OFF)
set(WITH_OPENEXR OFF)
set(WITH_TIFF OFF)
# Turn off `libpng` for some linking issues.
set(WITH_PNG OFF)


@ -106,7 +106,7 @@ RECURSIVE = YES
EXCLUDE = @CMAKE_DOXYGEN_EXCLUDE_LIST@
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp *.m */opencl/runtime/* */legacy/* *_c.h @DOXYGEN_EXCLUDE_PATTERNS@
EXCLUDE_SYMBOLS = cv::DataType<*> cv::traits::* int void CV__* T __CV*
EXCLUDE_SYMBOLS = cv::DataType<*> cv::traits::* int void CV__* T __CV* cv::gapi::detail*
EXAMPLE_PATH = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = YES


@ -1,6 +1,9 @@
Contour Features {#tutorial_js_contour_features}
================
@prev_tutorial{tutorial_js_contours_begin}
@next_tutorial{tutorial_js_contour_properties}
Goal
----


@ -1,6 +1,9 @@
Contour Properties {#tutorial_js_contour_properties}
==================
@prev_tutorial{tutorial_js_contour_features}
@next_tutorial{tutorial_js_contours_more_functions}
Goal
----


@ -1,6 +1,8 @@
Contours : Getting Started {#tutorial_js_contours_begin}
==========================
@next_tutorial{tutorial_js_contour_features}
Goal
----


@ -1,6 +1,8 @@
Contours Hierarchy {#tutorial_js_contours_hierarchy}
==================
@prev_tutorial{tutorial_js_contours_more_functions}
Goal
----


@ -1,6 +1,9 @@
Contours : More Functions {#tutorial_js_contours_more_functions}
=========================
@prev_tutorial{tutorial_js_contour_properties}
@next_tutorial{tutorial_js_contours_hierarchy}
Goal
----


@ -6,13 +6,14 @@ python gen_pattern.py -o out.svg -r 11 -c 8 -T circles -s 20.0 -R 5.0 -u mm -w 2
-o, --output - output file (default out.svg)
-r, --rows - pattern rows (default 11)
-c, --columns - pattern columns (default 8)
-T, --type - type of pattern, circles, acircles, checkerboard (default circles)
-T, --type - type of pattern, circles, acircles, checkerboard, radon_checkerboard (default circles)
-s, --square_size - size of squares in pattern (default 20.0)
-R, --radius_rate - circles_radius = square_size/radius_rate (default 5.0)
-u, --units - mm, inches, px, m (default mm)
-w, --page_width - page width in units (default 216)
-h, --page_height - page height in units (default 279)
-a, --page_size - page size (default A4), supersedes -h -w arguments
-m, --markers - list of cells with markers for the radon checkerboard
-H, --help - show help
"""
@ -22,7 +23,7 @@ from svgfig import *
class PatternMaker:
def __init__(self, cols, rows, output, units, square_size, radius_rate, page_width, page_height):
def __init__(self, cols, rows, output, units, square_size, radius_rate, page_width, page_height, markers):
self.cols = cols
self.rows = rows
self.output = output
@ -31,6 +32,7 @@ class PatternMaker:
self.radius_rate = radius_rate
self.width = page_width
self.height = page_height
self.markers = markers
self.g = SVG("g") # the svg group container
def make_circles_pattern(self):
@ -70,6 +72,74 @@ class PatternMaker:
height=spacing, fill="black", stroke="none")
self.g.append(square)
@staticmethod
def _make_round_rect(x, y, diam, corners=("right", "right", "right", "right")):
rad = diam / 2
cw_point = ((0, 0), (diam, 0), (diam, diam), (0, diam))
mid_cw_point = ((0, rad), (rad, 0), (diam, rad), (rad, diam))
res_str = "M{},{} ".format(x + mid_cw_point[0][0], y + mid_cw_point[0][1])
n = len(cw_point)
for i in range(n):
if corners[i] == "right":
res_str += "L{},{} L{},{} ".format(x + cw_point[i][0], y + cw_point[i][1],
x + mid_cw_point[(i + 1) % n][0], y + mid_cw_point[(i + 1) % n][1])
elif corners[i] == "round":
res_str += "A{},{} 0,0,1 {},{} ".format(rad, rad, x + mid_cw_point[(i + 1) % n][0],
y + mid_cw_point[(i + 1) % n][1])
else:
raise TypeError("unknown corner type")
return res_str
def _get_type(self, x, y):
corners = ["right", "right", "right", "right"]
is_inside = True
if x == 0:
corners[0] = "round"
corners[3] = "round"
is_inside = False
if y == 0:
corners[0] = "round"
corners[1] = "round"
is_inside = False
if x == self.cols - 1:
corners[1] = "round"
corners[2] = "round"
is_inside = False
if y == self.rows - 1:
corners[2] = "round"
corners[3] = "round"
is_inside = False
return corners, is_inside
def make_radon_checkerboard_pattern(self):
spacing = self.square_size
xspacing = (self.width - self.cols * self.square_size) / 2.0
yspacing = (self.height - self.rows * self.square_size) / 2.0
for x in range(0, self.cols):
for y in range(0, self.rows):
if x % 2 == y % 2:
corner_types, is_inside = self._get_type(x, y)
if is_inside:
square = SVG("rect", x=x * spacing + xspacing, y=y * spacing + yspacing, width=spacing,
height=spacing, fill="black", stroke="none")
else:
square = SVG("path", d=self._make_round_rect(x * spacing + xspacing, y * spacing + yspacing,
spacing, corner_types), fill="black", stroke="none")
self.g.append(square)
if self.markers is not None:
r = self.square_size * 0.17
pattern_width = ((self.cols - 1.0) * spacing) + (2.0 * r)
pattern_height = ((self.rows - 1.0) * spacing) + (2.0 * r)
x_spacing = (self.width - pattern_width) / 2.0
y_spacing = (self.height - pattern_height) / 2.0
for x, y in self.markers:
color = "black"
if x % 2 == y % 2:
color = "white"
dot = SVG("circle", cx=(x * spacing) + x_spacing + r,
cy=(y * spacing) + y_spacing + r, r=r, fill=color, stroke="none")
self.g.append(dot)
def save(self):
c = canvas(self.g, width="%d%s" % (self.width, self.units), height="%d%s" % (self.height, self.units),
viewBox="0 0 %d %d" % (self.width, self.height))
@ -85,7 +155,7 @@ def main():
type=int)
parser.add_argument("-r", "--rows", help="pattern rows", default="11", action="store", dest="rows", type=int)
parser.add_argument("-T", "--type", help="type of pattern", default="circles", action="store", dest="p_type",
choices=["circles", "acircles", "checkerboard"])
choices=["circles", "acircles", "checkerboard", "radon_checkerboard"])
parser.add_argument("-u", "--units", help="length unit", default="mm", action="store", dest="units",
choices=["mm", "inches", "px", "m"])
parser.add_argument("-s", "--square_size", help="size of squares in pattern", default="20.0", action="store",
@ -96,8 +166,12 @@ def main():
dest="page_width", type=float)
parser.add_argument("-h", "--page_height", help="page height in units", default=argparse.SUPPRESS, action="store",
dest="page_height", type=float)
parser.add_argument("-a", "--page_size", help="page size, superseded if -h and -w are set", default="A4", action="store",
dest="page_size", choices=["A0", "A1", "A2", "A3", "A4", "A5"])
parser.add_argument("-a", "--page_size", help="page size, superseded if -h and -w are set", default="A4",
action="store", dest="page_size", choices=["A0", "A1", "A2", "A3", "A4", "A5"])
parser.add_argument("-m", "--markers", help="list of cells with markers for the radon checkerboard. Marker "
"coordinates as list of numbers: -m 1 2 3 4 means markers in cells "
"[1, 2] and [3, 4]",
action="store", dest="markers", nargs="+", type=int)
args = parser.parse_args()
show_help = args.show_help
@ -121,10 +195,19 @@ def main():
"A5": [148, 210]}
page_width = page_sizes[page_size][0]
page_height = page_sizes[page_size][1]
pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height)
if len(args.markers) % 2 == 1:
raise ValueError("The length of the markers array={} must be even".format(len(args.markers)))
markers = set()
for x, y in zip(args.markers[::2], args.markers[1::2]):
if x in range(0, columns) and y in range(0, rows):
markers.add((x, y))
else:
raise ValueError("The marker {},{} is outside the checkerboard".format(x, y))
pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height, markers)
# dict for easy lookup of pattern type
mp = {"circles": pm.make_circles_pattern, "acircles": pm.make_acircles_pattern,
"checkerboard": pm.make_checkerboard_pattern}
"checkerboard": pm.make_checkerboard_pattern, "radon_checkerboard": pm.make_radon_checkerboard_pattern}
mp[p_type]()
# this should save pattern to output
pm.save()


@ -60,6 +60,14 @@ of C++.
So this is the basic version of how OpenCV-Python bindings are generated.
@note There is no 1:1 mapping of numpy.ndarray on cv::Mat. For example, cv::Mat has channels field,
which is emulated as last dimension of numpy.ndarray and implicitly converted.
However, such implicit conversion has a problem with passing 3D numpy arrays into C++ code
(the last dimension is implicitly reinterpreted as the number of channels).
Refer to the [issue](https://github.com/opencv/opencv/issues/19091) for workarounds if you need to process 3D arrays or ND-arrays with channels.
OpenCV 4.5.4+ has a `cv.Mat` wrapper derived from `numpy.ndarray` to explicitly handle the channels behavior.
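A short illustration of the ambiguity (a sketch; the exact `cv.Mat` constructor options may vary across 4.5.4+ versions):

    import numpy as np
    import cv2 as cv

    arr = np.zeros((4, 5, 3), np.uint8)  # a 4x5 3-channel image, or true 3D data?
                                         # a plain ndarray cannot express the intent
    m = cv.Mat(arr)                      # cv.Mat (4.5.4+) is an ndarray subclass
    print(isinstance(m, np.ndarray), m.shape)  # True (4, 5, 3)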
How to extend new modules to Python?
------------------------------------


@ -98,7 +98,7 @@ import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('simple.jpg',0)
img = cv.imread('blox.jpg',0) # `<opencv_root>/samples/data/blox.jpg`
# Initiate FAST object with default values
fast = cv.FastFeatureDetector_create()
@ -113,17 +113,17 @@ print( "nonmaxSuppression:{}".format(fast.getNonmaxSuppression()) )
print( "neighborhood: {}".format(fast.getType()) )
print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )
cv.imwrite('fast_true.png',img2)
cv.imwrite('fast_true.png', img2)
# Disable nonmaxSuppression
fast.setNonmaxSuppression(0)
kp = fast.detect(img,None)
kp = fast.detect(img, None)
print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) )
img3 = cv.drawKeypoints(img, kp, None, color=(255,0,0))
cv.imwrite('fast_false.png',img3)
cv.imwrite('fast_false.png', img3)
@endcode
See the results. First image shows FAST with nonmaxSuppression and second one without
nonmaxSuppression:


@ -74,7 +74,7 @@ Canny Edge Detection in OpenCV
OpenCV puts all the above in single function, **cv.Canny()**. We will see how to use it. First
argument is our input image. Second and third arguments are our minVal and maxVal respectively.
Third argument is aperture_size. It is the size of Sobel kernel used for find image gradients. By
Fourth argument is aperture_size. It is the size of the Sobel kernel used to find image gradients. By
default it is 3. Last argument is L2gradient which specifies the equation for finding gradient
magnitude. If it is True, it uses the equation mentioned above which is more accurate, otherwise it
uses this function: \f$Edge\_Gradient \; (G) = |G_x| + |G_y|\f$. By default, it is False.
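For reference, the arguments described above map onto the Python binding like this (a minimal sketch; `blox.jpg` from the OpenCV samples is just an example input):

    import cv2 as cv

    img = cv.imread('blox.jpg', cv.IMREAD_GRAYSCALE)
    # minVal=100, maxVal=200; apertureSize is the Sobel kernel size (default 3);
    # L2gradient=True selects the more accurate L2 magnitude formula
    edges = cv.Canny(img, 100, 200, apertureSize=3, L2gradient=True)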


@ -1,6 +1,9 @@
Contour Features {#tutorial_py_contour_features}
================
@prev_tutorial{tutorial_py_contours_begin}
@next_tutorial{tutorial_py_contour_properties}
Goal
----
@ -91,7 +94,7 @@ convexity defects, which are the local maximum deviations of hull from contours.
There is a little bit to discuss about its syntax:
@code{.py}
hull = cv.convexHull(points[, hull[, clockwise[, returnPoints]]
hull = cv.convexHull(points[, hull[, clockwise[, returnPoints]]])
@endcode
Arguments details:


@ -1,6 +1,9 @@
Contour Properties {#tutorial_py_contour_properties}
==================
@prev_tutorial{tutorial_py_contour_features}
@next_tutorial{tutorial_py_contours_more_functions}
Here we will learn to extract some frequently used properties of objects like Solidity, Equivalent
Diameter, Mask image, Mean Intensity etc. More features can be found at [Matlab regionprops
documentation](http://www.mathworks.in/help/images/ref/regionprops.html).


@ -1,6 +1,8 @@
Contours : Getting Started {#tutorial_py_contours_begin}
==========================
@next_tutorial{tutorial_py_contour_features}
Goal
----


@ -1,6 +1,8 @@
Contours Hierarchy {#tutorial_py_contours_hierarchy}
==================
@prev_tutorial{tutorial_py_contours_more_functions}
Goal
----


@ -1,6 +1,10 @@
Contours : More Functions {#tutorial_py_contours_more_functions}
=========================
@prev_tutorial{tutorial_py_contour_properties}
@next_tutorial{tutorial_py_contours_hierarchy}
Goal
----


@ -117,7 +117,7 @@ for i in range(5,0,-1):
LS = []
for la,lb in zip(lpA,lpB):
rows,cols,dpt = la.shape
ls = np.hstack((la[:,0:cols/2], lb[:,cols/2:]))
ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))
LS.append(ls)
# now reconstruct
@ -127,7 +127,7 @@ for i in range(1,6):
ls_ = cv.add(ls_, LS[i])
# image with direct connecting each half
real = np.hstack((A[:,:cols/2],B[:,cols/2:]))
real = np.hstack((A[:,:cols//2],B[:,cols//2:]))
cv.imwrite('Pyramid_blending2.jpg',ls_)
cv.imwrite('Direct_blending.jpg',real)
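The `/` to `//` changes above are Python 3 fixes: true division returns a float, which is not a valid slice index. A minimal illustration:

    cols = 512
    print(cols / 2, cols // 2)   # 256.0 (float) vs 256 (int)
    a = list(range(cols))
    left = a[:cols // 2]         # OK
    # a[:cols / 2] would raise: slice indices must be integers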


@ -188,7 +188,7 @@ implementation below.
This will return a similarity index for each channel of the image. This value is between zero and
one, where one corresponds to perfect fit. Unfortunately, the many Gaussian blurring is quite
costly, so while the PSNR may work in a real time like environment (24 frame per second) this will
costly, so while the PSNR may work in a real time like environment (24 frames per second) this will
take significantly more than to accomplish similar performance results.
Therefore, the source code presented at the start of the tutorial will perform the PSNR measurement


@ -36,6 +36,10 @@ create a circle board pattern in file acircleboard.svg with 7 rows, 5 columns an
python gen_pattern.py -o acircleboard.svg --rows 7 --columns 5 --type acircles --square_size 10 --radius_rate 2
create a radon checkerboard for findChessboardCornersSB() with markers in (7 4), (7 5), (8 5) cells:
python gen_pattern.py -o radon_checkerboard.svg --rows 10 --columns 15 --type radon_checkerboard -s 12.1 -m 7 4 7 5 8 5
If you want to change unit use -u option (mm inches, px, m)
If you want to change page size use -w and -h options


@ -4,7 +4,7 @@ File Input and Output using XML and YAML files {#tutorial_file_input_output_with
@tableofcontents
@prev_tutorial{tutorial_discrete_fourier_transform}
@next_tutorial{tutorial_how_to_use_OpenCV_parallel_for_}
@next_tutorial{tutorial_how_to_use_OpenCV_parallel_for_new}
| | |
| -: | :- |


@ -0,0 +1,166 @@
How to use the OpenCV parallel_for_ to parallelize your code {#tutorial_how_to_use_OpenCV_parallel_for_new}
==================================================================
@tableofcontents
@prev_tutorial{tutorial_file_input_output_with_xml_yml}
@next_tutorial{tutorial_univ_intrin}
| | |
| -: | :- |
| Compatibility | OpenCV >= 3.0 |
Goal
----
The goal of this tutorial is to demonstrate the use of the OpenCV `parallel_for_` framework to easily parallelize your code. To illustrate the concept, we will write a program to perform a convolution operation over an image.
The full tutorial code is [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_new.cpp).
Precondition
----
### Parallel Frameworks
The first precondition is to have OpenCV built with a parallel framework.
In OpenCV 4.5, the following parallel frameworks are available, in order of precedence:
* Intel Threading Building Blocks (3rdparty library, should be explicitly enabled)
* OpenMP (integrated to compiler, should be explicitly enabled)
* APPLE GCD (system wide, used automatically (APPLE only))
* Windows RT concurrency (system wide, used automatically (Windows RT only))
* Windows concurrency (part of runtime, used automatically (Windows only - MSVC++ >= 10))
* Pthreads
As you can see, several parallel frameworks can be used in the OpenCV library. Some parallel libraries are third party libraries and have to be explicitly enabled in CMake before building, while others are automatically available with the platform (e.g. APPLE GCD).
### Race Conditions
Race conditions occur when more than one thread tries to write, *or* read and write, to a particular memory location simultaneously.
Based on that, we can broadly classify algorithms into two categories:
1. Algorithms in which only a single thread writes data to a particular memory location.
* In *convolution*, for example, even though multiple threads may read from a pixel at a particular time, only a single thread *writes* to a particular pixel.
2. Algorithms in which multiple threads may write to a single memory location.
* Finding contours, features, etc. Such algorithms may require each thread to add data to a global variable simultaneously. For example, when detecting features, each thread will add the features of its respective part of the image to a common vector, thus creating a race condition.
Convolution
-----------
We will use the example of performing a convolution to demonstrate the use of `parallel_for_` to parallelize the computation. This is an example of an algorithm which does not lead to a race condition.
Theory
------
Convolution is a simple mathematical operation widely used in image processing. Here, we slide a smaller matrix, called the *kernel*, over an image; at each position, the sum of the products of the pixel values and the corresponding kernel values gives the value of the output pixel under the kernel's anchor point. Based on the values in the kernel, we get different results.
In the example below, we use a 3x3 kernel (anchored at its center) and convolve over a 5x5 matrix to produce a 3x3 matrix. The size of the output can be altered by padding the input with suitable values.
![Convolution Animation](images/convolution-example-matrix.gif)
For more information about different kernels and what they do, look [here](https://en.wikipedia.org/wiki/Kernel_(image_processing))
For the purpose of this tutorial, we will implement the simplest form of the function, which takes a grayscale image (1 channel) and an odd-length square kernel and produces an output image.
The operation will not be performed in-place.
@note We can store a few of the relevant pixels temporarily to make sure we use the original values during the convolution and then do it in-place. However, the purpose of this tutorial is to introduce the parallel_for_ function, and an in-place implementation may be too complicated.
Pseudocode
-----------
InputImage src, OutputImage dst, kernel(size n)
makeborder(src, n/2)
for each pixel (i, j) strictly inside borders, do:
{
value := 0
for k := -n/2 to n/2, do:
for l := -n/2 to n/2, do:
value += kernel[n/2 + k][n/2 + l]*src[i + k][j + l]
dst[i][j] := value
}
For an *n-sized kernel*, we will add a border of size *n/2* to handle edge cases.
We then run two loops to move along the kernel and add the products to the sum.
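A direct NumPy transliteration of the pseudocode, for readers following along in Python (the tutorial's own snippets are C++; this sketch is not part of the sample code, and it uses edge replication for the border):

    import numpy as np

    def convolve(src, kernel):
        n = kernel.shape[0]                  # odd, square kernel
        pad = n // 2
        padded = np.pad(src.astype(np.float64), pad, mode='edge')
        dst = np.zeros(src.shape)
        for i in range(src.shape[0]):
            for j in range(src.shape[1]):
                # sum of elementwise products over the kernel window
                dst[i, j] = (kernel * padded[i:i + n, j:j + n]).sum()
        return dst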
Implementation
--------------
### Sequential implementation
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-sequential
We first make an output matrix (dst) with the same size as src and add borders to the src image (to handle edge cases).
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-make-borders
We then sequentially iterate over the pixels in the src image and compute the value from the kernel and the neighbouring pixel values.
We then write the value to the corresponding pixel in the dst image.
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-kernel-loop
### Parallel implementation
When looking at the sequential implementation, we can notice that each pixel depends on multiple neighbouring pixels but only one pixel is edited at a time. Thus, to optimize the computation, we can split the image into stripes and perform the convolution on each stripe in parallel, exploiting the multi-core architecture of modern processors. The OpenCV @ref cv::parallel_for_ framework automatically decides how to split the computation efficiently and does most of the work for us.
@note Although the values of a pixel in a particular stripe may depend on pixel values outside the stripe, these are read-only operations and hence will not cause undefined behaviour.
We first declare a custom class that inherits from @ref cv::ParallelLoopBody and override the `virtual void operator ()(const cv::Range& range) const`.
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-parallel
The range in the `operator ()` represents the subset of values that will be treated by an individual thread. Based on the requirement, there may be different ways of splitting the range which in turn changes the computation.
For example, we can either
1. Split the entire traversal of the image and obtain the [row, col] coordinate in the following way (as shown in the above code):
@snippet how_to_use_OpenCV_parallel_for_new.cpp overload-full
We would then call the parallel_for_ function in the following way:
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-parallel-function
<br>
2. Split the rows and compute for each row:
@snippet how_to_use_OpenCV_parallel_for_new.cpp overload-row-split
In this case, we call the parallel_for_ function with a different range:
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-parallel-function-row
@note In our case, both implementations perform similarly. Some cases may allow better memory access patterns or other performance benefits.
To set the number of threads, you can use @ref cv::setNumThreads. You can also specify the number of splits using the nstripes parameter in @ref cv::parallel_for_. For instance, if your processor has 4 threads, setting `cv::setNumThreads(2)` or setting `nstripes=2` should behave the same: by default it will use all the processor threads available but will split the workload across only two threads.
@note The C++11 standard allows the parallel implementation to be simplified by getting rid of the `parallelConvolution` class and replacing it with a lambda expression:
@snippet how_to_use_OpenCV_parallel_for_new.cpp convolution-parallel-cxx11
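The thread-count control mentioned above is also reachable from the Python bindings (a sketch; `nstripes` belongs to the C++ `parallel_for_` call and has no Python counterpart here):

    import cv2 as cv

    cv.setNumThreads(2)        # cap OpenCV's internal parallel_for_ at 2 threads
    print(cv.getNumThreads())  # 2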
Results
-----------
The resulting time taken for execution of the different implementations on a
* *512x512 input* with a *5x5 kernel*:
This program shows how to use the OpenCV parallel_for_ function and
compares the performance of the sequential and parallel implementations for a
convolution operation
Usage:
./a.out [image_path -- default lena.jpg]
Sequential Implementation: 0.0953564s
Parallel Implementation: 0.0246762s
Parallel Implementation(Row Split): 0.0248722s
<br>
* *512x512 input with a 3x3 kernel*
This program shows how to use the OpenCV parallel_for_ function and
compares the performance of the sequential and parallel implementations for a
convolution operation
Usage:
./a.out [image_path -- default lena.jpg]
Sequential Implementation: 0.0301325s
Parallel Implementation: 0.0117053s
Parallel Implementation(Row Split): 0.0117894s
The performance of the parallel implementation depends on the type of CPU you have. For instance, on a 4-core / 8-thread CPU, the runtime may be 6x to 7x faster than a sequential implementation. There are many factors to explain why we do not achieve a speed-up of 8x:
* the overhead to create and manage the threads,
* background processes running in parallel,
* the difference between 4 hardware cores with 2 logical threads for each core and 8 hardware cores.
In the tutorial, we used a horizontal gradient filter (as shown in the animation above), which produces an image highlighting the vertical edges.
![result image](images/resimg.jpg)

Binary file not shown.

After

Width: | Height: | Size: 147 KiB

Binary file not shown.

After

Width: | Height: | Size: 64 KiB

View File

@ -91,8 +91,8 @@ a new header with the new boundaries:
Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
@endcode
Now you may ask -- if the matrix itself may belong to multiple *Mat* objects, who takes responsibility
for cleaning it up when it's no longer needed? The short answer is: the last object that used it.
This is handled by using a reference counting mechanism. Whenever somebody copies a header of a
*Mat* object, a counter is increased for the matrix. Whenever a header is cleaned, this counter
is decreased. When the counter reaches zero the matrix is freed. Sometimes you will want to copy
@ -102,12 +102,12 @@ Mat F = A.clone();
Mat G;
A.copyTo(G);
@endcode
Now modifying *F* or *G* will not affect the matrix pointed to by the *A*'s header. What you need to
remember from all this is that:
- Output image allocation for OpenCV functions is automatic (unless specified otherwise).
- You do not need to think about memory management with OpenCV's C++ interface.
- The assignment operator and the copy constructor only copy the header.
- The underlying matrix of an image may be copied using the @ref cv::Mat::clone() and @ref cv::Mat::copyTo()
functions.
@ -122,10 +122,10 @@ of these allows us to create many shades of gray.
For *colorful* ways we have a lot more methods to choose from. Each of them breaks it down to three
or four basic components and we can use the combination of these to create the others. The most
popular one is RGB, mainly because this is also how our eye builds up colors. Its base colors are
red, green and blue. To code the transparency of a color sometimes a fourth element, alpha (A), is
added.
There are, however, many other color systems, each with their own advantages:
- RGB is the most common as our eyes use something similar, however keep in mind that OpenCV standard display
system composes colors using the BGR color space (red and blue channels are swapped places).
@ -139,11 +139,11 @@ There are, however, many other color systems each with their own advantages:
Each of the building components has its own valid domains. This leads to the data type used. How
we store a component defines the control we have over its domain. The smallest data type possible is
*char*, which means one byte or 8 bits. This may be unsigned (so can store values from 0 to 255) or
signed (values from -127 to +127). Although this width, in the case of three components (like RGB), already gives 16
million possible colors to represent, we may acquire an even finer control by
using the float (4 byte = 32 bit) or double (8 byte = 64 bit) data types for each component.
Nevertheless, remember that increasing the size of a component also increases the size of the whole
picture in memory.
Creating a Mat object explicitly
----------------------------------

View File

@ -9,4 +9,5 @@ The Core Functionality (core module) {#tutorial_table_of_content_core}
- @subpage tutorial_basic_linear_transform
- @subpage tutorial_discrete_fourier_transform
- @subpage tutorial_file_input_output_with_xml_yml
- @subpage tutorial_how_to_use_OpenCV_parallel_for_new
- @subpage tutorial_univ_intrin

View File

@ -0,0 +1,334 @@
Vectorizing your code using Universal Intrinsics {#tutorial_univ_intrin}
==================================================================
@tableofcontents
@prev_tutorial{tutorial_how_to_use_OpenCV_parallel_for_new}
| | |
| -: | :- |
| Compatibility | OpenCV >= 3.0 |
Goal
----
The goal of this tutorial is to provide a guide to using the @ref core_hal_intrin feature to vectorize your C++ code for a faster runtime.
We'll briefly look into _SIMD intrinsics_ and how to work with wide _registers_, followed by a tutorial on the basic operations using wide registers.
Theory
------
In this section, we will briefly look into a few concepts that help in understanding the functionality.
### Intrinsics
Intrinsics are functions which are handled specially by the compiler. These functions are often optimized to perform in the most efficient ways possible and hence run faster than normal implementations. However, since these functions depend on the compiler, they make it difficult to write portable applications.
### SIMD
SIMD stands for **Single Instruction, Multiple Data**. SIMD intrinsics allow the processor to vectorize calculations. The data is stored in what are known as *registers*. A *register* may be *128*, *256* or *512* bits wide. Each *register* stores **multiple values** of the **same data type**. The size of the register and the size of each value determine the number of values stored in total; for example, a *256-bit* register holds eight *32-bit* floats.
Depending on which *instruction sets* your CPU supports, you may be able to use the different register widths. To learn more, look [here](https://en.wikipedia.org/wiki/Instruction_set_architecture).
Universal Intrinsics
--------------------
OpenCV's universal intrinsics provide an abstraction over SIMD vectorization methods and allow the user to use intrinsics without the need to write system-specific code.
OpenCV Universal Intrinsics support the following instruction sets:
* Support for *128-bit* registers of various types is implemented for a wide range of architectures, including
* x86(SSE/SSE2/SSE4.2),
* ARM(NEON),
* PowerPC(VSX),
* MIPS(MSA).
* *256-bit* registers are supported on x86 (AVX2), and
* *512-bit* registers are supported on x86 (AVX512).
**We will now introduce the available structures and functions:**
* Register structures
* Load and store
* Mathematical Operations
* Reduce and Mask
### Register Structures
The Universal Intrinsics set implements every register as a structure based on the particular SIMD register.
All types contain the `nlanes` enumeration, which gives the exact number of values that the type can hold. This eliminates the need to hardcode the number of values in an implementation.
@note Each register structure is under the `cv` namespace.
There are **two types** of registers:
* **Variable sized registers**: These structures do not have a fixed size and their exact bit length is deduced during compilation, based on the available SIMD capabilities. Consequently, the value of the `nlanes` enum is determined at compile time.
<br>
Each structure follows this convention:
v_[type of value][size of each value in bits]
For instance, **v_uint8 holds 8-bit unsigned integers** and **v_float32 holds 32-bit floating point values**. We then declare a register like we would declare any object in C++.
Based on the available SIMD instruction set, a particular register will hold a different number of values.
For example, if your computer supports a maximum of *256-bit* registers:
* *v_uint8* will hold 32 8-bit unsigned integers
* *v_float64* will hold 4 64-bit floats (doubles)
v_uint8 a; // a is a register supporting uint8 (uchar) data
int n = a.nlanes; // n holds 32
Available data types and sizes:
|Type|Size in bits|
|-:|:-|
|uint| 8, 16, 32, 64|
|int | 8, 16, 32, 64|
|float | 32, 64|
* **Constant sized registers**: These structures have a fixed bit size and hold a constant number of values. We need to know which SIMD instruction set is supported by the system and select compatible registers. Use these only if an exact bit length is necessary.
<br>
Each structure follows the convention:
v_[type of value][size of each value in bits]x[number of values]
Suppose we want to store
* 32-bit (*size in bits*) signed integers in a **128-bit register**. Since the register size is already known, we can find out the *number of data points in the register* (*128/32 = 4*):
v_int32x4 reg1; // holds 4 32-bit signed integers
* 64-bit floats in a **512-bit register**:
v_float64x8 reg2; // reg2.nlanes = 8
### Load and Store operations
Now that we know how registers work, let us look at the functions used for filling these registers with values.
* **Load**: Load functions allow you to *load* values into a register.
* *Constructors* - When declaring a register structure, we can either provide a memory address from where the register will pick up contiguous values, or provide the values explicitly as multiple arguments (explicit multiple arguments are available only for constant sized registers):
float ptr[32] = {1, 2, 3, ..., 32}; // ptr is a contiguous memory block of 32 floats
// Variable Sized Registers //
int x = v_float32().nlanes; // set x as the number of values the register can hold
v_float32 reg1(ptr); // reg1 stores the first x values, according to the maximum register size available.
v_float32 reg2(ptr + x); // reg2 stores the next x values
// Constant Sized Registers //
v_float32x4 reg1(ptr); // reg1 stores the first 4 floats (1, 2, 3, 4)
v_float32x4 reg2(ptr + 4); // reg2 stores the next 4 floats (5, 6, 7, 8)
// Or we can explicitly write down the values.
v_float32x4(1, 2, 3, 4);
<br>
* *Load Function* - We can use the load method and provide the memory address of the data:
float ptr[32] = {1, 2, 3, ..., 32};
v_float32 reg_var;
reg_var = vx_load(ptr); // loads values from ptr[0] up to ptr[reg_var.nlanes - 1]
v_float32x4 reg_128;
reg_128 = v_load(ptr); // loads values from ptr[0] up to ptr[3]
v_float32x8 reg_256;
reg_256 = v256_load(ptr); // loads values from ptr[0] up to ptr[7]
v_float32x16 reg_512;
reg_512 = v512_load(ptr); // loads values from ptr[0] up to ptr[15]
@note The load function assumes data is unaligned. If your data is aligned, you may use the `vx_load_aligned()` function.
<br>
* **Store**: Store functions allow you to *store* the values from a register into a particular memory location.
* To store values from a register into a memory location, you may use the *v_store()* function:
float ptr[4];
v_store(ptr, reg); // store the first 128 bits (interpreted as 4x 32-bit floats) of reg into ptr.
<br>
@note Ensure **ptr** has the same type as the register. You can also cast the register into the proper type before carrying out operations. Simply typecasting the pointer to a particular type will lead to a wrong interpretation of the data.
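Putting load and store together with a simple multiply (operators are covered in the next section), here is a minimal, hedged sketch of the typical processing loop; the `scale` function is hypothetical and not part of the tutorial sample:
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>

// Multiply every element of src by k and write the result to dst.
void scale(const float* src, float* dst, int len, float k)
{
    const int step = cv::v_float32().nlanes;  // lane count of the widest available register
    cv::v_float32 vk = cv::vx_setall_f32(k);  // broadcast k into all lanes
    int i = 0;
    for (; i + step <= len; i += step)
    {
        cv::v_float32 v = cv::vx_load(src + i); // unaligned load of 'step' floats
        cv::v_store(dst + i, v * vk);           // element-wise multiply, then store
    }
    for (; i < len; ++i)                        // scalar tail for the leftovers
        dst[i] = src[i] * k;
}
@endcode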
### Binary and Unary Operators
The universal intrinsics set provides element-wise binary and unary operations.
* **Arithmetic**: We can add, subtract, multiply and divide two registers element-wise. The registers must be of the same width and hold the same type. To add or multiply two registers, for example:
v_float32 a, b; // {a1, ..., an}, {b1, ..., bn}
v_float32 c;
c = a + b; // {a1 + b1, ..., an + bn}
c = a * b; // {a1 * b1, ..., an * bn}
<br>
* **Bitwise Logic and Shifts**: We can left-shift or right-shift the bits of each element of a register. We can also apply the bitwise &, |, ^ and ~ operators element-wise:
v_int32 as; // {a1, ..., an}
v_int32 al = as << 2; // {a1 << 2, ..., an << 2}
v_int32 bl = as >> 2; // {a1 >> 2, ..., an >> 2}
v_int32 a, b;
v_int32 a_and_b = a & b; // {a1 & b1, ..., an & bn}
<br>
* **Comparison Operators**: We can compare values between two registers using the <, >, <=, >=, == and != operators. Since each register contains multiple values, we don't get a single bool for these operations. Instead, for true values all bits are set to one (0xff for 8 bits, 0xffff for 16 bits, etc.), while for false values all bits are set to zero.
// let us assume the following code runs with 128-bit registers
v_uint8 a; // a = {0, 1, 2, ..., 15}
v_uint8 b; // b = {15, 14, 13, ..., 0}
v_uint8 c = a < b;
/*
let us look at the first 4 values in binary
a = |00000000|00000001|00000010|00000011|
b = |00001111|00001110|00001101|00001100|
c = |11111111|11111111|11111111|11111111|
If we store the values of c and print them as integers, we will get 255 for true values and 0 for false values.
*/
---
// In a computer supporting 256-bit registers
v_int32 a; // a = {1, 2, 3, 4, 5, 6, 7, 8}
v_int32 b; // b = {8, 7, 6, 5, 4, 3, 2, 1}
v_int32 c = (a < b); // c = {-1, -1, -1, -1, 0, 0, 0, 0}
/*
The true values are 0xffffffff, which in signed 32-bit integer representation is equal to -1.
*/
<br>
* **Min/Max operations**: We can use the *v_min()* and *v_max()* functions to obtain registers containing the element-wise min or max of the two registers:
v_int32 a; // {a1, ..., an}
v_int32 b; // {b1, ..., bn}
v_int32 mn = v_min(a, b); // {min(a1, b1), ..., min(an, bn)}
v_int32 mx = v_max(a, b); // {max(a1, b1), ..., max(an, bn)}
<br>
@note Comparison and min/max operators are not available for 64-bit integers. Bitwise shift and logic operators are available only for integer values. Bitwise shift is available only for 16-, 32- and 64-bit registers.
### Reduce and Mask
* **Reduce Operations**: The *v_reduce_min()*, *v_reduce_max()* and *v_reduce_sum()* functions return a single value denoting the min, max or sum of the entire register:
v_int32 a; // a = {a1, ..., an}
int mn = v_reduce_min(a); // mn = min(a1, ..., an)
int sum = v_reduce_sum(a); // sum = a1 + ... + an
<br>
* **Mask Operations**: Mask operations allow us to replicate conditionals in wide registers. These include:
* *v_check_all()* - Returns a bool, which is true if all the values in the register are less than zero.
* *v_check_any()* - Returns a bool, which is true if any value in the register is less than zero.
* *v_select()* - Returns a register, which blends two registers, based on a mask.
v_uint8 a; // {a1, .., an}
v_uint8 b; // {b1, ..., bn}
v_uint8 mask; // {0xff, 0, 0, 0xff, ..., 0xff, 0}
v_uint8 Res = v_select(mask, a, b); // {a1, b2, b3, a4, ..., an-1, bn}
/*
"Res" will contain the value from "a" if mask is true (all bits set to 1),
and value from "b" if mask is false (all bits set to 0)
We can use comparison operators to generate a mask and v_select to obtain results based on conditionals.
It is common to set all values of b to 0. Thus, v_select will give values of "a" or 0 based on the mask.
*/
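For instance, here is a hedged sketch of an element-wise ReLU (clamping negatives to zero) built from a comparison mask and *v_select()*; the `relu` helper is hypothetical:
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>

// max(x, 0) per lane: the comparison yields an all-ones mask where x > 0,
// and v_select picks x there and 0 elsewhere.
cv::v_float32 relu(const cv::v_float32& x)
{
    cv::v_float32 zero = cv::vx_setall_f32(0.f);
    return cv::v_select(x > zero, x, zero);
}
@endcode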
## Demonstration
In the following section, we will vectorize a simple convolution function for a single channel and compare the results to a scalar implementation.
@note Not all algorithms are improved by manual vectorization. In fact, in certain cases, the compiler may *autovectorize* the code, thus producing faster results for scalar implementations.
You may learn more about convolution in the previous tutorial; we reuse its naive implementation and compare it to the vectorized version.
The full tutorial code is [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/univ_intrin/univ_intrin.cpp).
### Vectorizing Convolution
We will first implement a 1-D convolution and then vectorize it. The 2-D vectorized convolution will perform 1-D convolution across the rows to produce the correct results.
#### 1-D Convolution: Scalar
@snippet univ_intrin.cpp convolution-1D-scalar
1. We first set up variables and make a border on both sides of the src matrix, to take care of edge cases.
@snippet univ_intrin.cpp convolution-1D-border
2. For the main loop, we select an index *i* and offset it on both sides along with the kernel, using the *k* variable. We store the result in *value* and add it to the *dst* matrix.
@snippet univ_intrin.cpp convolution-1D-scalar-main
#### 1-D Convolution: Vector
We will now look at the vectorized version of 1-D convolution.
@snippet univ_intrin.cpp convolution-1D-vector
1. In our case, the kernel is a float. Since the kernel's datatype is the largest, we convert src to float32, forming *src_32*. We also make a border like we did for the naive case.
@snippet univ_intrin.cpp convolution-1D-convert
2. Now, for each column in the *kernel*, we calculate the scalar product of the value with all *window* vectors of length `step`. We add these values to the values already stored in *ans*.
@snippet univ_intrin.cpp convolution-1D-main
* We declare pointers to *src_32* and the kernel, and run a loop over the kernel elements
@snippet univ_intrin.cpp convolution-1D-main-h1
* We load a register with the current kernel element. A window is shifted from *0* to *len - step*, and its product with the *kernel_wide* array is accumulated into the values stored in *ans*.
@snippet univ_intrin.cpp convolution-1D-main-h2
* Since the length might not be divisible by *step*, we take care of the remaining values directly. The number of *tail* values will always be less than *step* and will not affect the performance significantly. We store all the values into *ans*, which is a float pointer. We can also store them directly in a `Mat` object
@snippet univ_intrin.cpp convolution-1D-main-h3
* Here is an iterative example:
kernel: {k1, k2, k3}
src: ...|a0|a1|a2|a3|a4|...
iter1:
for each idx i in (0, len), 'step' idx at a time
kernel_wide: |k1|k1|k1|k1|
window: |a0|a1|a2|a3|
ans: ...| 0| 0| 0| 0|...
sum = ans + window * kernel_wide
= |a0 * k1|a1 * k1|a2 * k1|a3 * k1|
iter2:
kernel_wide: |k2|k2|k2|k2|
window: |a1|a2|a3|a4|
ans: ...|a0 * k1|a1 * k1|a2 * k1|a3 * k1|...
sum = ans + window * kernel_wide
= |a0 * k1 + a1 * k2|a1 * k1 + a2 * k2|a2 * k1 + a3 * k2|a3 * k1 + a4 * k2|
iter3:
kernel_wide: |k3|k3|k3|k3|
window: |a2|a3|a4|a5|
ans: ...|a0 * k1 + a1 * k2|a1 * k1 + a2 * k2|a2 * k1 + a3 * k2|a3 * k1 + a4 * k2|...
sum = ans + window * kernel_wide
= |a0*k1 + a1*k2 + a2*k3|a1*k1 + a2*k2 + a3*k3|a2*k1 + a3*k2 + a4*k3|a3*k1 + a4*k2 + a5*k3|
@note The function parameters also include *row*, *rowk* and *len*. These values are used when using the function as an intermediate step of 2-D convolution.
#### 2-D Convolution
Suppose our kernel has *ksize* rows. To compute the values for a particular row, we compute the 1-D convolution of the previous *ksize/2* and the next *ksize/2* rows with the corresponding kernel rows. The final value is simply the sum of the individual 1-D convolutions, as the sketch below illustrates.
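A hedged scalar sketch of that row-wise accumulation (square kernel assumed, borders skipped; the tutorial's vectorized version follows in the snippets):
@code{.cpp}
#include <opencv2/core.hpp>

// dst(i, j) accumulates the 1-D convolution of each neighbouring row of src
// with the corresponding kernel row.
void conv2d_by_rows(const cv::Mat& src, const cv::Mat& kernel, cv::Mat& dst)
{
    CV_Assert(src.type() == CV_32F && kernel.type() == CV_32F && kernel.rows == kernel.cols);
    int half = kernel.rows / 2;
    dst = cv::Mat::zeros(src.size(), CV_32F);
    for (int i = half; i < src.rows - half; ++i)
        for (int r = 0; r < kernel.rows; ++r)         // each kernel row ...
            for (int j = half; j < src.cols - half; ++j)
                for (int c = 0; c < kernel.cols; ++c) // ... is 1-D convolved and summed
                    dst.at<float>(i, j) += src.at<float>(i + r - half, j + c - half)
                                           * kernel.at<float>(r, c);
}
@endcode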
@snippet univ_intrin.cpp convolution-2D
1. We first initialize variables and make a border above and below the *src* matrix. The left and right sides are handled by the 1-D convolution function.
@snippet univ_intrin.cpp convolution-2D-init
2. For each row, we calculate the 1-D convolution of the rows above and below it. We then add the values to the *dst* matrix.
@snippet univ_intrin.cpp convolution-2D-main
3. We finally convert the *dst* matrix to an *8-bit* `unsigned char` matrix.
@snippet univ_intrin.cpp convolution-2D-conv
Results
-------
In the tutorial, we used a horizontal gradient kernel. We obtain the same output image for both methods.
Improvement in runtime varies and will depend on the SIMD capabilities available in your CPU.

View File

@ -0,0 +1,95 @@
# DNN-based Face Detection And Recognition {#tutorial_dnn_face}
@tableofcontents
@prev_tutorial{tutorial_dnn_text_spotting}
@next_tutorial{pytorch_cls_tutorial_dnn_conversion}
| | |
| -: | :- |
| Original Author | Chengrui Wang, Yuantao Feng |
| Compatibility | OpenCV >= 4.5.1 |
## Introduction
In this section, we introduce the DNN-based module for face detection and face recognition. The models can be obtained in [Models](#Models). The usage of `FaceDetectorYN` and `FaceRecognizer` is presented in [Usage](#Usage).
## Models
There are two models (ONNX format) pre-trained and required for this module:
- [Face Detection](https://github.com/ShiqiYu/libfacedetection.train/tree/master/tasks/task1/onnx):
- Size: 337KB
- Results on the WIDER Face Val set: 0.830 (easy), 0.824 (medium), 0.708 (hard)
- [Face Recognition](https://drive.google.com/file/d/1ClK9WiB492c5OZFKveF3XiHCejoOxINW/view?usp=sharing)
- Size: 36.9MB
- Results:
| Database | Accuracy | Threshold (normL2) | Threshold (cosine) |
| -------- | -------- | ------------------ | ------------------ |
| LFW | 99.60% | 1.128 | 0.363 |
| CALFW | 93.95% | 1.149 | 0.340 |
| CPLFW | 91.05% | 1.204 | 0.275 |
| AgeDB-30 | 94.90% | 1.202 | 0.277 |
| CFP-FP | 94.80% | 1.253 | 0.212 |
## Usage
### DNNFaceDetector
```cpp
// Initialize FaceDetectorYN
Ptr<FaceDetectorYN> faceDetector = FaceDetectorYN::create(onnx_path, "", image.size(), score_thresh, nms_thresh, top_k);
// Forward
Mat faces;
faceDetector->detect(image, faces);
```
The detection output `faces` is a two-dimensional array of type CV_32F, whose rows are the detected face instances and whose columns are the location of a face and 5 facial landmarks. The format of each row is as follows:
```
x1, y1, w, h, x_re, y_re, x_le, y_le, x_nt, y_nt, x_rcm, y_rcm, x_lcm, y_lcm
```
where `x1, y1, w, h` are the top-left coordinates, width and height of the face bounding box, and `{x, y}_{re, le, nt, rcm, lcm}` stands for the coordinates of the right eye, left eye, nose tip, right corner of the mouth and left corner of the mouth, respectively.
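As a hedged sketch following the row layout above (the `drawFaces` helper is hypothetical; the drawing functions come from `imgproc`), the detections could be visualized like this:
```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Draw each detected box and its five landmarks, assuming the 14-column
// row layout described above.
void drawFaces(cv::Mat& image, const cv::Mat& faces)
{
    for (int i = 0; i < faces.rows; ++i)
    {
        cv::Rect box(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)),
                     int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3)));
        cv::rectangle(image, box, cv::Scalar(0, 255, 0), 2);
        for (int j = 0; j < 5; ++j) // columns 4..13 hold (x, y) landmark pairs
            cv::circle(image, cv::Point(int(faces.at<float>(i, 4 + 2 * j)),
                                        int(faces.at<float>(i, 5 + 2 * j))),
                       2, cv::Scalar(0, 0, 255), -1);
    }
}
```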
### Face Recognition
Following face detection, run the code below to extract face features from a facial image.
```cpp
// Initialize FaceRecognizer with model path (cv::String)
Ptr<FaceRecognizer> faceRecognizer = FaceRecognizer::create(model_path, "");
// Align and crop the facial image using the first face of the faces detected by dnn_face::DNNFaceDetector
Mat aligned_face;
faceRecognizer->alignCrop(image, faces.row(0), aligned_face);
// Run feature extraction with given aligned_face (cv::Mat)
Mat feature;
faceRecognizer->feature(aligned_face, feature);
feature = feature.clone();
```
After obtaining face features *feature1* and *feature2* of two facial images, run the code below to calculate the identity discrepancy between the two faces.
```cpp
// Calculating the discrepancy between two face features by using cosine distance.
double cos_score = faceRecognizer->match(feature1, feature2, FaceRecognizer::DisType::COSINE);
// Calculating the discrepancy between two face features by using normL2 distance.
double L2_score = faceRecognizer->match(feature1, feature2, FaceRecognizer::DisType::NORM_L2);
```
For example, two faces have the same identity if the cosine distance is greater than or equal to 0.363, or if the normL2 distance is less than or equal to 1.128.
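Continuing from the scores computed above, the decision rule reads, as a sketch:
```cpp
// Same identity if either metric clears its LFW threshold from the table above.
bool same_identity = (cos_score >= 0.363) || (L2_score <= 1.128);
```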
## Reference:
- https://github.com/ShiqiYu/libfacedetection
- https://github.com/ShiqiYu/libfacedetection.train
- https://github.com/zhongyy/SFace
## Acknowledgement
Thanks [Professor Shiqi Yu](https://github.com/ShiqiYu/) and [Yuantao Feng](https://github.com/fengyuentau) for training and providing the face detection model.
Thanks [Professor Deng](http://www.whdeng.cn/), [PhD Candidate Zhong](https://github.com/zhongyy/) and [Master Candidate Wang](https://github.com/crywang/) for training and providing the face recognition model.

View File

@ -3,7 +3,7 @@
@tableofcontents
@prev_tutorial{tutorial_dnn_OCR}
@next_tutorial{pytorch_cls_tutorial_dnn_conversion}
@next_tutorial{tutorial_dnn_face}
| | |
| -: | :- |
@ -26,6 +26,11 @@ Before recognition, you should `setVocabulary` and `setDecodeType`.
- `T` is the sequence length
- `B` is the batch size (only support `B=1` in inference)
- and `Dim` is the length of vocabulary +1('Blank' of CTC is at the index=0 of Dim).
- "CTC-prefix-beam-search", the output of the text recognition model should be a probability matrix same with "CTC-greedy".
- The algorithm is proposed at Hannun's [paper](https://arxiv.org/abs/1408.2873).
- `setDecodeOptsCTCPrefixBeamSearch` could be used to control the beam size in search step.
- To futher optimize for big vocabulary, a new option `vocPruneSize` is introduced to avoid iterate the whole vocbulary
but only the number of `vocPruneSize` tokens with top probabilty.
@ref cv::dnn::TextRecognitionModel::recognize() is the main function for text recognition.
- The input image should be a cropped text image or an image with `roiRects`

View File

@ -10,6 +10,7 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
- @subpage tutorial_dnn_custom_layers
- @subpage tutorial_dnn_OCR
- @subpage tutorial_dnn_text_spotting
- @subpage tutorial_dnn_face
#### PyTorch models with OpenCV
In this section you will find the guides, which describe how to run classification, segmentation and detection PyTorch DNN models with OpenCV.

View File

@ -589,6 +589,14 @@ Some features have been added specifically for automated build environments, lik
| `OPENCV_CMAKE_HOOKS_DIR` | _empty_ | OpenCV allows you to customize the configuration process by adding custom hook scripts at each stage and substage. CMake scripts with predefined names located in the directory set by this variable will be included before and after various configuration stages. Examples of file names: _CMAKE_INIT.cmake_, _PRE_CMAKE_BOOTSTRAP.cmake_, _POST_CMAKE_BOOTSTRAP.cmake_, etc. Other names are not documented and can be found in the project cmake files by searching for the _ocv_cmake_hook_ macro calls. |
| `OPENCV_DUMP_HOOKS_FLOW` | _OFF_ | Enables a debug message print on each cmake hook script call. |
## Contrib Modules
The following build options are utilized in `opencv_contrib` modules. As stated [previously](#tutorial_config_reference_general_contrib), these extra modules can be added to your final build by setting the `OPENCV_EXTRA_MODULES_PATH` option.
| Option | Default | Description |
| ------ | ------- | ----------- |
| `WITH_CLP` | _OFF_ | Will add [coinor](https://projects.coin-or.org/Clp) linear programming library build support, which is required in the `videostab` module. Make sure to install the development libraries of coinor-clp. |
# Other non-documented options
@ -605,7 +613,6 @@ Some features have been added specifically for automated build environments, lik
`WITH_CPUFEATURES`
`WITH_EIGEN`
`WITH_OPENVX`
`WITH_CLP`
`WITH_DIRECTX`
`WITH_VA`
`WITH_LAPACK`

View File

@ -1,7 +1,7 @@
Using OpenCV with gcc and CMake {#tutorial_linux_gcc_cmake}
===============================
@prev_tutorial{tutorial_linux_install}
@prev_tutorial{tutorial_linux_gdb_pretty_printer}
@next_tutorial{tutorial_linux_eclipse}
| | |

Binary file not shown.

After

Width: | Height: | Size: 35 KiB

View File

@ -0,0 +1,38 @@
Using OpenCV with gdb-powered IDEs {#tutorial_linux_gdb_pretty_printer}
=====================
@prev_tutorial{tutorial_linux_install}
@next_tutorial{tutorial_linux_gcc_cmake}
| | |
| -: | :- |
| Original author | Egor Smirnov |
| Compatibility | OpenCV >= 4.0 |
@tableofcontents
# Capabilities {#tutorial_linux_gdb_pretty_printer_capabilities}
This pretty-printer can show the element type, the `is_continuous` and `is_submatrix` flags and the (possibly truncated) matrix itself. It is known to work in CLion, VS Code and gdb.
![Clion example](images/example.png)
# Installation {#tutorial_linux_gdb_pretty_printer_installation}
Move into `opencv/samples/gdb/`. Place `mat_pretty_printer.py` in a convenient place, rename `gdbinit` to `.gdbinit` and move it into your home folder. Change the 'source' line of `.gdbinit` to point to your `mat_pretty_printer.py` path.
In order to check the version of Python bundled with your gdb, use the following commands from the gdb shell:
python
import sys
print(sys.version_info)
end
If the version of Python 3 installed on your system doesn't match the version in gdb, create a new virtual environment with the exact same version, install `numpy` and change the path to python3 in `.gdbinit` accordingly.
# Usage {#tutorial_linux_gdb_pretty_printer_usage}
The fields in a debugger prefixed with `view_` are pseudo-fields added for convenience; the rest are left as is.
If you feel that the number of elements in the truncated view is too low, you can edit `mat_pretty_printer.py` - `np.set_printoptions` controls everything matrix display-related.

View File

@ -1,7 +1,7 @@
Installation in Linux {#tutorial_linux_install}
=====================
@next_tutorial{tutorial_linux_gcc_cmake}
@next_tutorial{tutorial_linux_gdb_pretty_printer}
| | |
| -: | :- |

View File

@ -6,6 +6,7 @@ Introduction to OpenCV {#tutorial_table_of_content_introduction}
##### Linux
- @subpage tutorial_linux_install
- @subpage tutorial_linux_gdb_pretty_printer
- @subpage tutorial_linux_gcc_cmake
- @subpage tutorial_linux_eclipse

View File

@ -13,6 +13,8 @@ Working with a boosted cascade of weak classifiers includes two major stages: th
To support this tutorial, several official OpenCV applications will be used: [opencv_createsamples](https://github.com/opencv/opencv/tree/master/apps/createsamples), [opencv_annotation](https://github.com/opencv/opencv/tree/master/apps/annotation), [opencv_traincascade](https://github.com/opencv/opencv/tree/master/apps/traincascade) and [opencv_visualisation](https://github.com/opencv/opencv/tree/master/apps/visualisation).
@note Createsamples and traincascade are disabled since OpenCV 4.0. Consider using these apps from the 3.4 branch for training a cascade classifier. The model format is the same between 3.4 and 4.x.
### Important notes
- If you come across any tutorial mentioning the old opencv_haartraining tool <i>(which is deprecated and still using the OpenCV 1.x interface)</i>, then please ignore that tutorial and stick to the opencv_traincascade tool. This tool is a newer version, written in C++ in accordance with the OpenCV 2.x and OpenCV 3.x API. The opencv_traincascade tool supports both HAAR-like wavelet features @cite Viola01 and LBP (Local Binary Patterns) @cite Liao2007 features. LBP features yield integer precision, in contrast to HAAR features, which yield floating point precision, so both training and detection with LBP are several times faster than with HAAR features. Regarding the LBP and HAAR detection quality, it mainly depends on the training data used and the training parameters selected. It's possible to train an LBP-based classifier that will provide almost the same quality as a HAAR-based one, within a percentage of the training time.

View File

@ -1309,7 +1309,6 @@ CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoint
InputArray rvec = noArray(), InputArray tvec = noArray(),
OutputArray reprojectionError = noArray() );
/** @brief Draw axes of the world/object coordinate system from pose estimation. @sa solvePnP
@param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
@ -1611,6 +1610,76 @@ unit length.
*/
CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
/** @brief Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
inliers that pass the check.
@param points1 Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).
@param points2 Array of the second image points of the same size and format as points1 .
@param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
@param distCoeffs1 Input/output vector of distortion coefficients, the same as in
@ref calibrateCamera.
@param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
@param distCoeffs2 Input/output vector of distortion coefficients, the same as in
@ref calibrateCamera.
@param E The output essential matrix.
@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
that performs a change of basis from the first camera's coordinate system to the second camera's
coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
described below.
@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
length.
@param method Method for computing an essential matrix.
- @ref RANSAC for the RANSAC algorithm.
- @ref LMEDS for the LMedS algorithm.
@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
confidence (probability) that the estimated matrix is correct.
@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
recover pose. In the output mask, only the inliers that pass the cheirality check are kept.
This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies
possible pose hypotheses by doing cheirality check. The cheirality check means that the
triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.
This function can be used to process the output E and mask from @ref findEssentialMat. In this
scenario, points1 and points2 are the same input as for findEssentialMat:
@code
// Example. Estimation of the essential matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
// Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
// Output: Essential matrix, relative rotation and relative translation.
Mat E, R, t, mask;
recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
@endcode
*/
CV_EXPORTS_W int recoverPose( InputArray points1, InputArray points2,
InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
OutputArray E, OutputArray R, OutputArray t,
int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0,
InputOutputArray mask = noArray());
/** @brief Recovers the relative camera rotation and the translation from an estimated essential
matrix and the corresponding points in two images, using cheirality check. Returns the number of
inliers that pass the check.
@ -2303,6 +2372,7 @@ void initInverseRectificationMap( InputArray cameraMatrix, InputArray distCoeffs
InputArray R, InputArray newCameraMatrix,
const Size& size, int m1type, OutputArray map1, OutputArray map2 );
//! initializes maps for #remap for wide-angle
CV_EXPORTS
float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs,

View File

@ -9,9 +9,9 @@ static inline double cbrt(double x) { return (double)cv::cubeRoot((float)x); };
namespace cv {
using namespace std;
static void solveQuartic(const double *factors, double *realRoots) {
static
void solveQuartic(const double *factors, double *realRoots)
{
const double &a4 = factors[0];
const double &a3 = factors[1];
const double &a2 = factors[2];
@ -31,29 +31,29 @@ static void solveQuartic(const double *factors, double *realRoots) {
double q3 = (72 * r4 * p4 - 2 * p4 * p4 * p4 - 27 * q4 * q4) / 432; // /=2
double t; // *=2
complex<double> w;
std::complex<double> w;
if (q3 >= 0)
w = -sqrt(static_cast<complex<double> >(q3 * q3 - p3 * p3 * p3)) - q3;
w = -std::sqrt(static_cast<std::complex<double> >(q3 * q3 - p3 * p3 * p3)) - q3;
else
w = sqrt(static_cast<complex<double> >(q3 * q3 - p3 * p3 * p3)) - q3;
w = std::sqrt(static_cast<std::complex<double> >(q3 * q3 - p3 * p3 * p3)) - q3;
if (w.imag() == 0.0) {
w.real(cbrt(w.real()));
w.real(std::cbrt(w.real()));
t = 2.0 * (w.real() + p3 / w.real());
} else {
w = pow(w, 1.0 / 3);
t = 4.0 * w.real();
}
complex<double> sqrt_2m = sqrt(static_cast<complex<double> >(-2 * p4 / 3 + t));
std::complex<double> sqrt_2m = sqrt(static_cast<std::complex<double> >(-2 * p4 / 3 + t));
double B_4A = -a3 / (4 * a4);
double complex1 = 4 * p4 / 3 + t;
#if defined(__clang__) && defined(__arm__) && (__clang_major__ == 3 || __clang_major__ == 4) && !defined(__ANDROID__)
// details: https://github.com/opencv/opencv/issues/11135
// details: https://github.com/opencv/opencv/issues/11056
complex<double> complex2 = 2 * q4;
complex2 = complex<double>(complex2.real() / sqrt_2m.real(), 0);
std::complex<double> complex2 = 2 * q4;
complex2 = std::complex<double>(complex2.real() / sqrt_2m.real(), 0);
#else
complex<double> complex2 = 2 * q4 / sqrt_2m;
std::complex<double> complex2 = 2 * q4 / sqrt_2m;
#endif
double sqrt_2m_rh = sqrt_2m.real() / 2;
double sqrt1 = sqrt(-(complex1 + complex2)).real() / 2;

View File

@ -399,6 +399,31 @@ protected:
}
};
// Find essential matrix given undistorted points and two cameras.
static Mat findEssentialMat_( InputArray _points1, InputArray _points2,
InputArray cameraMatrix1, InputArray cameraMatrix2,
int method, double prob, double threshold, OutputArray _mask)
{
// Scale the points back. We use "arithmetic mean" between the supplied two camera matrices.
// Thanks to such 2-stage procedure RANSAC threshold still makes sense, because the undistorted
// and rescaled points have a similar value range to the original ones.
Mat _pointsTransformed1, _pointsTransformed2;
Mat cm1 = cameraMatrix1.getMat(), cm2 = cameraMatrix2.getMat(), cm0;
Mat(cm1 + cm2).convertTo(cm0, CV_64F, 0.5);
CV_Assert(cm0.rows == 3 && cm0.cols == 3);
CV_Assert(std::abs(cm0.at<double>(2, 0)) < 1e-3 &&
std::abs(cm0.at<double>(2, 1)) < 1e-3 &&
std::abs(cm0.at<double>(2, 2) - 1.) < 1e-3);
Mat affine = cm0.rowRange(0, 2);
transform(_points1, _pointsTransformed1, affine);
transform(_points2, _pointsTransformed2, affine);
return findEssentialMat(_pointsTransformed1, _pointsTransformed2, cm0, method, prob, threshold, _mask);
}
// Input should be a vector of n 2D points or a Nx2 matrix
Mat findEssentialMat( InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
int method, double prob, double threshold,
@ -485,25 +510,10 @@ Mat findEssentialMat( InputArray _points1, InputArray _points2,
CV_INSTRUMENT_REGION();
// Undistort image points, bring them to 3x3 identity "camera matrix"
Mat _pointsUntistorted1, _pointsUntistorted2;
undistortPoints(_points1, _pointsUntistorted1, cameraMatrix1, distCoeffs1);
undistortPoints(_points2, _pointsUntistorted2, cameraMatrix2, distCoeffs2);
// Scale the points back. We use "arithmetic mean" between the supplied two camera matrices.
// Thanks to such 2-stage procedure RANSAC threshold still makes sense, because the undistorted
// and rescaled points have a similar value range to the original ones.
Mat cm1 = cameraMatrix1.getMat(), cm2 = cameraMatrix2.getMat(), cm0;
Mat(cm1 + cm2).convertTo(cm0, CV_64F, 0.5);
CV_Assert(cm0.rows == 3 && cm0.cols == 3);
CV_Assert(std::abs(cm0.at<double>(2, 0)) < 1e-3 &&
std::abs(cm0.at<double>(2, 1)) < 1e-3 &&
std::abs(cm0.at<double>(2, 2) - 1.) < 1e-3);
Mat affine = cm0.rowRange(0, 2);
transform(_pointsUntistorted1, _pointsUntistorted1, affine);
transform(_pointsUntistorted2, _pointsUntistorted2, affine);
return findEssentialMat(_pointsUntistorted1, _pointsUntistorted2, cm0, method, prob, threshold, _mask);
Mat _pointsUndistorted1, _pointsUndistorted2;
undistortPoints(_points1, _pointsUndistorted1, cameraMatrix1, distCoeffs1);
undistortPoints(_points2, _pointsUndistorted2, cameraMatrix2, distCoeffs2);
return findEssentialMat_(_pointsUndistorted1, _pointsUndistorted2, cameraMatrix1, cameraMatrix2, method, prob, threshold, _mask);
}
Mat findEssentialMat( InputArray points1, InputArray points2,
@ -520,9 +530,33 @@ Mat findEssentialMat( InputArray points1, InputArray points2,
}
int recoverPose( InputArray _points1, InputArray _points2,
InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
OutputArray E, OutputArray R, OutputArray t,
int method, double prob, double threshold,
InputOutputArray _mask)
{
CV_INSTRUMENT_REGION();
// Undistort image points, bring them to 3x3 identity "camera matrix"
Mat _pointsUndistorted1, _pointsUndistorted2;
undistortPoints(_points1, _pointsUndistorted1, cameraMatrix1, distCoeffs1);
undistortPoints(_points2, _pointsUndistorted2, cameraMatrix2, distCoeffs2);
// Get essential matrix.
Mat _E = findEssentialMat_(_pointsUndistorted1, _pointsUndistorted2, cameraMatrix1, cameraMatrix2,
method, prob, threshold, _mask);
CV_Assert(_E.cols == 3 && _E.rows == 3);
E.create(3, 3, _E.type());
_E.copyTo(E);
return recoverPose(_E, _pointsUndistorted1, _pointsUndistorted2, Mat::eye(3,3, CV_64F), R, t, _mask);
}
int recoverPose( InputArray E, InputArray _points1, InputArray _points2,
InputArray _cameraMatrix, OutputArray _R, OutputArray _t, double distanceThresh,
InputOutputArray _mask, OutputArray triangulatedPoints)
InputArray _cameraMatrix, OutputArray _R, OutputArray _t, double distanceThresh,
InputOutputArray _mask, OutputArray triangulatedPoints)
{
CV_INSTRUMENT_REGION();

View File

@ -897,7 +897,7 @@ void CV_InitInverseRectificationMapTest::prepare_to_validation(int/* test_case_i
Mat _new_cam0 = zero_new_cam ? test_mat[INPUT][0] : test_mat[INPUT][3];
Mat _mapx(img_size, CV_32F), _mapy(img_size, CV_32F);
double a[9], d[5]={0,0,0,0,0}, R[9]={1, 0, 0, 0, 1, 0, 0, 0, 1}, a1[9];
double a[9], d[5]={0., 0., 0., 0. , 0.}, R[9]={1., 0., 0., 0., 1., 0., 0., 0., 1.}, a1[9];
Mat _a(3, 3, CV_64F, a), _a1(3, 3, CV_64F, a1);
Mat _d(_d0.rows,_d0.cols, CV_MAKETYPE(CV_64F,_d0.channels()),d);
Mat _R(3, 3, CV_64F, R);
@ -951,9 +951,9 @@ void CV_InitInverseRectificationMapTest::prepare_to_validation(int/* test_case_i
// Undistort
double x2 = x*x, y2 = y*y;
double r2 = x2 + y2;
double cdist = 1./(1 + (d[0] + (d[1] + d[4]*r2)*r2)*r2); // (1 + (d[5] + (d[6] + d[7]*r2)*r2)*r2) == 1 as d[5-7]=0;
double x_ = x*cdist - d[2]*2*x*y + d[3]*(r2 + 2*x2);
double y_ = y*cdist - d[3]*2*x*y + d[2]*(r2 + 2*y2);
double cdist = 1./(1. + (d[0] + (d[1] + d[4]*r2)*r2)*r2); // (1. + (d[5] + (d[6] + d[7]*r2)*r2)*r2) == 1 as d[5-7]=0;
double x_ = (x - (d[2]*2.*x*y + d[3]*(r2 + 2.*x2)))*cdist;
double y_ = (y - (d[3]*2.*x*y + d[2]*(r2 + 2.*y2)))*cdist;
// Rectify
double X = R[0]*x_ + R[1]*y_ + R[2];
@ -1807,4 +1807,78 @@ TEST(Calib3d_initUndistortRectifyMap, regression_14467)
EXPECT_LE(cvtest::norm(dst, mesh_uv, NORM_INF), 1e-3);
}
TEST(Calib3d_initInverseRectificationMap, regression_20165)
{
Size size_w_h(1280, 800);
Mat dst(size_w_h, CV_32FC2); // Reference for validation
Mat mapxy; // Output of initInverseRectificationMap()
// Camera Matrix
double k[9]={
1.5393951443032472e+03, 0., 6.7491727003047140e+02,
0., 1.5400748240626747e+03, 5.1226968329123963e+02,
0., 0., 1.
};
Mat _K(3, 3, CV_64F, k);
// Distortion
// double d[5]={0,0,0,0,0}; // Zero Distortion
double d[5]={ // Non-zero distortion
-3.4134571357400023e-03, 2.9733267766101856e-03, // K1, K2
3.6653586399031184e-03, -3.1960714017365702e-03, // P1, P2
0. // K3
};
Mat _d(1, 5, CV_64F, d);
// Rotation
//double R[9]={1., 0., 0., 0., 1., 0., 0., 0., 1.}; // Identity transform (none)
double R[9]={ // Random transform
9.6625486010428052e-01, 1.6055789378989216e-02, 2.5708706103628531e-01,
-8.0300261706161002e-03, 9.9944797497929860e-01, -3.2237617614807819e-02,
-2.5746274294459848e-01, 2.9085338870243265e-02, 9.6585039165403186e-01
};
Mat _R(3, 3, CV_64F, R);
// --- Validation --- //
initInverseRectificationMap(_K, _d, _R, _K, size_w_h, CV_32FC2, mapxy, noArray());
// Copy camera matrix
double fx, fy, cx, cy, ifx, ify, cxn, cyn;
fx = k[0]; fy = k[4]; cx = k[2]; cy = k[5];
// Copy new camera matrix
ifx = k[0]; ify = k[4]; cxn = k[2]; cyn = k[5];
// Distort Points
for( int v = 0; v < size_w_h.height; v++ )
{
for( int u = 0; u < size_w_h.width; u++ )
{
// Convert from image to pin-hole coordinates
double x = (u - cx)/fx;
double y = (v - cy)/fy;
// Undistort
double x2 = x*x, y2 = y*y;
double r2 = x2 + y2;
double cdist = 1./(1. + (d[0] + (d[1] + d[4]*r2)*r2)*r2); // (1. + (d[5] + (d[6] + d[7]*r2)*r2)*r2) == 1 as d[5-7]=0;
double x_ = (x - (d[2]*2.*x*y + d[3]*(r2 + 2.*x2)))*cdist;
double y_ = (y - (d[3]*2.*x*y + d[2]*(r2 + 2.*y2)))*cdist;
// Rectify
double X = R[0]*x_ + R[1]*y_ + R[2];
double Y = R[3]*x_ + R[4]*y_ + R[5];
double Z = R[6]*x_ + R[7]*y_ + R[8];
double x__ = X/Z;
double y__ = Y/Z;
// Convert from pin-hole to image coordinates
dst.at<Vec2f>(v, u) = Vec2f((float)(x__*ifx + cxn), (float)(y__*ify + cyn));
}
}
// Check Result
EXPECT_LE(cvtest::norm(dst, mapxy, NORM_INF), 2e-1);
}
}} // namespace

View File

@ -525,6 +525,8 @@ Sample usage of detecting and drawing chessboard corners: :
the board to make the detection more robust in various environments. Otherwise, if there is no
border and the background is dark, the outer black squares cannot be segmented properly and so the
square grouping and ordering algorithm fails.
Use gen_pattern.py (@ref tutorial_camera_calibration_pattern) to create a checkerboard.
*/
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
@ -581,6 +583,8 @@ transformation it is beneficial to use round corners for the field corners
which are located on the outside of the board. The following figure illustrates
a sample checkerboard optimized for the detection. However, any other checkerboard
can be used as well.
Use gen_pattern.py (@ref tutorial_camera_calibration_pattern) to create a checkerboard.
![Checkerboard](pics/checkerboard_radon.png)
*/
CV_EXPORTS_AS(findChessboardCornersSBWithMeta)

View File

@ -1707,10 +1707,10 @@ void Chessboard::Board::normalizeOrientation(bool bblack)
iter_bottom_left.getCell()->empty() || iter_bottom_right.getCell()->empty())
return;
float d1 = pow(top_left->top_left->x,2)+pow(top_left->top_left->y,2);
float d2 = pow((*iter_top_right)->x,2)+pow((*iter_top_right)->y,2);
float d3 = pow((*iter_bottom_left)->x,2)+pow((*iter_bottom_left)->y,2);
float d4 = pow((*iter_bottom_right)->x,2)+pow((*iter_bottom_right)->y,2);
float d1 = top_left->top_left->dot(*top_left->top_left);
float d2 = (*iter_top_right)->dot(*(*iter_top_right));
float d3 = (*iter_bottom_left)->dot(*(*iter_bottom_left));
float d4 = (*iter_bottom_right)->dot(*(*iter_bottom_right));
if(d2 <= d1 && d2 <= d3 && d2 <= d4) // top left is top right
rotateLeft();
else if(d3 <= d1 && d3 <= d2 && d3 <= d4) // top left is bottom left
@ -3924,7 +3924,7 @@ bool cv::findChessboardCornersSB(InputArray image_, Size pattern_size,
{
meta_.create(int(board.rowCount()),int(board.colCount()),CV_8UC1);
cv::Mat meta = meta_.getMat();
meta = 0;
meta.setTo(cv::Scalar::all(0));
for(int row =0;row < meta.rows-1;++row)
{
for(int col=0;col< meta.cols-1;++col)

View File

@ -2144,7 +2144,17 @@ TEST(CV_RecoverPoseTest, regression_15341)
// camera matrix with both focal lengths = 1, and principal point = (0, 0)
const Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
const Mat zeroDistCoeffs = Mat::zeros(1, 5, CV_64F);
// camera matrix with focal lengths 0.5 and 0.6 respectively and principal point = (100, 200)
double cameraMatrix2Data[] = { 0.5, 0, 100,
0, 0.6, 200,
0, 0, 1 };
const Mat cameraMatrix2( 3, 3, CV_64F, cameraMatrix2Data );
// zero and nonzero distortion coefficients
double nonZeroDistCoeffsData[] = { 0.01, 0.0001, 0, 0, 1e-04, 0.2, 0.02, 0.0002 }; // k1, k2, p1, p2, k3, k4, k5, k6
vector<Mat> distCoeffsList = {Mat::zeros(1, 5, CV_64F), Mat{1, 8, CV_64F, nonZeroDistCoeffsData}};
const auto &zeroDistCoeffs = distCoeffsList[0];
int Inliers = 0;
@ -2160,14 +2170,26 @@ TEST(CV_RecoverPoseTest, regression_15341)
// Estimation of fundamental matrix using the RANSAC algorithm
Mat E, E2, R, t;
// Check pose when camera matrices are different.
for (const auto &distCoeffs: distCoeffsList)
{
E = findEssentialMat(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, RANSAC, 0.999, 1.0, mask);
recoverPose(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, E2, R, t, RANSAC, 0.999, 1.0, mask);
EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) <<
"Two big difference between the same essential matrices computed using different functions with different cameras, testcase " << testcase;
EXPECT_EQ(0, (int)mask[13]) << "Detecting outliers in function failed with different cameras, testcase " << testcase;
}
// Check pose when camera matrices are the same.
E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
E2 = findEssentialMat(points1, points2, cameraMatrix, zeroDistCoeffs, cameraMatrix, zeroDistCoeffs, RANSAC, 0.999, 1.0, mask);
EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) <<
"Two big difference between the same essential matrices computed using different functions, testcase " << testcase;
EXPECT_EQ(0, (int)mask[13]) << "Detecting outliers in function findEssentialMat failed, testcase " << testcase;
"Two big difference between the same essential matrices computed using different functions with same cameras, testcase " << testcase;
EXPECT_EQ(0, (int)mask[13]) << "Detecting outliers in function findEssentialMat failed with same cameras, testcase " << testcase;
points2[12] = Point2f(0.0f, 0.0f); // provoke another outlier detection for recover Pose
Inliers = recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
EXPECT_EQ(0, (int)mask[12]) << "Detecting outliers in function failed, testcase " << testcase;
EXPECT_EQ(0, (int)mask[12]) << "Detecting outliers in function failed with same cameras, testcase " << testcase;
}
else // testcase with mat input data
{
@ -2187,14 +2209,26 @@ TEST(CV_RecoverPoseTest, regression_15341)
// Estimation of fundamental matrix using the RANSAC algorithm
Mat E, E2, R, t;
// Check pose when camera matrices are different.
for (const auto &distCoeffs: distCoeffsList)
{
E = findEssentialMat(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, RANSAC, 0.999, 1.0, mask);
recoverPose(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, E2, R, t, RANSAC, 0.999, 1.0, mask);
EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) <<
"Two big difference between the same essential matrices computed using different functions with different cameras, testcase " << testcase;
EXPECT_EQ(0, (int)mask.at<unsigned char>(13)) << "Detecting outliers in function failed with different cameras, testcase " << testcase;
}
// Check pose when camera matrices are the same.
E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
E2 = findEssentialMat(points1, points2, cameraMatrix, zeroDistCoeffs, cameraMatrix, zeroDistCoeffs, RANSAC, 0.999, 1.0, mask);
EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) <<
"Two big difference between the same essential matrices computed using different functions, testcase " << testcase;
EXPECT_EQ(0, (int)mask.at<unsigned char>(13)) << "Detecting outliers in function findEssentialMat failed, testcase " << testcase;
"Two big difference between the same essential matrices computed using different functions with same cameras, testcase " << testcase;
EXPECT_EQ(0, (int)mask.at<unsigned char>(13)) << "Detecting outliers in function findEssentialMat failed with same cameras, testcase " << testcase;
points2.at<Point2f>(12) = Point2f(0.0f, 0.0f); // provoke an outlier detection
Inliers = recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
EXPECT_EQ(0, (int)mask.at<unsigned char>(12)) << "Detecting outliers in function failed, testcase " << testcase;
EXPECT_EQ(0, (int)mask.at<unsigned char>(12)) << "Detecting outliers in function failed with same cameras, testcase " << testcase;
}
EXPECT_EQ(Inliers, point_count - invalid_point_count) <<
"Number of inliers differs from expected number of inliers, testcase " << testcase;

View File

@ -153,6 +153,14 @@ if(OPENCV_CORE_EXCLUDE_C_API)
ocv_target_compile_definitions(${the_module} PRIVATE "OPENCV_EXCLUDE_C_API=1")
endif()
if(OPENCV_DISABLE_THREAD_SUPPORT)
ocv_target_compile_definitions(${the_module} PUBLIC "OPENCV_DISABLE_THREAD_SUPPORT=1")
endif()
if(OPENCV_SEMIHOSTING)
ocv_target_compile_definitions(${the_module} PRIVATE "-DOPENCV_SEMIHOSTING")
endif(OPENCV_SEMIHOSTING)
if(HAVE_HPX)
ocv_target_link_libraries(${the_module} LINK_PRIVATE "${HPX_LIBRARIES}")
endif()

View File

@ -116,6 +116,65 @@ String dumpRange(const Range& argument)
}
}
CV_WRAP static inline
int testOverwriteNativeMethod(int argument)
{
return argument;
}
CV_WRAP static inline
String testReservedKeywordConversion(int positional_argument, int lambda = 2, int from = 3)
{
return format("arg=%d, lambda=%d, from=%d", positional_argument, lambda, from);
}
CV_EXPORTS_W String dumpVectorOfInt(const std::vector<int>& vec);
CV_EXPORTS_W String dumpVectorOfDouble(const std::vector<double>& vec);
CV_EXPORTS_W String dumpVectorOfRect(const std::vector<Rect>& vec);
CV_WRAP static inline
void generateVectorOfRect(size_t len, CV_OUT std::vector<Rect>& vec)
{
vec.resize(len);
if (len > 0)
{
RNG rng(12345);
Mat tmp(static_cast<int>(len), 1, CV_32SC4);
rng.fill(tmp, RNG::UNIFORM, 10, 20);
tmp.copyTo(vec);
}
}
CV_WRAP static inline
void generateVectorOfInt(size_t len, CV_OUT std::vector<int>& vec)
{
vec.resize(len);
if (len > 0)
{
RNG rng(554433);
Mat tmp(static_cast<int>(len), 1, CV_32SC1);
rng.fill(tmp, RNG::UNIFORM, -10, 10);
tmp.copyTo(vec);
}
}
CV_WRAP static inline
void generateVectorOfMat(size_t len, int rows, int cols, int dtype, CV_OUT std::vector<Mat>& vec)
{
vec.resize(len);
if (len > 0)
{
RNG rng(65431);
for (size_t i = 0; i < len; ++i)
{
vec[i].create(rows, cols, dtype);
rng.fill(vec[i], RNG::UNIFORM, 0, 10);
}
}
}
CV_WRAP static inline
void testRaiseGeneralException()
{

View File

@ -55,7 +55,7 @@
# ifdef _MSC_VER
# include <nmmintrin.h>
# if defined(_M_X64)
# define CV_POPCNT_U64 _mm_popcnt_u64
# define CV_POPCNT_U64 (int)_mm_popcnt_u64
# endif
# define CV_POPCNT_U32 _mm_popcnt_u32
# else

View File

@ -707,14 +707,47 @@ __CV_ENUM_FLAGS_BITWISE_XOR_EQ (EnumType, EnumType)
# endif
#endif
/****************************************************************************************\
* CV_NODISCARD_STD attribute (C++17) *
* encourages the compiler to issue a warning if the return value is discarded *
\****************************************************************************************/
#ifndef CV_NODISCARD_STD
# ifndef __has_cpp_attribute
// workaround preprocessor non-compliance https://reviews.llvm.org/D57851
# define __has_cpp_attribute(__x) 0
# endif
# if __has_cpp_attribute(nodiscard)
# define CV_NODISCARD_STD [[nodiscard]]
# elif __cplusplus >= 201703L
// available when compiler is C++17 compliant
# define CV_NODISCARD_STD [[nodiscard]]
# elif defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L
// available with VS2017 v15.3+ with /std:c++17 or higher; works on functions and classes
# define CV_NODISCARD_STD [[nodiscard]]
# elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 700) && (__cplusplus >= 201103L)
// available with GCC 7.0+; works on functions, works or silently fails on classes
# define CV_NODISCARD_STD [[nodiscard]]
# elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 408) && (__cplusplus >= 201103L)
// available with GCC 4.8+ but it usually does nothing and can fail noisily -- therefore not used
// define CV_NODISCARD_STD [[gnu::warn_unused_result]]
# endif
#endif
#ifndef CV_NODISCARD_STD
# define CV_NODISCARD_STD /* nothing by default */
#endif
/****************************************************************************************\
* CV_NODISCARD attribute *
* encourages the compiler to issue a warning if the return value is discarded (C++17) *
* CV_NODISCARD attribute (deprecated, GCC only) *
* DONT USE: use instead the standard CV_NODISCARD_STD macro above *
* this legacy method silently fails to issue warning until some version *
* after gcc 6.3.0. Yet with gcc 7+ you can use the above standard method *
* which makes this method useless. Don't use it. *
* @deprecated use instead CV_NODISCARD_STD *
\****************************************************************************************/
#ifndef CV_NODISCARD
# if defined(__GNUC__)
# define CV_NODISCARD __attribute__((__warn_unused_result__)) // at least available with GCC 3.4
# define CV_NODISCARD __attribute__((__warn_unused_result__))
# elif defined(__clang__) && defined(__has_attribute)
# if __has_attribute(__warn_unused_result__)
# define CV_NODISCARD __attribute__((__warn_unused_result__))

File diff suppressed because it is too large

View File

@ -1183,14 +1183,14 @@ public:
The method creates a square diagonal matrix from specified main diagonal.
@param d One-dimensional matrix that represents the main diagonal.
*/
static Mat diag(const Mat& d);
CV_NODISCARD_STD static Mat diag(const Mat& d);
/** @brief Creates a full copy of the array and the underlying data.
The method creates a full copy of the array. The original step[] is not taken into account. So, the
array copy is a continuous array occupying total()*elemSize() bytes.
*/
Mat clone() const CV_NODISCARD;
CV_NODISCARD_STD Mat clone() const;
/** @brief Copies the matrix to another one.
@ -1358,20 +1358,20 @@ public:
@param cols Number of columns.
@param type Created matrix type.
*/
static MatExpr zeros(int rows, int cols, int type);
CV_NODISCARD_STD static MatExpr zeros(int rows, int cols, int type);
/** @overload
@param size Alternative to the matrix size specification Size(cols, rows) .
@param type Created matrix type.
*/
static MatExpr zeros(Size size, int type);
CV_NODISCARD_STD static MatExpr zeros(Size size, int type);
/** @overload
@param ndims Array dimensionality.
@param sz Array of integers specifying the array shape.
@param type Created matrix type.
*/
static MatExpr zeros(int ndims, const int* sz, int type);
CV_NODISCARD_STD static MatExpr zeros(int ndims, const int* sz, int type);
/** @brief Returns an array of all 1's of the specified size and type.
@ -1389,20 +1389,20 @@ public:
@param cols Number of columns.
@param type Created matrix type.
*/
static MatExpr ones(int rows, int cols, int type);
CV_NODISCARD_STD static MatExpr ones(int rows, int cols, int type);
/** @overload
@param size Alternative to the matrix size specification Size(cols, rows) .
@param type Created matrix type.
*/
static MatExpr ones(Size size, int type);
CV_NODISCARD_STD static MatExpr ones(Size size, int type);
/** @overload
@param ndims Array dimensionality.
@param sz Array of integers specifying the array shape.
@param type Created matrix type.
*/
static MatExpr ones(int ndims, const int* sz, int type);
CV_NODISCARD_STD static MatExpr ones(int ndims, const int* sz, int type);
/** @brief Returns an identity matrix of the specified size and type.
@ -1418,13 +1418,13 @@ public:
@param cols Number of columns.
@param type Created matrix type.
*/
static MatExpr eye(int rows, int cols, int type);
CV_NODISCARD_STD static MatExpr eye(int rows, int cols, int type);
/** @overload
@param size Alternative matrix size specification as Size(cols, rows) .
@param type Created matrix type.
*/
static MatExpr eye(Size size, int type);
CV_NODISCARD_STD static MatExpr eye(Size size, int type);
/** @brief Allocates new array data if needed.
@ -2287,7 +2287,7 @@ public:
Mat_ row(int y) const;
Mat_ col(int x) const;
Mat_ diag(int d=0) const;
Mat_ clone() const CV_NODISCARD;
CV_NODISCARD_STD Mat_ clone() const;
//! overridden forms of Mat::elemSize() etc.
size_t elemSize() const;
@ -2300,14 +2300,14 @@ public:
size_t stepT(int i=0) const;
//! overridden forms of Mat::zeros() etc. Data type is omitted, of course
static MatExpr zeros(int rows, int cols);
static MatExpr zeros(Size size);
static MatExpr zeros(int _ndims, const int* _sizes);
static MatExpr ones(int rows, int cols);
static MatExpr ones(Size size);
static MatExpr ones(int _ndims, const int* _sizes);
static MatExpr eye(int rows, int cols);
static MatExpr eye(Size size);
CV_NODISCARD_STD static MatExpr zeros(int rows, int cols);
CV_NODISCARD_STD static MatExpr zeros(Size size);
CV_NODISCARD_STD static MatExpr zeros(int _ndims, const int* _sizes);
CV_NODISCARD_STD static MatExpr ones(int rows, int cols);
CV_NODISCARD_STD static MatExpr ones(Size size);
CV_NODISCARD_STD static MatExpr ones(int _ndims, const int* _sizes);
CV_NODISCARD_STD static MatExpr eye(int rows, int cols);
CV_NODISCARD_STD static MatExpr eye(Size size);
//! some more overridden methods
Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright );
@ -2450,10 +2450,11 @@ public:
//! <0 - a diagonal from the lower half)
UMat diag(int d=0) const;
//! constructs a square diagonal matrix which main diagonal is vector "d"
static UMat diag(const UMat& d);
CV_NODISCARD_STD static UMat diag(const UMat& d, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat diag(const UMat& d) { return diag(d, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
//! returns deep copy of the matrix, i.e. the data is copied
UMat clone() const CV_NODISCARD;
CV_NODISCARD_STD UMat clone() const;
//! copies the matrix content to "m".
// It calls m.create(this->size(), this->type()).
void copyTo( OutputArray m ) const;
@ -2484,14 +2485,22 @@ public:
double dot(InputArray m) const;
//! Matlab-style matrix initialization
static UMat zeros(int rows, int cols, int type);
static UMat zeros(Size size, int type);
static UMat zeros(int ndims, const int* sz, int type);
static UMat ones(int rows, int cols, int type);
static UMat ones(Size size, int type);
static UMat ones(int ndims, const int* sz, int type);
static UMat eye(int rows, int cols, int type);
static UMat eye(Size size, int type);
CV_NODISCARD_STD static UMat zeros(int rows, int cols, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat zeros(Size size, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat zeros(int ndims, const int* sz, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat zeros(int rows, int cols, int type) { return zeros(rows, cols, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat zeros(Size size, int type) { return zeros(size, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat zeros(int ndims, const int* sz, int type) { return zeros(ndims, sz, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat ones(int rows, int cols, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat ones(Size size, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat ones(int ndims, const int* sz, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat ones(int rows, int cols, int type) { return ones(rows, cols, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat ones(Size size, int type) { return ones(size, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat ones(int ndims, const int* sz, int type) { return ones(ndims, sz, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat eye(int rows, int cols, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat eye(Size size, int type, UMatUsageFlags usageFlags /*= USAGE_DEFAULT*/);
CV_NODISCARD_STD static UMat eye(int rows, int cols, int type) { return eye(rows, cols, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
CV_NODISCARD_STD static UMat eye(Size size, int type) { return eye(size, type, USAGE_DEFAULT); } // OpenCV 5.0: remove abi compatibility overload
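A short sketch of the new overloads, assuming an OpenCV build with OpenCL support; USAGE_ALLOCATE_DEVICE_MEMORY is one of the existing UMatUsageFlags values:

#include <opencv2/core.hpp>

int main()
{
    // Old form still works via the ABI-compatibility overload:
    cv::UMat a = cv::UMat::zeros(4, 4, CV_32F);
    // New form passes an explicit allocation hint:
    cv::UMat b = cv::UMat::ones(4, 4, CV_32F, cv::USAGE_ALLOCATE_DEVICE_MEMORY);
    return (a.size() == b.size()) ? 0 : 1;
}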
//! allocates new matrix data unless the matrix already has specified size and type.
// previous data is unreferenced if needed.
@ -2757,7 +2766,7 @@ public:
SparseMat& operator = (const Mat& m);
//! creates full copy of the matrix
SparseMat clone() const CV_NODISCARD;
CV_NODISCARD_STD SparseMat clone() const;
//! copies all the data to the destination matrix. All the previous content of m is erased
void copyTo( SparseMat& m ) const;
@ -2994,7 +3003,7 @@ public:
SparseMat_& operator = (const Mat& m);
//! makes full copy of the matrix. All the elements are duplicated
SparseMat_ clone() const CV_NODISCARD;
CV_NODISCARD_STD SparseMat_ clone() const;
//! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type)
void create(int dims, const int* _sizes);
//! converts sparse matrix to the old-style CvSparseMat. All the elements are copied

View File

@ -148,22 +148,22 @@ public:
Matx(std::initializer_list<_Tp>); //!< initialize from an initializer list
static Matx all(_Tp alpha);
static Matx zeros();
static Matx ones();
static Matx eye();
static Matx diag(const diag_type& d);
CV_NODISCARD_STD static Matx all(_Tp alpha);
CV_NODISCARD_STD static Matx zeros();
CV_NODISCARD_STD static Matx ones();
CV_NODISCARD_STD static Matx eye();
CV_NODISCARD_STD static Matx diag(const diag_type& d);
/** @brief Generates uniformly distributed random numbers
@param a Range boundary.
@param b The other range boundary (boundaries don't have to be ordered, the lower boundary is inclusive,
the upper one is exclusive).
*/
static Matx randu(_Tp a, _Tp b);
CV_NODISCARD_STD static Matx randu(_Tp a, _Tp b);
/** @brief Generates normally distributed random numbers
@param a Mean value.
@param b Standard deviation.
*/
static Matx randn(_Tp a, _Tp b);
CV_NODISCARD_STD static Matx randn(_Tp a, _Tp b);
//! dot product computed with the default precision
_Tp dot(const Matx<_Tp, m, n>& v) const;

View File

@ -235,7 +235,11 @@ public:
/**
* @param d OpenCL handle (cl_device_id). clRetainDevice() is called on success.
*/
*
* @note Ownership of the passed device is transferred to OpenCV on success.
* The caller should additionally call `clRetainDevice` on it if it intends
* to continue using the device.
*/
static Device fromHandle(void* d);
struct Impl;
@ -495,8 +499,8 @@ public:
template<typename... _Tps> inline
Kernel& args(const _Tps&... kernel_args) { set_args_(0, kernel_args...); return *this; }
/** @brief Run the OpenCL kernel (globalsize value may be adjusted)
/** @brief Run the OpenCL kernel.
@param dims the work problem dimensions. It is the length of globalsize and localsize. It can be either 1, 2 or 3.
@param globalsize work items for each dimension. It is not the final globalsize passed to
OpenCL. Each dimension will be adjusted to the nearest integer divisible by the corresponding
@ -505,12 +509,26 @@ public:
@param localsize work-group size for each dimension.
@param sync specify whether to wait for OpenCL computation to finish before return.
@param q command queue
@note Use run_() if your kernel code doesn't support adjusted globalsize.
*/
bool run(int dims, size_t globalsize[],
size_t localsize[], bool sync, const Queue& q=Queue());
/** @brief Run the OpenCL kernel
*
* @param dims the work problem dimensions. It is the length of globalsize and localsize. It can be either 1, 2 or 3.
* @param globalsize work items for each dimension. This value is passed to OpenCL without changes.
* @param localsize work-group size for each dimension.
* @param sync specify whether to wait for OpenCL computation to finish before return.
* @param q command queue
*/
bool run_(int dims, size_t globalsize[], size_t localsize[], bool sync, const Queue& q=Queue());
bool runTask(bool sync, const Queue& q=Queue());
/** @brief Similar to synchronized run() call with returning of kernel execution time
/** @brief Similar to synchronized run_() call with returning of kernel execution time
*
* Separate OpenCL command queue may be used (with CL_QUEUE_PROFILING_ENABLE)
* @return Execution time in nanoseconds or negative number on error
*/
@ -826,11 +844,13 @@ public:
OpenCLExecutionContext cloneWithNewQueue() const;
/** @brief Creates OpenCL execution context
* OpenCV will check if available OpenCL platform has platformName name, then assign context to
* OpenCV and call `clRetainContext` function. The deviceID device will be used as target device and
* new command queue will be created.
* OpenCV will check if available OpenCL platform has platformName name,
* then assign context to OpenCV.
* The deviceID device will be used as target device and a new command queue will be created.
*
* @note Lifetime of passed handles is transferred to OpenCV wrappers on success
* @note On success, ownership of one reference of the context and device is taken.
* The caller should additionally call `clRetainContext` and/or `clRetainDevice`
* to increase the reference count if it wishes to continue using them.
*
* @param platformName name of OpenCL platform to attach, this string is used to check if platform is available to OpenCV at runtime
* @param platformID ID of platform attached context was created for (cl_platform_id)
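A hedged end-to-end sketch of this ownership contract, assuming one available GPU platform/device and omitting error checking:

#include <CL/cl.h>
#include <opencv2/core/ocl.hpp>

int main()
{
    cl_platform_id platform = nullptr;
    clGetPlatformIDs(1, &platform, nullptr);
    cl_device_id device = nullptr;
    clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, nullptr);
    cl_context context = clCreateContext(nullptr, 1, &device, nullptr, nullptr, nullptr);

    char name[256] = {0};
    clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(name), name, nullptr);

    // OpenCV takes over one reference of `context` and `device` on success,
    // so retain them first if this code keeps using the raw handles afterwards.
    clRetainContext(context);
    clRetainDevice(device);
    cv::ocl::OpenCLExecutionContext ctx =
        cv::ocl::OpenCLExecutionContext::create(name, platform, context, device);
    ctx.bind();

    clReleaseContext(context); // release our retained references when done
    clReleaseDevice(device);
    return 0;
}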

View File

@ -144,6 +144,10 @@ static void dumpOpenCLInformation()
DUMP_MESSAGE_STDOUT(" Double support = " << doubleSupportStr);
DUMP_CONFIG_PROPERTY("cv_ocl_current_haveDoubleSupport", device.doubleFPConfig() > 0);
const char* halfSupportStr = device.halfFPConfig() > 0 ? "Yes" : "No";
DUMP_MESSAGE_STDOUT(" Half support = " << halfSupportStr);
DUMP_CONFIG_PROPERTY("cv_ocl_current_haveHalfSupport", device.halfFPConfig() > 0);
const char* isUnifiedMemoryStr = device.hostUnifiedMemory() ? "Yes" : "No";
DUMP_MESSAGE_STDOUT(" Host unified memory = " << isUnifiedMemoryStr);
DUMP_CONFIG_PROPERTY("cv_ocl_current_hostUnifiedMemory", device.hostUnifiedMemory());
@ -191,6 +195,9 @@ static void dumpOpenCLInformation()
DUMP_MESSAGE_STDOUT(" Preferred vector width double = " << device.preferredVectorWidthDouble());
DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthDouble", device.preferredVectorWidthDouble());
DUMP_MESSAGE_STDOUT(" Preferred vector width half = " << device.preferredVectorWidthHalf());
DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthHalf", device.preferredVectorWidthHalf());
}
catch (...)
{

View File

@ -38,7 +38,7 @@ static tbb::task_scheduler_init& getScheduler()
}
#endif
/** OpenMP parallel_for API implementation
/** TBB parallel_for API implementation
*
* @sa setParallelForBackend
* @ingroup core_parallel_backend

View File

@ -162,13 +162,23 @@ public:
//! default constructor
Point_();
Point_(_Tp _x, _Tp _y);
#if (defined(__GNUC__) && __GNUC__ < 5) // GCC 4.x bug. Details: https://github.com/opencv/opencv/pull/20837
Point_(const Point_& pt);
Point_(Point_&& pt) CV_NOEXCEPT;
Point_(Point_&& pt) CV_NOEXCEPT = default;
#elif OPENCV_ABI_COMPATIBILITY < 500
Point_(const Point_& pt) = default;
Point_(Point_&& pt) CV_NOEXCEPT = default;
#endif
Point_(const Size_<_Tp>& sz);
Point_(const Vec<_Tp, 2>& v);
#if (defined(__GNUC__) && __GNUC__ < 5) // GCC 4.x bug. Details: https://github.com/opencv/opencv/pull/20837
Point_& operator = (const Point_& pt);
Point_& operator = (Point_&& pt) CV_NOEXCEPT;
Point_& operator = (Point_&& pt) CV_NOEXCEPT = default;
#elif OPENCV_ABI_COMPATIBILITY < 500
Point_& operator = (const Point_& pt) = default;
Point_& operator = (Point_&& pt) CV_NOEXCEPT = default;
#endif
//! conversion to another data type
template<typename _Tp2> operator Point_<_Tp2>() const;
@ -244,13 +254,17 @@ public:
//! default constructor
Point3_();
Point3_(_Tp _x, _Tp _y, _Tp _z);
Point3_(const Point3_& pt);
Point3_(Point3_&& pt) CV_NOEXCEPT;
#if OPENCV_ABI_COMPATIBILITY < 500
Point3_(const Point3_& pt) = default;
Point3_(Point3_&& pt) CV_NOEXCEPT = default;
#endif
explicit Point3_(const Point_<_Tp>& pt);
Point3_(const Vec<_Tp, 3>& v);
Point3_& operator = (const Point3_& pt);
Point3_& operator = (Point3_&& pt) CV_NOEXCEPT;
#if OPENCV_ABI_COMPATIBILITY < 500
Point3_& operator = (const Point3_& pt) = default;
Point3_& operator = (Point3_&& pt) CV_NOEXCEPT = default;
#endif
//! conversion to another data type
template<typename _Tp2> operator Point3_<_Tp2>() const;
//! conversion to cv::Vec<>
@ -320,12 +334,16 @@ public:
//! default constructor
Size_();
Size_(_Tp _width, _Tp _height);
Size_(const Size_& sz);
Size_(Size_&& sz) CV_NOEXCEPT;
#if OPENCV_ABI_COMPATIBILITY < 500
Size_(const Size_& sz) = default;
Size_(Size_&& sz) CV_NOEXCEPT = default;
#endif
Size_(const Point_<_Tp>& pt);
Size_& operator = (const Size_& sz);
Size_& operator = (Size_&& sz) CV_NOEXCEPT;
#if OPENCV_ABI_COMPATIBILITY < 500
Size_& operator = (const Size_& sz) = default;
Size_& operator = (Size_&& sz) CV_NOEXCEPT = default;
#endif
//! the area (width*height)
_Tp area() const;
//! aspect ratio (width/height)
@ -425,13 +443,17 @@ public:
//! default constructor
Rect_();
Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
Rect_(const Rect_& r);
Rect_(Rect_&& r) CV_NOEXCEPT;
#if OPENCV_ABI_COMPATIBILITY < 500
Rect_(const Rect_& r) = default;
Rect_(Rect_&& r) CV_NOEXCEPT = default;
#endif
Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz);
Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2);
Rect_& operator = ( const Rect_& r );
Rect_& operator = ( Rect_&& r ) CV_NOEXCEPT;
#if OPENCV_ABI_COMPATIBILITY < 500
Rect_& operator = (const Rect_& r) = default;
Rect_& operator = (Rect_&& r) CV_NOEXCEPT = default;
#endif
//! the top-left corner
Point_<_Tp> tl() const;
//! the bottom-right corner
@ -1165,13 +1187,11 @@ template<typename _Tp> inline
Point_<_Tp>::Point_(_Tp _x, _Tp _y)
: x(_x), y(_y) {}
#if (defined(__GNUC__) && __GNUC__ < 5) // GCC 4.x bug. Details: https://github.com/opencv/opencv/pull/20837
template<typename _Tp> inline
Point_<_Tp>::Point_(const Point_& pt)
: x(pt.x), y(pt.y) {}
template<typename _Tp> inline
Point_<_Tp>::Point_(Point_&& pt) CV_NOEXCEPT
: x(std::move(pt.x)), y(std::move(pt.y)) {}
#endif
template<typename _Tp> inline
Point_<_Tp>::Point_(const Size_<_Tp>& sz)
@ -1181,19 +1201,14 @@ template<typename _Tp> inline
Point_<_Tp>::Point_(const Vec<_Tp,2>& v)
: x(v[0]), y(v[1]) {}
#if (defined(__GNUC__) && __GNUC__ < 5) // GCC 4.x bug. Details: https://github.com/opencv/opencv/pull/20837
template<typename _Tp> inline
Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt)
{
x = pt.x; y = pt.y;
return *this;
}
template<typename _Tp> inline
Point_<_Tp>& Point_<_Tp>::operator = (Point_&& pt) CV_NOEXCEPT
{
x = std::move(pt.x); y = std::move(pt.y);
return *this;
}
#endif
template<typename _Tp> template<typename _Tp2> inline
Point_<_Tp>::operator Point_<_Tp2>() const
@ -1432,14 +1447,6 @@ template<typename _Tp> inline
Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z)
: x(_x), y(_y), z(_z) {}
template<typename _Tp> inline
Point3_<_Tp>::Point3_(const Point3_& pt)
: x(pt.x), y(pt.y), z(pt.z) {}
template<typename _Tp> inline
Point3_<_Tp>::Point3_(Point3_&& pt) CV_NOEXCEPT
: x(std::move(pt.x)), y(std::move(pt.y)), z(std::move(pt.z)) {}
template<typename _Tp> inline
Point3_<_Tp>::Point3_(const Point_<_Tp>& pt)
: x(pt.x), y(pt.y), z(_Tp()) {}
@ -1460,20 +1467,6 @@ Point3_<_Tp>::operator Vec<_Tp, 3>() const
return Vec<_Tp, 3>(x, y, z);
}
template<typename _Tp> inline
Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt)
{
x = pt.x; y = pt.y; z = pt.z;
return *this;
}
template<typename _Tp> inline
Point3_<_Tp>& Point3_<_Tp>::operator = (Point3_&& pt) CV_NOEXCEPT
{
x = std::move(pt.x); y = std::move(pt.y); z = std::move(pt.z);
return *this;
}
template<typename _Tp> inline
_Tp Point3_<_Tp>::dot(const Point3_& pt) const
{
@ -1686,14 +1679,6 @@ template<typename _Tp> inline
Size_<_Tp>::Size_(_Tp _width, _Tp _height)
: width(_width), height(_height) {}
template<typename _Tp> inline
Size_<_Tp>::Size_(const Size_& sz)
: width(sz.width), height(sz.height) {}
template<typename _Tp> inline
Size_<_Tp>::Size_(Size_&& sz) CV_NOEXCEPT
: width(std::move(sz.width)), height(std::move(sz.height)) {}
template<typename _Tp> inline
Size_<_Tp>::Size_(const Point_<_Tp>& pt)
: width(pt.x), height(pt.y) {}
@ -1704,20 +1689,6 @@ Size_<_Tp>::operator Size_<_Tp2>() const
return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height));
}
template<typename _Tp> inline
Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz)
{
width = sz.width; height = sz.height;
return *this;
}
template<typename _Tp> inline
Size_<_Tp>& Size_<_Tp>::operator = (Size_<_Tp>&& sz) CV_NOEXCEPT
{
width = std::move(sz.width); height = std::move(sz.height);
return *this;
}
template<typename _Tp> inline
_Tp Size_<_Tp>::area() const
{
@ -1828,14 +1799,6 @@ template<typename _Tp> inline
Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height)
: x(_x), y(_y), width(_width), height(_height) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(const Rect_<_Tp>& r)
: x(r.x), y(r.y), width(r.width), height(r.height) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(Rect_<_Tp>&& r) CV_NOEXCEPT
: x(std::move(r.x)), y(std::move(r.y)), width(std::move(r.width)), height(std::move(r.height)) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz)
: x(org.x), y(org.y), width(sz.width), height(sz.height) {}
@ -1849,26 +1812,6 @@ Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2)
height = std::max(pt1.y, pt2.y) - y;
}
template<typename _Tp> inline
Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r )
{
x = r.x;
y = r.y;
width = r.width;
height = r.height;
return *this;
}
template<typename _Tp> inline
Rect_<_Tp>& Rect_<_Tp>::operator = ( Rect_<_Tp>&& r ) CV_NOEXCEPT
{
x = std::move(r.x);
y = std::move(r.y);
width = std::move(r.width);
height = std::move(r.height);
return *this;
}
template<typename _Tp> inline
Point_<_Tp> Rect_<_Tp>::tl() const
{

View File

@ -714,9 +714,27 @@ void Mat::forEach_impl(const Functor& operation) {
/////////////////////////// Synchronization Primitives ///////////////////////////////
#if !defined(_M_CEE)
#ifndef OPENCV_DISABLE_THREAD_SUPPORT
typedef std::recursive_mutex Mutex;
typedef std::lock_guard<cv::Mutex> AutoLock;
#endif
#else // OPENCV_DISABLE_THREAD_SUPPORT
// Custom (failing) implementation of `std::recursive_mutex`.
struct Mutex {
void lock(){
CV_Error(cv::Error::StsNotImplemented,
"cv::Mutex is disabled by OPENCV_DISABLE_THREAD_SUPPORT=ON");
}
void unlock(){
CV_Error(cv::Error::StsNotImplemented,
"cv::Mutex is disabled by OPENCV_DISABLE_THREAD_SUPPORT=ON");
}
};
// Stub for cv::AutoLock when threads are disabled.
struct AutoLock {
AutoLock(Mutex &) { }
};
#endif // OPENCV_DISABLE_THREAD_SUPPORT
#endif // !defined(_M_CEE)
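A minimal sketch of the failure mode, assuming a build configured with -DOPENCV_DISABLE_THREAD_SUPPORT=ON:

#include <opencv2/core/utility.hpp>
#include <iostream>

int main()
{
    cv::Mutex m;
    try {
        m.lock(); // throws cv::Exception in thread-less builds; locks otherwise
        m.unlock();
    } catch (const cv::Exception& e) {
        std::cout << e.what() << std::endl; // stub fails loudly instead of deadlocking
    }
    return 0;
}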
/** @brief Designed for command line parsing

View File

@ -16,8 +16,8 @@
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1
# elif defined(__APPLE__)
# include <TargetConditionals.h>
# if (defined(TARGET_OS_OSX) && TARGET_OS_OSX) || (!defined(TARGET_OS_OSX) && !TARGET_OS_IPHONE)
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1 // OSX only
# if (defined(TARGET_OS_OSX) && TARGET_OS_OSX) || (defined(TARGET_OS_IOS) && TARGET_OS_IOS)
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1 // OSX, iOS only
# endif
# else
/* unknown */

View File

@ -466,16 +466,32 @@ public class Mat {
// C++: Mat Mat::mul(Mat m, double scale = 1)
//
// javadoc: Mat::mul(m, scale)
/**
* Element-wise multiplication with scale factor
* @param m operand with which to perform element-wise multiplication
* @param scale scale factor
*/
public Mat mul(Mat m, double scale) {
return new Mat(n_mul(nativeObj, m.nativeObj, scale));
}
// javadoc: Mat::mul(m)
/**
* Element-wise multiplication
* @param m operand with which to perform element-wise multiplication
*/
public Mat mul(Mat m) {
return new Mat(n_mul(nativeObj, m.nativeObj));
}
/**
* Matrix multiplication
* @param m operand with which to perform matrix multiplication
* @see Core#gemm(Mat, Mat, double, Mat, double, Mat, int)
*/
public Mat matMul(Mat m) {
return new Mat(n_matMul(nativeObj, m.nativeObj));
}
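For reference, a hedged C++ sketch of the distinction this binding exposes (mul is element-wise, matMul is the gemm-backed matrix product), using the same 2x2 values as the tests added further below:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat m1(2, 2, CV_32F, cv::Scalar(2));
    cv::Mat m2(2, 2, CV_32F, cv::Scalar(3));

    cv::Mat elementwise = m1.mul(m2); // every entry: 2 * 3 = 6
    cv::Mat product = m1 * m2;        // every entry: 2*3 + 2*3 = 12

    cv::Mat viaGemm;
    cv::gemm(m1, m2, 1.0, cv::noArray(), 0.0, viaGemm); // same as product
    std::cout << product.at<float>(0, 0) << " "
              << viaGemm.at<float>(0, 0) << std::endl;  // prints "12 12"
    return 0;
}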
//
// C++: static Mat Mat::ones(int rows, int cols, int type)
//
@ -1732,6 +1748,8 @@ public class Mat {
private static native long n_mul(long nativeObj, long m_nativeObj);
private static native long n_matMul(long nativeObj, long m_nativeObj);
// C++: static Mat Mat::ones(int rows, int cols, int type)
private static native long n_ones(int rows, int cols, int type);

View File

@ -3,6 +3,16 @@ package org.opencv.core
import org.opencv.core.Mat.*
import java.lang.RuntimeException
fun Mat.get(row: Int, col: Int, data: UByteArray) = this.get(row, col, data.asByteArray())
fun Mat.get(indices: IntArray, data: UByteArray) = this.get(indices, data.asByteArray())
fun Mat.put(row: Int, col: Int, data: UByteArray) = this.put(row, col, data.asByteArray())
fun Mat.put(indices: IntArray, data: UByteArray) = this.put(indices, data.asByteArray())
fun Mat.get(row: Int, col: Int, data: UShortArray) = this.get(row, col, data.asShortArray())
fun Mat.get(indices: IntArray, data: UShortArray) = this.get(indices, data.asShortArray())
fun Mat.put(row: Int, col: Int, data: UShortArray) = this.put(row, col, data.asShortArray())
fun Mat.put(indices: IntArray, data: UShortArray) = this.put(indices, data.asShortArray())
/***
* Example use:
*
@ -19,6 +29,7 @@ inline fun <reified T> Mat.at(row: Int, col: Int) : Atable<T> =
col
)
UByte::class -> AtableUByte(this, row, col) as Atable<T>
UShort::class -> AtableUShort(this, row, col) as Atable<T>
else -> throw RuntimeException("Unsupported class type")
}
@ -30,6 +41,7 @@ inline fun <reified T> Mat.at(idx: IntArray) : Atable<T> =
idx
)
UByte::class -> AtableUByte(this, idx) as Atable<T>
UShort::class -> AtableUShort(this, idx) as Atable<T>
else -> throw RuntimeException("Unsupported class type")
}
@ -38,46 +50,95 @@ class AtableUByte(val mat: Mat, val indices: IntArray): Atable<UByte> {
constructor(mat: Mat, row: Int, col: Int) : this(mat, intArrayOf(row, col))
override fun getV(): UByte {
val data = ByteArray(1)
mat[indices, data]
return data[0].toUByte()
val data = UByteArray(1)
mat.get(indices, data)
return data[0]
}
override fun setV(v: UByte) {
val data = byteArrayOf(v.toByte())
val data = ubyteArrayOf(v)
mat.put(indices, data)
}
override fun getV2c(): Tuple2<UByte> {
val data = ByteArray(2)
mat[indices, data]
return Tuple2(data[0].toUByte(), data[1].toUByte())
val data = UByteArray(2)
mat.get(indices, data)
return Tuple2(data[0], data[1])
}
override fun setV2c(v: Tuple2<UByte>) {
val data = byteArrayOf(v._0.toByte(), v._1.toByte())
val data = ubyteArrayOf(v._0, v._1)
mat.put(indices, data)
}
override fun getV3c(): Tuple3<UByte> {
val data = ByteArray(3)
mat[indices, data]
return Tuple3(data[0].toUByte(), data[1].toUByte(), data[2].toUByte())
val data = UByteArray(3)
mat.get(indices, data)
return Tuple3(data[0], data[1], data[2])
}
override fun setV3c(v: Tuple3<UByte>) {
val data = byteArrayOf(v._0.toByte(), v._1.toByte(), v._2.toByte())
val data = ubyteArrayOf(v._0, v._1, v._2)
mat.put(indices, data)
}
override fun getV4c(): Tuple4<UByte> {
val data = ByteArray(4)
mat[indices, data]
return Tuple4(data[0].toUByte(), data[1].toUByte(), data[2].toUByte(), data[3].toUByte())
val data = UByteArray(4)
mat.get(indices, data)
return Tuple4(data[0], data[1], data[2], data[3])
}
override fun setV4c(v: Tuple4<UByte>) {
val data = byteArrayOf(v._0.toByte(), v._1.toByte(), v._2.toByte(), v._3.toByte())
val data = ubyteArrayOf(v._0, v._1, v._2, v._3)
mat.put(indices, data)
}
}
class AtableUShort(val mat: Mat, val indices: IntArray): Atable<UShort> {
constructor(mat: Mat, row: Int, col: Int) : this(mat, intArrayOf(row, col))
override fun getV(): UShort {
val data = UShortArray(1)
mat.get(indices, data)
return data[0]
}
override fun setV(v: UShort) {
val data = ushortArrayOf(v)
mat.put(indices, data)
}
override fun getV2c(): Tuple2<UShort> {
val data = UShortArray(2)
mat.get(indices, data)
return Tuple2(data[0], data[1])
}
override fun setV2c(v: Tuple2<UShort>) {
val data = ushortArrayOf(v._0, v._1)
mat.put(indices, data)
}
override fun getV3c(): Tuple3<UShort> {
val data = UShortArray(3)
mat.get(indices, data)
return Tuple3(data[0], data[1], data[2])
}
override fun setV3c(v: Tuple3<UShort>) {
val data = ushortArrayOf(v._0, v._1, v._2)
mat.put(indices, data)
}
override fun getV4c(): Tuple4<UShort> {
val data = UShortArray(4)
mat.get(indices, data)
return Tuple4(data[0], data[1], data[2], data[3])
}
override fun setV4c(v: Tuple4<UShort>) {
val data = ushortArrayOf(v._0, v._1, v._2, v._3)
mat.put(indices, data)
}
}

View File

@ -0,0 +1,3 @@
package org.opencv.core
operator fun Mat.times(other: Mat): Mat = this.matMul(other)

View File

@ -686,6 +686,16 @@ public class MatTest extends OpenCVTestCase {
assertMatEqual(truth, dst, EPS);
}
public void testMatMulMat() {
Mat m1 = new Mat(2, 2, CvType.CV_32F, new Scalar(2));
Mat m2 = new Mat(2, 2, CvType.CV_32F, new Scalar(3));
dst = m1.matMul(m2);
truth = new Mat(2, 2, CvType.CV_32F, new Scalar(12));
assertMatEqual(truth, dst, EPS);
}
public void testOnesIntIntInt() {
dst = Mat.ones(matSize, matSize, CvType.CV_32F);

View File

@ -114,7 +114,17 @@ CV_EXPORTS @interface Mat : NSObject
- (BOOL)isSubmatrix;
- (void)locateROI:(Size2i*)wholeSize ofs:(Point2i*)offset NS_SWIFT_NAME(locateROI(wholeSize:offset:));
- (Mat*)mul:(Mat*)mat scale:(double)scale;
/**
Performs element-wise multiplication
@param mat operand with which to perform element-wise multiplication
*/
- (Mat*)mul:(Mat*)mat;
/**
Performs matrix multiplication
@param mat operand with which to perform matrix multiplication
@see `Core.gemm(...)`
*/
- (Mat*)matMul:(Mat*)mat;
+ (Mat*)ones:(int)rows cols:(int)cols type:(int)type NS_SWIFT_NAME(ones(rows:cols:type:));
+ (Mat*)ones:(Size2i*)size type:(int)type NS_SWIFT_NAME(ones(size:type:));
+ (Mat*)onesEx:(NSArray<NSNumber*>*)sizes type:(int)type NS_SWIFT_NAME(ones(sizes:type:));

View File

@ -13,25 +13,28 @@
#import "CvType.h"
#import "CVObjcUtil.h"
// return true if we have reached the final index
static bool incIdx(cv::Mat* mat, std::vector<int>& indices) {
for (int dim = mat->dims-1; dim>=0; dim--) {
indices[dim] = (indices[dim] + 1) % mat->size[dim];
if (indices[dim] != 0) {
return false;
}
static int idx2Offset(cv::Mat* mat, std::vector<int>& indices) {
int offset = indices[0];
for (int dim=1; dim < mat->dims; dim++) {
offset = offset*mat->size[dim] + indices[dim];
}
return offset;
}
static void offset2Idx(cv::Mat* mat, size_t offset, std::vector<int>& indices) {
for (int dim=mat->dims-1; dim>=0; dim--) {
indices[dim] = offset % mat->size[dim];
offset = (offset - indices[dim]) / mat->size[dim];
}
return true;
}
// returns true if final index was reached
static bool updateIdx(cv::Mat* mat, std::vector<int>& indices, int inc) {
for (int index = 0; index < inc; index++) {
if (incIdx(mat, indices)) {
return true;
}
}
return false;
static bool updateIdx(cv::Mat* mat, std::vector<int>& indices, size_t inc) {
size_t currentOffset = idx2Offset(mat, indices);
size_t newOffset = currentOffset + inc;
bool reachedEnd = newOffset>=(size_t)mat->total();
offset2Idx(mat, reachedEnd?0:newOffset, indices);
return reachedEnd;
}
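A standalone restatement of the row-major offset mapping above (the 5x6x8 shape is an arbitrary illustration):

#include <cstdio>
#include <vector>

// Mirrors idx2Offset/offset2Idx: multi-dimensional index <-> flat element offset.
static int idx2Offset(const std::vector<int>& sizes, const std::vector<int>& idx)
{
    int offset = idx[0];
    for (size_t dim = 1; dim < sizes.size(); dim++)
        offset = offset * sizes[dim] + idx[dim];
    return offset;
}

static void offset2Idx(const std::vector<int>& sizes, int offset, std::vector<int>& idx)
{
    for (int dim = (int)sizes.size() - 1; dim >= 0; dim--) {
        idx[dim] = offset % sizes[dim];
        offset /= sizes[dim]; // same as (offset - idx[dim]) / sizes[dim]
    }
}

int main()
{
    std::vector<int> sizes = {5, 6, 8};   // a 5x6x8 Mat
    std::vector<int> idx = {1, 2, 3};
    int off = idx2Offset(sizes, idx);     // (1*6 + 2)*8 + 3 = 67
    std::vector<int> back(3);
    offset2Idx(sizes, off, back);         // recovers {1, 2, 3}
    std::printf("%d -> {%d,%d,%d}\n", off, back[0], back[1], back[2]);
    return 0;
}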
@implementation Mat {
@ -369,6 +372,11 @@ static bool updateIdx(cv::Mat* mat, std::vector<int>& indices, int inc) {
return [[Mat alloc] initWithNativeMat:new cv::Mat(_nativePtr->mul(*(cv::Mat*)mat.nativePtr))];
}
- (Mat*)matMul:(Mat*)mat {
cv::Mat temp = self.nativeRef * mat.nativeRef;
return [Mat fromNative:temp];
}
+ (Mat*)ones:(int)rows cols:(int)cols type:(int)type {
return [[Mat alloc] initWithNativeMat:new cv::Mat(cv::Mat::ones(rows, cols, type))];
}
@ -548,7 +556,7 @@ template<typename T> void putData(uchar* dataDest, int count, T (^readData)(int)
if (depth == CV_8U) {
putData(dest, count, ^uchar (int index) { return cv::saturate_cast<uchar>(data[offset + index].doubleValue);} );
} else if (depth == CV_8S) {
putData(dest, count, ^char (int index) { return cv::saturate_cast<char>(data[offset + index].doubleValue);} );
putData(dest, count, ^schar (int index) { return cv::saturate_cast<schar>(data[offset + index].doubleValue);} );
} else if (depth == CV_16U) {
putData(dest, count, ^ushort (int index) { return cv::saturate_cast<ushort>(data[offset + index].doubleValue);} );
} else if (depth == CV_16S) {
@ -724,22 +732,31 @@ template<typename T> int getData(NSArray<NSNumber*>* indices, cv::Mat* mat, int
}
int arrayAvailable = count;
size_t countBytes = count * sizeof(T);
size_t remainingBytes = (size_t)(mat->total() - idx2Offset(mat, tempIndices))*mat->elemSize();
countBytes = (countBytes>remainingBytes)?remainingBytes:countBytes;
int result = (int)countBytes;
int matAvailable = getMatAvailable(mat, tempIndices);
int available = MIN(arrayAvailable, matAvailable);
int result = (int)(available * mat->elemSize() / mat->channels());
if (mat->isContinuous()) {
memcpy(tBuffer, mat->ptr(tempIndices.data()), available * sizeof(T));
} else {
int copyOffset = 0;
int copyCount = MIN((mat->size[mat->dims - 1] - tempIndices[mat->dims - 1]) * mat->channels(), available);
while (available > 0) {
memcpy(tBuffer + copyOffset, mat->ptr(tempIndices.data()), copyCount * sizeof(T));
if (updateIdx(mat, tempIndices, copyCount / mat->channels())) {
break;
}
available -= copyCount;
copyOffset += copyCount * sizeof(T);
copyCount = MIN(mat->size[mat->dims-1] * mat->channels(), available);
char* buff = (char*)tBuffer;
size_t blockSize = mat->size[mat->dims-1] * mat->elemSize();
size_t firstPartialBlockSize = (mat->size[mat->dims-1] - tempIndices[mat->dims-1]) * mat->step[mat->dims-1];
for (int dim=mat->dims-2; dim>=0 && blockSize == mat->step[dim]; dim--) {
blockSize *= mat->size[dim];
firstPartialBlockSize += (mat->size[dim] - (tempIndices[dim]+1)) * mat->step[dim];
}
size_t copyCount = (countBytes<firstPartialBlockSize)?countBytes:firstPartialBlockSize;
uchar* data = mat->ptr(tempIndices.data());
while(countBytes>0) {
memcpy(buff, data, copyCount);
updateIdx(mat, tempIndices, copyCount / mat->elemSize());
countBytes -= copyCount;
buff += copyCount;
copyCount = countBytes<blockSize?countBytes:blockSize;
data = mat->ptr(tempIndices.data());
}
}
return result;
@ -817,22 +834,31 @@ template<typename T> int putData(NSArray<NSNumber*>* indices, cv::Mat* mat, int
}
int arrayAvailable = count;
size_t countBytes = count * sizeof(T);
size_t remainingBytes = (size_t)(mat->total() - idx2Offset(mat, tempIndices))*mat->elemSize();
countBytes = (countBytes>remainingBytes)?remainingBytes:countBytes;
int result = (int)countBytes;
int matAvailable = getMatAvailable(mat, tempIndices);
int available = MIN(arrayAvailable, matAvailable);
int result = (int)(available * mat->elemSize() / mat->channels());
if (mat->isContinuous()) {
memcpy(mat->ptr(tempIndices.data()), tBuffer, available * sizeof(T));
} else {
int copyOffset = 0;
int copyCount = MIN((mat->size[mat->dims - 1] - tempIndices[mat->dims - 1]) * mat->channels(), available);
while (available > 0) {
memcpy(mat->ptr(tempIndices.data()), tBuffer + copyOffset, copyCount * sizeof(T));
if (updateIdx(mat, tempIndices, copyCount / mat->channels())) {
break;
}
available -= copyCount;
copyOffset += copyCount * sizeof(T);
copyCount = MIN(mat->size[mat->dims-1] * (int)mat->channels(), available);
char* buff = (char*)tBuffer;
size_t blockSize = mat->size[mat->dims-1] * mat->elemSize();
size_t firstPartialBlockSize = (mat->size[mat->dims-1] - tempIndices[mat->dims-1]) * mat->step[mat->dims-1];
for (int dim=mat->dims-2; dim>=0 && blockSize == mat->step[dim]; dim--) {
blockSize *= mat->size[dim];
firstPartialBlockSize += (mat->size[dim] - (tempIndices[dim]+1)) * mat->step[dim];
}
size_t copyCount = (countBytes<firstPartialBlockSize)?countBytes:firstPartialBlockSize;
uchar* data = mat->ptr(tempIndices.data());
while(countBytes>0){
memcpy(data, buff, copyCount);
updateIdx(mat, tempIndices, copyCount / mat->elemSize());
countBytes -= copyCount;
buff += copyCount;
copyCount = countBytes<blockSize?countBytes:blockSize;
data = mat->ptr(tempIndices.data());
}
}
return result;

View File

@ -62,6 +62,21 @@ public extension Mat {
}
}
@discardableResult func get(indices:[Int32], data:inout [UInt8]) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
try throwIncompatibleBufferSize(count: data.count, channels: channels)
} else if depth() != CvType.CV_8U {
try throwIncompatibleDataType(typeName: CvType.type(toString: type()))
}
let count = Int32(data.count)
return data.withUnsafeMutableBufferPointer { body in
body.withMemoryRebound(to: Int8.self) { reboundBody in
return __get(indices as [NSNumber], count: count, byteBuffer: reboundBody.baseAddress!)
}
}
}
@discardableResult func get(indices:[Int32], data:inout [Double]) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
@ -114,10 +129,29 @@ public extension Mat {
}
}
@discardableResult func get(indices:[Int32], data:inout [UInt16]) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
try throwIncompatibleBufferSize(count: data.count, channels: channels)
} else if depth() != CvType.CV_16U {
try throwIncompatibleDataType(typeName: CvType.type(toString: type()))
}
let count = Int32(data.count)
return data.withUnsafeMutableBufferPointer { body in
body.withMemoryRebound(to: Int16.self) { reboundBody in
return __get(indices as [NSNumber], count: count, shortBuffer: reboundBody.baseAddress!)
}
}
}
@discardableResult func get(row: Int32, col: Int32, data:inout [Int8]) throws -> Int32 {
return try get(indices: [row, col], data: &data)
}
@discardableResult func get(row: Int32, col: Int32, data:inout [UInt8]) throws -> Int32 {
return try get(indices: [row, col], data: &data)
}
@discardableResult func get(row: Int32, col: Int32, data:inout [Double]) throws -> Int32 {
return try get(indices: [row, col], data: &data)
}
@ -134,6 +168,10 @@ public extension Mat {
return try get(indices: [row, col], data: &data)
}
@discardableResult func get(row: Int32, col: Int32, data:inout [UInt16]) throws -> Int32 {
return try get(indices: [row, col], data: &data)
}
@discardableResult func put(indices:[Int32], data:[Int8]) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
@ -147,6 +185,21 @@ public extension Mat {
}
}
@discardableResult func put(indices:[Int32], data:[UInt8]) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
try throwIncompatibleBufferSize(count: data.count, channels: channels)
} else if depth() != CvType.CV_8U {
try throwIncompatibleDataType(typeName: CvType.type(toString: type()))
}
let count = Int32(data.count)
return data.withUnsafeBufferPointer { body in
body.withMemoryRebound(to: Int8.self) { reboundBody in
return __put(indices as [NSNumber], count: count, byteBuffer: reboundBody.baseAddress!)
}
}
}
@discardableResult func put(indices:[Int32], data:[Int8], offset: Int, length: Int32) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
@ -214,10 +267,29 @@ public extension Mat {
}
}
@discardableResult func put(indices:[Int32], data:[UInt16]) throws -> Int32 {
let channels = CvType.channels(Int32(type()))
if Int32(data.count) % channels != 0 {
try throwIncompatibleBufferSize(count: data.count, channels: channels)
} else if depth() != CvType.CV_16U {
try throwIncompatibleDataType(typeName: CvType.type(toString: type()))
}
let count = Int32(data.count)
return data.withUnsafeBufferPointer { body in
body.withMemoryRebound(to: Int16.self) { reboundBody in
return __put(indices as [NSNumber], count: count, shortBuffer: reboundBody.baseAddress!)
}
}
}
@discardableResult func put(row: Int32, col: Int32, data:[Int8]) throws -> Int32 {
return try put(indices: [row, col], data: data)
}
@discardableResult func put(row: Int32, col: Int32, data:[UInt8]) throws -> Int32 {
return try put(indices: [row, col], data: data)
}
@discardableResult func put(row: Int32, col: Int32, data: [Int8], offset: Int, length: Int32) throws -> Int32 {
return try put(indices: [row, col], data: data, offset: offset, length: length)
}
@ -238,6 +310,10 @@ public extension Mat {
return try put(indices: [row, col], data: data)
}
@discardableResult func put(row: Int32, col: Int32, data: [UInt16]) throws -> Int32 {
return try put(indices: [row, col], data: data)
}
@discardableResult func get(row: Int32, col: Int32) -> [Double] {
return get(indices: [row, col])
}
@ -303,46 +379,46 @@ public class MatAt<N: Atable> {
extension UInt8: Atable {
public static func getAt(m: Mat, indices:[Int32]) -> UInt8 {
var tmp = [Int8](repeating: 0, count: 1)
var tmp = [UInt8](repeating: 0, count: 1)
try! m.get(indices: indices, data: &tmp)
return UInt8(bitPattern: tmp[0])
return tmp[0]
}
public static func putAt(m: Mat, indices: [Int32], v: UInt8) {
let tmp = [Int8(bitPattern: v)]
let tmp = [v]
try! m.put(indices: indices, data: tmp)
}
public static func getAt2c(m: Mat, indices:[Int32]) -> (UInt8, UInt8) {
var tmp = [Int8](repeating: 0, count: 2)
var tmp = [UInt8](repeating: 0, count: 2)
try! m.get(indices: indices, data: &tmp)
return (UInt8(bitPattern: tmp[0]), UInt8(bitPattern: tmp[1]))
return (tmp[0], tmp[1])
}
public static func putAt2c(m: Mat, indices: [Int32], v: (UInt8, UInt8)) {
let tmp = [Int8(bitPattern: v.0), Int8(bitPattern: v.1)]
let tmp = [v.0, v.1]
try! m.put(indices: indices, data: tmp)
}
public static func getAt3c(m: Mat, indices:[Int32]) -> (UInt8, UInt8, UInt8) {
var tmp = [Int8](repeating: 0, count: 3)
var tmp = [UInt8](repeating: 0, count: 3)
try! m.get(indices: indices, data: &tmp)
return (UInt8(bitPattern: tmp[0]), UInt8(bitPattern: tmp[1]), UInt8(bitPattern: tmp[2]))
return (tmp[0], tmp[1], tmp[2])
}
public static func putAt3c(m: Mat, indices: [Int32], v: (UInt8, UInt8, UInt8)) {
let tmp = [Int8(bitPattern: v.0), Int8(bitPattern: v.1), Int8(bitPattern: v.2)]
let tmp = [v.0, v.1, v.2]
try! m.put(indices: indices, data: tmp)
}
public static func getAt4c(m: Mat, indices:[Int32]) -> (UInt8, UInt8, UInt8, UInt8) {
var tmp = [Int8](repeating: 0, count: 4)
var tmp = [UInt8](repeating: 0, count: 4)
try! m.get(indices: indices, data: &tmp)
return (UInt8(bitPattern: tmp[0]), UInt8(bitPattern: tmp[1]), UInt8(bitPattern: tmp[2]), UInt8(bitPattern: tmp[3]))
return (tmp[0], tmp[1], tmp[2], tmp[3])
}
public static func putAt4c(m: Mat, indices: [Int32], v: (UInt8, UInt8, UInt8, UInt8)) {
let tmp = [Int8(bitPattern: v.0), Int8(bitPattern: v.1), Int8(bitPattern: v.2), Int8(bitPattern: v.3)]
let tmp = [v.0, v.1, v.2, v.3]
try! m.put(indices: indices, data: tmp)
}
}
@ -531,6 +607,52 @@ extension Int32: Atable {
}
}
extension UInt16: Atable {
public static func getAt(m: Mat, indices:[Int32]) -> UInt16 {
var tmp = [UInt16](repeating: 0, count: 1)
try! m.get(indices: indices, data: &tmp)
return tmp[0]
}
public static func putAt(m: Mat, indices: [Int32], v: UInt16) {
let tmp = [v]
try! m.put(indices: indices, data: tmp)
}
public static func getAt2c(m: Mat, indices:[Int32]) -> (UInt16, UInt16) {
var tmp = [UInt16](repeating: 0, count: 2)
try! m.get(indices: indices, data: &tmp)
return (tmp[0], tmp[1])
}
public static func putAt2c(m: Mat, indices: [Int32], v: (UInt16, UInt16)) {
let tmp = [v.0, v.1]
try! m.put(indices: indices, data: tmp)
}
public static func getAt3c(m: Mat, indices:[Int32]) -> (UInt16, UInt16, UInt16) {
var tmp = [UInt16](repeating: 0, count: 3)
try! m.get(indices: indices, data: &tmp)
return (tmp[0], tmp[1], tmp[2])
}
public static func putAt3c(m: Mat, indices: [Int32], v: (UInt16, UInt16, UInt16)) {
let tmp = [v.0, v.1, v.2]
try! m.put(indices: indices, data: tmp)
}
public static func getAt4c(m: Mat, indices:[Int32]) -> (UInt16, UInt16, UInt16, UInt16) {
var tmp = [UInt16](repeating: 0, count: 4)
try! m.get(indices: indices, data: &tmp)
return (tmp[0], tmp[1], tmp[2], tmp[3])
}
public static func putAt4c(m: Mat, indices: [Int32], v: (UInt16, UInt16, UInt16, UInt16)) {
let tmp = [v.0, v.1, v.2, v.3]
try! m.put(indices: indices, data: tmp)
}
}
extension Int16: Atable {
public static func getAt(m: Mat, indices:[Int32]) -> Int16 {
var tmp = [Int16](repeating: 0, count: 1)
@ -593,3 +715,9 @@ public extension Mat {
return MatAt(mat: self, indices: indices)
}
}
public extension Mat {
static func *(lhs:Mat, rhs: Mat) -> Mat {
return lhs.matMul(rhs)
}
}

View File

@ -64,10 +64,10 @@
}
- (instancetype)initWithPoint:(Point2d*)point1 point:(Point2d*)point2 {
int x = (point1.x < point2.x ? point1.x : point2.x);
int y = (point1.y < point2.y ? point1.y : point2.y);
int width = (point1.x > point2.x ? point1.x : point2.x) - x;
int height = (point1.y > point2.y ? point1.y : point2.y) - y;
double x = (point1.x < point2.x ? point1.x : point2.x);
double y = (point1.y < point2.y ? point1.y : point2.y);
double width = (point1.x > point2.x ? point1.x : point2.x) - x;
double height = (point1.y > point2.y ? point1.y : point2.y) - y;
return [self initWithX:x y:y width:width height:height];
}

View File

@ -64,10 +64,10 @@
}
- (instancetype)initWithPoint:(Point2f*)point1 point:(Point2f*)point2 {
int x = (point1.x < point2.x ? point1.x : point2.x);
int y = (point1.y < point2.y ? point1.y : point2.y);
int width = (point1.x > point2.x ? point1.x : point2.x) - x;
int height = (point1.y > point2.y ? point1.y : point2.y) - y;
float x = (point1.x < point2.x ? point1.x : point2.x);
float y = (point1.y < point2.y ? point1.y : point2.y);
float width = (point1.x > point2.x ? point1.x : point2.x) - x;
float height = (point1.y > point2.y ? point1.y : point2.y) - y;
return [self initWithX:x y:y width:width height:height];
}

View File

@ -308,15 +308,15 @@ class MatTests: OpenCVTestCase {
XCTAssert([340] == sm.get(row: 1, col: 1))
}
func testGetIntIntByteArray() throws {
let m = try getTestMat(size: 5, type: CvType.CV_8UC3)
func testGetIntIntInt8Array() throws {
let m = try getTestMat(size: 5, type: CvType.CV_8SC3)
var goodData = [Int8](repeating: 0, count: 9)
// whole Mat
var bytesNum = try m.get(row: 1, col: 1, data: &goodData)
XCTAssertEqual(9, bytesNum)
XCTAssert([110, 111, 112, 120, 121, 122, -126, -125, -124] == goodData)
XCTAssert([110, 111, 112, 120, 121, 122, 127, 127, 127] == goodData)
var badData = [Int8](repeating: 0, count: 7)
XCTAssertThrowsError(bytesNum = try m.get(row: 0, col: 0, data: &badData))
@ -326,11 +326,36 @@ class MatTests: OpenCVTestCase {
var buff00 = [Int8](repeating: 0, count: 3)
bytesNum = try sm.get(row: 0, col: 0, data: &buff00)
XCTAssertEqual(3, bytesNum)
XCTAssert(buff00 == [-26, -25, -24])
XCTAssert(buff00 == [127, 127, 127])
var buff11 = [Int8](repeating: 0, count: 3)
bytesNum = try sm.get(row: 1, col: 1, data: &buff11)
XCTAssertEqual(3, bytesNum)
XCTAssert(buff11 == [-1, -1, -1])
XCTAssert(buff11 == [127, 127, 127])
}
func testGetIntIntUInt8Array() throws {
let m = try getTestMat(size: 5, type: CvType.CV_8UC3)
var goodData = [UInt8](repeating: 0, count: 9)
// whole Mat
var bytesNum = try m.get(row: 1, col: 1, data: &goodData)
XCTAssertEqual(9, bytesNum)
XCTAssert([110, 111, 112, 120, 121, 122, 130, 131, 132] == goodData)
var badData = [UInt8](repeating: 0, count: 7)
XCTAssertThrowsError(bytesNum = try m.get(row: 0, col: 0, data: &badData))
// sub-Mat
let sm = m.submat(rowStart: 2, rowEnd: 4, colStart: 3, colEnd: 5)
var buff00 = [UInt8](repeating: 0, count: 3)
bytesNum = try sm.get(row: 0, col: 0, data: &buff00)
XCTAssertEqual(3, bytesNum)
XCTAssert(buff00 == [230, 231, 232])
var buff11 = [UInt8](repeating: 0, count: 3)
bytesNum = try sm.get(row: 1, col: 1, data: &buff11)
XCTAssertEqual(3, bytesNum)
XCTAssert(buff11 == [255, 255, 255])
}
func testGetIntIntDoubleArray() throws {
@ -399,14 +424,14 @@ class MatTests: OpenCVTestCase {
XCTAssert(buff11 == [340, 341, 0, 0])
}
func testGetIntIntShortArray() throws {
func testGetIntIntInt16Array() throws {
let m = try getTestMat(size: 5, type: CvType.CV_16SC2)
var buff = [Int16](repeating: 0, count: 6)
// whole Mat
var bytesNum = try m.get(row: 1, col: 1, data: &buff)
XCTAssertEqual(12, bytesNum);
XCTAssertEqual(12, bytesNum)
XCTAssert(buff == [110, 111, 120, 121, 130, 131])
// sub-Mat
@ -417,6 +442,46 @@ class MatTests: OpenCVTestCase {
XCTAssert(buff00 == [230, 231, 240, 241])
var buff11 = [Int16](repeating: 0, count: 4)
bytesNum = try sm.get(row: 1, col: 1, data: &buff11)
XCTAssertEqual(4, bytesNum)
XCTAssert(buff11 == [340, 341, 0, 0])
let m2 = Mat(sizes: [5, 6, 8], type: CvType.CV_16S)
let data:[Int16] = (0..<m2.total()).map { Int16($0) }
try m2.put(indices: [0, 0, 0], data: data)
let matNonContinuous = m2.submat(ranges:[Range(start:1, end:4), Range(start:2, end:5), Range(start:3, end:6)])
let matContinuous = matNonContinuous.clone()
var outNonContinuous = [Int16](repeating:0, count: matNonContinuous.total())
try matNonContinuous.get(indices: [0, 0, 0], data: &outNonContinuous)
var outContinuous = [Int16](repeating: 0, count:matNonContinuous.total())
try matContinuous.get(indices: [0, 0, 0], data: &outContinuous)
XCTAssertEqual(outNonContinuous, outContinuous)
let subMat2 = m2.submat(ranges:[Range(start:1, end:4), Range(start:1, end:5), Range(start:0, end:8)])
let subMatClone2 = subMat2.clone()
var outNonContinuous2 = [Int16](repeating:0, count: subMat2.total())
try subMat2.get(indices: [0, 1, 1], data: &outNonContinuous2)
var outContinuous2 = [Int16](repeating:0, count:subMat2.total())
try subMatClone2.get(indices: [0, 1, 1], data: &outContinuous2)
XCTAssertEqual(outNonContinuous2, outContinuous2)
}
func testGetIntIntUInt16Array() throws {
let m = try getTestMat(size: 5, type: CvType.CV_16UC2)
var buff = [UInt16](repeating: 0, count: 6)
// whole Mat
var bytesNum = try m.get(row: 1, col: 1, data: &buff)
XCTAssertEqual(12, bytesNum);
XCTAssert(buff == [110, 111, 120, 121, 130, 131])
// sub-Mat
let sm = m.submat(rowStart: 2, rowEnd: 4, colStart: 3, colEnd: 5)
var buff00 = [UInt16](repeating: 0, count: 4)
bytesNum = try sm.get(row: 0, col: 0, data: &buff00)
XCTAssertEqual(8, bytesNum)
XCTAssert(buff00 == [230, 231, 240, 241])
var buff11 = [UInt16](repeating: 0, count: 4)
bytesNum = try sm.get(row: 1, col: 1, data: &buff11)
XCTAssertEqual(4, bytesNum);
XCTAssert(buff11 == [340, 341, 0, 0])
}
@ -618,6 +683,16 @@ class MatTests: OpenCVTestCase {
try assertMatEqual(truth!, dst, OpenCVTestCase.EPS)
}
func testMatMulMat() throws {
let m1 = Mat(rows: 2, cols: 2, type: CvType.CV_32F, scalar: Scalar(2))
let m2 = Mat(rows: 2, cols: 2, type: CvType.CV_32F, scalar: Scalar(3))
dst = m1.matMul(m2)
truth = Mat(rows: 2, cols: 2, type: CvType.CV_32F, scalar: Scalar(12))
try assertMatEqual(truth!, dst, OpenCVTestCase.EPS)
}
func testOnesIntIntInt() throws {
dst = Mat.ones(rows: OpenCVTestCase.matSize, cols: OpenCVTestCase.matSize, type: CvType.CV_32F)
@ -653,7 +728,7 @@ class MatTests: OpenCVTestCase {
try assertMatEqual(truth!, m1, OpenCVTestCase.EPS)
}
func testPutIntIntByteArray() throws {
func testPutIntIntInt8Array() throws {
let m = Mat(rows: 5, cols: 5, type: CvType.CV_8SC3, scalar: Scalar(1, 2, 3))
let sm = m.submat(rowStart: 2, rowEnd: 4, colStart: 3, colEnd: 5)
var buff = [Int8](repeating: 0, count: 6)
@ -683,7 +758,37 @@ class MatTests: OpenCVTestCase {
XCTAssert(buff == buff0)
}
func testPutIntArrayByteArray() throws {
func testPutIntIntUInt8Array() throws {
let m = Mat(rows: 5, cols: 5, type: CvType.CV_8UC3, scalar: Scalar(1, 2, 3))
let sm = m.submat(rowStart: 2, rowEnd: 4, colStart: 3, colEnd: 5)
var buff = [UInt8](repeating: 0, count: 6)
let buff0:[UInt8] = [10, 20, 30, 40, 50, 60]
let buff1:[UInt8] = [255, 254, 253, 252, 251, 250]
var bytesNum = try m.put(row:1, col:2, data:buff0)
XCTAssertEqual(6, bytesNum)
bytesNum = try m.get(row: 1, col: 2, data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff0)
bytesNum = try sm.put(row:0, col:0, data:buff1)
XCTAssertEqual(6, bytesNum)
bytesNum = try sm.get(row: 0, col: 0, data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff1)
bytesNum = try m.get(row: 2, col: 3, data: &buff)
XCTAssertEqual(6, bytesNum);
XCTAssert(buff == buff1)
let m1 = m.row(1)
bytesNum = try m1.get(row: 0, col: 2, data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff0)
}
func testPutIntArrayInt8Array() throws {
let m = Mat(sizes: [5, 5, 5], type: CvType.CV_8SC3, scalar: Scalar(1, 2, 3))
let sm = m.submat(ranges: [Range(start: 0, end: 2), Range(start: 1, end: 3), Range(start: 2, end: 4)])
var buff = [Int8](repeating: 0, count: 6)
@ -714,10 +819,41 @@ class MatTests: OpenCVTestCase {
XCTAssert(buff == buff0)
}
func testPutIntArrayUInt8Array() throws {
let m = Mat(sizes: [5, 5, 5], type: CvType.CV_8UC3, scalar: Scalar(1, 2, 3))
let sm = m.submat(ranges: [Range(start: 0, end: 2), Range(start: 1, end: 3), Range(start: 2, end: 4)])
var buff = [UInt8](repeating: 0, count: 6)
let buff0:[UInt8] = [10, 20, 30, 40, 50, 60]
let buff1:[UInt8] = [255, 254, 253, 252, 251, 250]
var bytesNum = try m.put(indices:[1, 2, 0], data:buff0)
XCTAssertEqual(6, bytesNum)
bytesNum = try m.get(indices: [1, 2, 0], data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff0)
bytesNum = try sm.put(indices: [0, 0, 0], data: buff1)
XCTAssertEqual(6, bytesNum)
bytesNum = try sm.get(indices: [0, 0, 0], data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff1)
bytesNum = try m.get(indices: [0, 1, 2], data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff1)
let m1 = m.submat(ranges: [Range(start: 1,end: 2), Range.all(), Range.all()])
bytesNum = try m1.get(indices: [0, 2, 0], data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == buff0)
}
func testPutIntIntDoubleArray() throws {
let m = Mat(rows: 5, cols: 5, type: CvType.CV_8SC3, scalar: Scalar(1, 2, 3))
let m = Mat(rows: 5, cols: 5, type: CvType.CV_8UC3, scalar: Scalar(1, 2, 3))
let sm = m.submat(rowStart: 2, rowEnd: 4, colStart: 3, colEnd: 5)
var buff = [Int8](repeating: 0, count: 6)
var buff = [UInt8](repeating: 0, count: 6)
var bytesNum = try m.put(row: 1, col: 2, data: [10, 20, 30, 40, 50, 60] as [Double])
@ -731,16 +867,16 @@ class MatTests: OpenCVTestCase {
XCTAssertEqual(6, bytesNum)
bytesNum = try sm.get(row: 0, col: 0, data: &buff)
XCTAssertEqual(6, bytesNum);
XCTAssert(buff == [-1, -2, -3, -4, -5, -6])
XCTAssert(buff == [255, 254, 253, 252, 251, 250])
bytesNum = try m.get(row: 2, col: 3, data: &buff)
XCTAssertEqual(6, bytesNum);
XCTAssert(buff == [-1, -2, -3, -4, -5, -6])
XCTAssert(buff == [255, 254, 253, 252, 251, 250])
}
func testPutIntArrayDoubleArray() throws {
let m = Mat(sizes: [5, 5, 5], type: CvType.CV_8SC3, scalar: Scalar(1, 2, 3))
let m = Mat(sizes: [5, 5, 5], type: CvType.CV_8UC3, scalar: Scalar(1, 2, 3))
let sm = m.submat(ranges: [Range(start: 0, end: 2), Range(start: 1, end: 3), Range(start: 2, end: 4)])
var buff = [Int8](repeating: 0, count: 6)
var buff = [UInt8](repeating: 0, count: 6)
var bytesNum = try m.put(indices: [1, 2, 0], data: [10, 20, 30, 40, 50, 60] as [Double])
@ -754,10 +890,10 @@ class MatTests: OpenCVTestCase {
XCTAssertEqual(6, bytesNum);
bytesNum = try sm.get(indices: [0, 0, 0], data: &buff)
XCTAssertEqual(6, bytesNum);
XCTAssert(buff == [-1, -2, -3, -4, -5, -6])
XCTAssert(buff == [255, 254, 253, 252, 251, 250])
bytesNum = try m.get(indices: [0, 1, 2], data: &buff)
XCTAssertEqual(6, bytesNum)
XCTAssert(buff == [-1, -2, -3, -4, -5, -6])
XCTAssert(buff == [255, 254, 253, 252, 251, 250])
}
func testPutIntIntFloatArray() throws {
@ -820,7 +956,7 @@ class MatTests: OpenCVTestCase {
XCTAssert([40, 50, 60] == m.get(indices: [0, 1, 0]))
}
func testPutIntIntShortArray() throws {
func testPutIntIntInt16Array() throws {
let m = Mat(rows: 5, cols: 5, type: CvType.CV_16SC3, scalar: Scalar(-1, -2, -3))
let elements: [Int16] = [ 10, 20, 30, 40, 50, 60]
@ -834,7 +970,21 @@ class MatTests: OpenCVTestCase {
XCTAssert([40, 50, 60] == m.get(row: 2, col: 4))
}
func testPutIntArrayShortArray() throws {
func testPutIntIntUInt16Array() throws {
let m = Mat(rows: 5, cols: 5, type: CvType.CV_16UC3, scalar: Scalar(-1, -2, -3))
let elements: [UInt16] = [ 10, 20, 30, 40, 50, 60]
var bytesNum = try m.put(row: 2, col: 3, data: elements)
XCTAssertEqual(Int32(elements.count * 2), bytesNum)
let m1 = m.col(3)
var buff = [UInt16](repeating: 0, count: 3)
bytesNum = try m1.get(row: 2, col: 0, data: &buff)
XCTAssert(buff == [10, 20, 30])
XCTAssert([40, 50, 60] == m.get(row: 2, col: 4))
}
func testPutIntArrayInt16Array() throws {
let m = Mat(sizes: [5, 5, 5], type: CvType.CV_16SC3, scalar: Scalar(-1, -2, -3))
let elements: [Int16] = [ 10, 20, 30, 40, 50, 60]
@ -848,6 +998,20 @@ class MatTests: OpenCVTestCase {
XCTAssert([40, 50, 60] == m.get(indices: [0, 2, 4]))
}
func testPutIntArrayUInt16Array() throws {
let m = Mat(sizes: [5, 5, 5], type: CvType.CV_16UC3, scalar: Scalar(-1, -2, -3))
let elements: [UInt16] = [ 10, 20, 30, 40, 50, 60]
var bytesNum = try m.put(indices: [0, 2, 3], data: elements)
XCTAssertEqual(Int32(elements.count * 2), bytesNum)
let m1 = m.submat(ranges: [Range.all(), Range.all(), Range(start: 3, end: 4)])
var buff = [UInt16](repeating: 0, count: 3)
bytesNum = try m1.get(indices: [0, 2, 0], data: &buff)
XCTAssert(buff == [10, 20, 30])
XCTAssert([40, 50, 60] == m.get(indices: [0, 2, 4]))
}
func testReshapeInt() throws {
let src = Mat(rows: 4, cols: 4, type: CvType.CV_8U, scalar: Scalar(0))
dst = src.reshape(channels: 4)

View File

@ -101,6 +101,30 @@ class RectTest: OpenCVTestCase {
XCTAssertEqual(1, r.height);
}
func testRect2fPointPoint() {
let p1 = Point2f(x:4.3, y:4.1)
let p2 = Point2f(x:2.7, y:3.9)
let r = Rect2f(point: p1, point: p2)
XCTAssertNotNil(r);
XCTAssertEqual(2.7, r.x);
XCTAssertEqual(3.9, r.y);
XCTAssertEqual(1.6, r.width, accuracy: OpenCVTestCase.FEPS);
XCTAssertEqual(0.2, r.height, accuracy: OpenCVTestCase.FEPS);
}
func testRect2dPointPoint() {
let p1 = Point2d(x:4.7879839, y:4.9922311)
let p2 = Point2d(x:2.1213123, y:3.1122129)
let r = Rect2d(point: p1, point: p2)
XCTAssertNotNil(r);
XCTAssertEqual(2.1213123, r.x);
XCTAssertEqual(3.1122129, r.y);
XCTAssertEqual(2.6666716, r.width, accuracy: OpenCVTestCase.EPS);
XCTAssertEqual(1.8800182, r.height, accuracy: OpenCVTestCase.EPS);
}
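These two tests pin down the point-pair initializer: it builds the bounding box of the two corners, so x/y take the minima and width/height the absolute differences. A minimal C++ sketch of the same contract against the underlying cv::Rect_ two-point constructor (the assertions mirror testRect2fPointPoint above):
#include <opencv2/core.hpp>
#include <cassert>
#include <cmath>
int main()
{
    // Corners given in "wrong" order are normalized to top-left + size.
    cv::Rect2f r(cv::Point2f(4.3f, 4.1f), cv::Point2f(2.7f, 3.9f));
    assert(std::abs(r.x - 2.7f) < 1e-5f && std::abs(r.y - 3.9f) < 1e-5f);
    assert(std::abs(r.width - 1.6f) < 1e-5f && std::abs(r.height - 0.2f) < 1e-5f);
    return 0;
}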
func testRectPointSize() {
let p1 = Point(x: 4, y: 4)
let sz = Size(width: 3, height: 1)

View File

@ -0,0 +1,33 @@
__all__ = []
import sys
import numpy as np
import cv2 as cv
# NumPy documentation: https://numpy.org/doc/stable/user/basics.subclassing.html
class Mat(np.ndarray):
'''
cv.Mat wrapper for numpy array.
Stores extra metadata describing how the underlying C++ code should interpret and process the numpy array.
'''
def __new__(cls, arr, **kwargs):
obj = arr.view(Mat)
return obj
def __init__(self, arr, **kwargs):
self.wrap_channels = kwargs.pop('wrap_channels', getattr(arr, 'wrap_channels', False))
if len(kwargs) > 0:
raise TypeError('Unknown parameters: {}'.format(repr(kwargs)))
def __array_finalize__(self, obj):
if obj is None:
return
self.wrap_channels = getattr(obj, 'wrap_channels', None)
Mat.__module__ = cv.__name__
cv.Mat = Mat
cv._registerMatType(Mat)

View File

@ -0,0 +1,14 @@
from collections import namedtuple
import cv2
NativeMethodPatchedResult = namedtuple("NativeMethodPatchedResult",
("py", "native"))
def testOverwriteNativeMethod(arg):
return NativeMethodPatchedResult(
arg + 1,
cv2.utils._native.testOverwriteNativeMethod(arg)
)

View File

@ -14,6 +14,7 @@
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
#include <opencv2/core/utils/logger.hpp>
#ifndef OPENCV_DISABLE_THREAD_SUPPORT
#ifdef CV_CXX11
#include <mutex>
@ -236,6 +237,171 @@ struct AsyncArray::Impl
}
};
} // namespace
#else // OPENCV_DISABLE_THREAD_SUPPORT
namespace cv {
// no threading
struct AsyncArray::Impl
{
int refcount;
void addrefFuture() CV_NOEXCEPT { refcount_future++; refcount++; }
void releaseFuture() CV_NOEXCEPT { refcount_future--; if (0 == --refcount) delete this; }
int refcount_future;
void addrefPromise() CV_NOEXCEPT { refcount_promise++; refcount++; }
void releasePromise() CV_NOEXCEPT { refcount_promise--; if (0 == --refcount) delete this; }
int refcount_promise;
mutable bool has_result; // Mat, UMat or exception
mutable cv::Ptr<Mat> result_mat;
mutable cv::Ptr<UMat> result_umat;
bool has_exception;
#if CV__EXCEPTION_PTR
std::exception_ptr exception;
#endif
cv::Exception cv_exception;
mutable bool result_is_fetched;
bool future_is_returned;
Impl()
: refcount(1), refcount_future(0), refcount_promise(1)
, has_result(false)
, has_exception(false)
, result_is_fetched(false)
, future_is_returned(false)
{
// nothing
}
~Impl()
{
if (has_result && !result_is_fetched)
{
CV_LOG_INFO(NULL, "Asynchronous result has not been fetched");
}
}
bool get(OutputArray dst, int64 timeoutNs) const
{
CV_Assert(!result_is_fetched);
if (!has_result)
{
CV_UNUSED(timeoutNs);
CV_Error(Error::StsError, "Result is not produced (unable to wait for result in OPENCV_DISABLE_THREAD_SUPPORT mode)");
}
if (!result_mat.empty())
{
dst.move(*result_mat.get());
result_mat.release();
result_is_fetched = true;
return true;
}
if (!result_umat.empty())
{
dst.move(*result_umat.get());
result_umat.release();
result_is_fetched = true;
return true;
}
#if CV__EXCEPTION_PTR
if (has_exception && exception)
{
result_is_fetched = true;
std::rethrow_exception(exception);
}
#endif
if (has_exception)
{
result_is_fetched = true;
throw cv_exception;
}
CV_Error(Error::StsInternal, "AsyncArray: invalid state of 'has_result = true'");
return false;
}
bool valid() const CV_NOEXCEPT
{
if (result_is_fetched)
return false;
if (refcount_promise == 0 && !has_result)
return false;
return true;
}
bool wait_for(int64 timeoutNs) const
{
CV_Assert(valid());
if (has_result)
return has_result;
if (timeoutNs == 0)
return has_result;
CV_Error(Error::StsError, "Unable to wait in OPENCV_DISABLE_THREAD_SUPPORT mode");
}
AsyncArray getArrayResult()
{
CV_Assert(refcount_future == 0);
AsyncArray result;
addrefFuture();
result.p = this;
future_is_returned = true;
return result;
}
void setValue(InputArray value)
{
if (future_is_returned && refcount_future == 0)
CV_Error(Error::StsError, "Associated AsyncArray has been destroyed");
CV_Assert(!has_result);
int k = value.kind();
if (k == _InputArray::UMAT)
{
result_umat = makePtr<UMat>();
value.copyTo(*result_umat.get());
}
else
{
result_mat = makePtr<Mat>();
value.copyTo(*result_mat.get());
}
has_result = true;
}
#if CV__EXCEPTION_PTR
void setException(std::exception_ptr e)
{
if (future_is_returned && refcount_future == 0)
CV_Error(Error::StsError, "Associated AsyncArray has been destroyed");
CV_Assert(!has_result);
has_exception = true;
exception = e;
has_result = true;
}
#endif
void setException(const cv::Exception& e)
{
if (future_is_returned && refcount_future == 0)
CV_Error(Error::StsError, "Associated AsyncArray has been destroyed");
CV_Assert(!has_result);
has_exception = true;
cv_exception = e;
has_result = true;
}
};
}
#endif // OPENCV_DISABLE_THREAD_SUPPORT
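Even without threads the Impl above keeps the public promise/future contract: the value must be produced before it is fetched, and any real wait raises StsError. A minimal sketch of the synchronous round trip, using only the public cv::AsyncPromise / cv::AsyncArray API (AsyncPromise lives in a detail header and is mainly intended for tests):
#include <opencv2/core.hpp>
#include <opencv2/core/async.hpp>
#include <opencv2/core/detail/async_promise.hpp>
int main()
{
    cv::AsyncPromise promise;
    cv::AsyncArray future = promise.getArrayResult();
    // In OPENCV_DISABLE_THREAD_SUPPORT builds the producer must run
    // synchronously before get(); waiting for a missing result throws.
    promise.setValue(cv::Mat::eye(3, 3, CV_32F));
    cv::Mat result;
    future.get(result);  // moves the stored Mat out exactly once
    CV_Assert(result.at<float>(0, 0) == 1.f);
    return 0;
}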
namespace cv {
AsyncArray::AsyncArray() CV_NOEXCEPT
: p(NULL)

View File

@ -5,6 +5,7 @@
#include "precomp.hpp"
#include "opencv2/core/bindings_utils.hpp"
#include <sstream>
#include <iomanip>
#include <opencv2/core/utils/filesystem.hpp>
#include <opencv2/core/utils/filesystem.private.hpp>
@ -210,6 +211,53 @@ CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argume
return ss.str();
}
static inline std::ostream& operator<<(std::ostream& os, const cv::Rect& rect)
{
return os << "[x=" << rect.x << ", y=" << rect.y << ", w=" << rect.width << ", h=" << rect.height << ']';
}
template <class T, class Formatter>
static inline String dumpVector(const std::vector<T>& vec, Formatter format)
{
std::ostringstream oss("[", std::ios::ate);
if (!vec.empty())
{
oss << format << vec[0];
for (std::size_t i = 1; i < vec.size(); ++i)
{
oss << ", " << format << vec[i];
}
}
oss << "]";
return oss.str();
}
static inline std::ostream& noFormat(std::ostream& os)
{
return os;
}
static inline std::ostream& floatFormat(std::ostream& os)
{
return os << std::fixed << std::setprecision(2);
}
String dumpVectorOfInt(const std::vector<int>& vec)
{
return dumpVector(vec, &noFormat);
}
String dumpVectorOfDouble(const std::vector<double>& vec)
{
return dumpVector(vec, &floatFormat);
}
String dumpVectorOfRect(const std::vector<Rect>& vec)
{
return dumpVector(vec, &noFormat);
}
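These dump helpers give binding tests a stable textual form for vector arguments; noFormat and floatFormat are stream manipulators applied before every element, which is why doubles always print with two fixed decimals. A short sketch of the expected output, assuming the functions sit in cv::utils like the rest of this file:
#include <iostream>
#include <vector>
#include <opencv2/core/bindings_utils.hpp>
int main()
{
    std::vector<double> v = {1.0, 2.5, -3.25};
    // floatFormat injects std::fixed << std::setprecision(2) per element
    std::cout << cv::utils::dumpVectorOfDouble(v) << std::endl;
    // prints: [1.00, 2.50, -3.25]
    return 0;
}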
namespace fs {
cv::String getCacheDirectoryForDownloads()
{

View File

@ -24,11 +24,6 @@
#ifdef HAVE_OPENCL
#include <sstream>
#include "opencl_kernels_core.hpp"
#include "opencv2/core/opencl/runtime/opencl_clblas.hpp"
#include "opencv2/core/opencl/runtime/opencl_core.hpp"
namespace cv
{
@ -37,52 +32,75 @@ static bool intel_gpu_gemm(
UMat B, Size sizeB,
UMat D, Size sizeD,
double alpha, double beta,
bool atrans, bool btrans)
bool atrans, bool btrans,
bool& isPropagatedC2D
)
{
CV_UNUSED(sizeB);
int M = sizeD.height, N = sizeD.width, K = ((atrans)? sizeA.height : sizeA.width);
std::string kernelName;
bool ret = true;
if (M < 4 || N < 4 || K < 4) // vload4
return false;
size_t lx = 8, ly = 4;
size_t dx = 4, dy = 8;
CV_LOG_VERBOSE(NULL, 0, "M=" << M << " N=" << N << " K=" << K);
std::string kernelName;
unsigned int lx = 8, ly = 4;
unsigned int dx = 4, dy = 8;
if(!atrans && !btrans)
{
if (M % 32 == 0 && N % 32 == 0 && K % 16 == 0)
{
kernelName = "intelblas_gemm_buffer_NN_sp";
}
else
{
if (M % 2 != 0)
return false;
// vload4(0, dst_write0) - 4 cols
// multiply by lx: 8
if (N % (4*8) != 0)
return false;
kernelName = "intelblas_gemm_buffer_NN";
}
}
else if(atrans && !btrans)
{
if (M % 32 != 0)
return false;
if (N % 32 != 0)
return false;
kernelName = "intelblas_gemm_buffer_TN";
}
else if(!atrans && btrans)
{
if (K % 4 != 0)
return false;
kernelName = "intelblas_gemm_buffer_NT";
ly = 16;
dx = 1;
}
else
{
if (M % 32 != 0)
return false;
if (N % 32 != 0)
return false;
if (K % 16 != 0)
return false;
kernelName = "intelblas_gemm_buffer_TT";
}
const size_t gx = (size_t)(N + dx - 1) / dx;
const size_t gy = (size_t)(M + dy - 1) / dy;
CV_LOG_DEBUG(NULL, "kernel: " << kernelName << " (M=" << M << " N=" << N << " K=" << K << ")");
const size_t gx = divUp((size_t)N, dx);
const size_t gy = divUp((size_t)M, dy);
size_t local[] = {lx, ly, 1};
size_t global[] = {(gx + lx - 1) / lx * lx, (gy + ly - 1) / ly * ly, 1};
int stride = (M * N < 1024 * 1024) ? 10000000 : 256;
size_t global[] = {roundUp(gx, lx), roundUp(gy, ly), 1};
ocl::Queue q;
String errmsg;
@ -110,10 +128,13 @@ static bool intel_gpu_gemm(
(int)(D.step / sizeof(float))
);
ret = k.run(2, global, local, false, q);
bool ret = k.run(2, global, local, false, q);
return ret;
}
else
{
int stride = (M * N < 1024 * 1024) ? 10000000 : 256;
for(int start_index = 0; start_index < K; start_index += stride)
{
ocl::Kernel k(kernelName.c_str(), program);
@ -132,12 +153,16 @@ static bool intel_gpu_gemm(
(int) start_index, // 14 start_index
stride);
ret = k.run(2, global, local, false, q);
if (!ret) return ret;
bool ret = k.run(2, global, local, false, q);
if (!ret)
{
if (start_index != 0)
isPropagatedC2D = false; // D content was partially overwritten; C must be copied in again
return false;
}
}
return true;
}
return ret;
}
} // namespace cv
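The launch-geometry cleanup swaps the hand-rolled rounding for the existing cv::divUp / cv::roundUp helpers from core/utility.hpp; both pairs compute the same values. A quick sketch of the arithmetic behind the global NDRange:
#include <opencv2/core/utility.hpp>
#include <cassert>
int main()
{
    // divUp(a, b) == (a + b - 1) / b, i.e. ceiling division
    assert(cv::divUp((size_t)100, 4u) == 25);
    assert(cv::divUp((size_t)101, 4u) == 26);
    // roundUp(a, b) rounds a up to a multiple of b -- exactly what the
    // old "(gx + lx - 1) / lx * lx" expression produced for the kernel's
    // global work size.
    assert(cv::roundUp((size_t)26, 8u) == 32);
    return 0;
}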

View File

@ -42,6 +42,8 @@
//M*/
#include "precomp.hpp"
#include <opencv2/core/utils/logger.hpp>
#include "opencl_kernels_core.hpp"
#include "opencv2/core/opencl/runtime/opencl_clblas.hpp"
#include "opencv2/core/opencl/runtime/opencl_core.hpp"
@ -155,10 +157,12 @@ static bool ocl_gemm_amdblas( InputArray matA, InputArray matB, double alpha,
static bool ocl_gemm( InputArray matA, InputArray matB, double alpha,
InputArray matC, double beta, OutputArray matD, int flags )
{
int depth = matA.depth(), cn = matA.channels();
int type = CV_MAKETYPE(depth, cn);
int type = matA.type();
int depth = CV_MAT_DEPTH(type);
int cn = CV_MAT_CN(type);
CV_Assert_N( type == matB.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );
CV_CheckTypeEQ(type, matB.type(), "");
CV_CheckType(type, type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2, "");
const ocl::Device & dev = ocl::Device::getDefault();
bool doubleSupport = dev.doubleFPConfig() > 0;
@ -170,88 +174,103 @@ static bool ocl_gemm( InputArray matA, InputArray matB, double alpha,
Size sizeA = matA.size(), sizeB = matB.size(), sizeC = haveC ? matC.size() : Size(0, 0);
bool atrans = (flags & GEMM_1_T) != 0, btrans = (flags & GEMM_2_T) != 0, ctrans = (flags & GEMM_3_T) != 0;
CV_Assert( !haveC || matC.type() == type );
if (haveC)
CV_CheckTypeEQ(type, matC.type(), "");
Size sizeD(((btrans) ? sizeB.height : sizeB.width),
((atrans) ? sizeA.width : sizeA.height));
if (atrans)
sizeA = Size(sizeA.height, sizeA.width);
if (btrans)
sizeB = Size(sizeB.height, sizeB.width);
if (haveC && ctrans)
sizeC = Size(sizeC.height, sizeC.width);
CV_CheckEQ(sizeA.width, sizeB.height, "");
if (haveC)
CV_CheckEQ(sizeC, sizeD, "");
UMat A = matA.getUMat();
UMat B = matB.getUMat();
Size sizeD(((btrans)? sizeB.height : sizeB.width),
((atrans)? sizeA.width : sizeA.height));
matD.create(sizeD, type);
UMat D = matD.getUMat();
UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat();
bool isPropagatedC2D = false; // D content is updated with C / C.t()
if (!dev.intelSubgroupsSupport() || (depth == CV_64F) || cn != 1)
{
String opts;
if (atrans)
sizeA = Size(sizeA.height, sizeA.width);
if (btrans)
sizeB = Size(sizeB.height, sizeB.width);
if (haveC && ctrans)
sizeC = Size(sizeC.height, sizeC.width);
CV_Assert( sizeA.width == sizeB.height && (!haveC || sizeC == sizeD) );
int max_wg_size = (int)dev.maxWorkGroupSize();
int block_size = (max_wg_size / (32*cn) < 32) ? (max_wg_size / (16*cn) < 16) ? (max_wg_size / (8*cn) < 8) ? 1 : 8 : 16 : 32;
if (atrans)
A = A.t();
if (btrans)
B = B.t();
if (haveC)
ctrans ? transpose(matC, D) : matC.copyTo(D);
int vectorWidths[] = { 4, 4, 2, 2, 1, 4, cn, -1 };
int kercn = ocl::checkOptimalVectorWidth(vectorWidths, B, D);
opts += format(" -D T=%s -D T1=%s -D WT=%s -D cn=%d -D kercn=%d -D LOCAL_SIZE=%d%s%s%s",
ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(CV_MAKETYPE(depth, kercn)),
cn, kercn, block_size,
(sizeA.width % block_size !=0) ? " -D NO_MULT" : "",
haveC ? " -D HAVE_C" : "",
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
ocl::Kernel k("gemm", cv::ocl::core::gemm_oclsrc, opts);
if (k.empty())
return false;
if (depth == CV_64F)
k.args(ocl::KernelArg::ReadOnlyNoSize(A),
ocl::KernelArg::ReadOnlyNoSize(B, cn, kercn),
ocl::KernelArg::ReadWrite(D, cn, kercn),
sizeA.width, alpha, beta);
else
k.args(ocl::KernelArg::ReadOnlyNoSize(A),
ocl::KernelArg::ReadOnlyNoSize(B, cn, kercn),
ocl::KernelArg::ReadWrite(D, cn, kercn),
sizeA.width, (float)alpha, (float)beta);
size_t globalsize[2] = { (size_t)sizeD.width * cn / kercn, (size_t)sizeD.height};
size_t localsize[2] = { (size_t)block_size, (size_t)block_size};
return k.run(2, globalsize, block_size!=1 ? localsize : NULL, false);
}
else
if (dev.intelSubgroupsSupport() && (depth == CV_32F) && cn == 1)
{
if (haveC && beta != 0.0)
{
ctrans ? transpose(matC, D) : matC.copyTo(D);
isPropagatedC2D = true;
}
else
{
beta = 0.0;
}
return intel_gpu_gemm(A, sizeA,
B, sizeB,
D, sizeD,
alpha,
beta,
atrans, btrans);
bool res = intel_gpu_gemm(A, matA.size(),
B, matB.size(),
D, sizeD,
alpha,
beta,
atrans, btrans,
isPropagatedC2D);
if (res)
return true;
// fallback on generic OpenCL code
}
if (sizeD.width < 8 || sizeD.height < 8)
return false;
String opts;
int wg_size = (int)dev.maxWorkGroupSize();
int sizeDmin = std::min(sizeD.width, sizeD.height);
wg_size = std::min(wg_size, sizeDmin * sizeDmin);
int block_size = (wg_size / (32*cn) < 32) ? (wg_size / (16*cn) < 16) ? (wg_size / (8*cn) < 8) ? 1 : 8 : 16 : 32;
if (atrans)
A = A.t();
if (btrans)
B = B.t();
if (haveC && !isPropagatedC2D)
ctrans ? transpose(matC, D) : matC.copyTo(D);
int vectorWidths[] = { 4, 4, 2, 2, 1, 4, cn, -1 };
int kercn = ocl::checkOptimalVectorWidth(vectorWidths, B, D);
opts += format(" -D T=%s -D T1=%s -D WT=%s -D cn=%d -D kercn=%d -D LOCAL_SIZE=%d%s%s%s",
ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(CV_MAKETYPE(depth, kercn)),
cn, kercn, block_size,
(sizeA.width % block_size !=0) ? " -D NO_MULT" : "",
haveC ? " -D HAVE_C" : "",
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
ocl::Kernel k("gemm", cv::ocl::core::gemm_oclsrc, opts);
if (k.empty())
return false;
if (depth == CV_64F)
k.args(ocl::KernelArg::ReadOnlyNoSize(A),
ocl::KernelArg::ReadOnlyNoSize(B, cn, kercn),
ocl::KernelArg::ReadWrite(D, cn, kercn),
sizeA.width, alpha, beta);
else
k.args(ocl::KernelArg::ReadOnlyNoSize(A),
ocl::KernelArg::ReadOnlyNoSize(B, cn, kercn),
ocl::KernelArg::ReadWrite(D, cn, kercn),
sizeA.width, (float)alpha, (float)beta);
size_t globalsize[2] = { (size_t)sizeD.width * cn / kercn, (size_t)sizeD.height};
size_t localsize[2] = { (size_t)block_size, (size_t)block_size};
return k.run(2, globalsize, block_size !=1 ? localsize : NULL, false);
}
#endif

View File

@ -809,18 +809,17 @@ Mat::Mat(const Mat& m, const Rect& roi)
data += roi.x*esz;
CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
if( u )
CV_XADD(&u->refcount, 1);
if( roi.width < m.cols || roi.height < m.rows )
flags |= SUBMATRIX_FLAG;
step[0] = m.step[0]; step[1] = esz;
updateContinuityFlag();
addref();
if( rows <= 0 || cols <= 0 )
{
release();
rows = cols = 0;
release();
}
}

View File

@ -229,14 +229,14 @@ void cv::setIdentity( InputOutputArray _m, const Scalar& s )
namespace cv {
UMat UMat::eye(int rows, int cols, int type)
UMat UMat::eye(int rows, int cols, int type, UMatUsageFlags usageFlags)
{
return UMat::eye(Size(cols, rows), type);
return UMat::eye(Size(cols, rows), type, usageFlags);
}
UMat UMat::eye(Size size, int type)
UMat UMat::eye(Size size, int type, UMatUsageFlags usageFlags)
{
UMat m(size, type);
UMat m(size, type, usageFlags);
setIdentity(m);
return m;
}
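The new overloads simply thread the allocation hint through to the temporary UMat, so an identity matrix can be requested directly in, e.g., device-backed memory. A hedged usage sketch (this assumes the matching header declaration gained a defaulted usageFlags parameter, as these definitions imply):
#include <opencv2/core.hpp>
int main()
{
    // Request the identity in device memory instead of the default usage:
    cv::UMat e = cv::UMat::eye(3, 3, CV_32F, cv::USAGE_ALLOCATE_DEVICE_MEMORY);
    CV_Assert(e.size() == cv::Size(3, 3));
    return 0;
}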

View File

@ -1194,7 +1194,7 @@ double norm( InputArray _src1, InputArray _src2, int normType, InputArray _mask
// special case to handle "integer" overflow in accumulator
const size_t esz = src1.elemSize();
const int total = (int)it.size;
const int intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
const int intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
const int blockSize = std::min(total, intSumBlockSize);
int isum = 0;
int count = 0;
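The /cn factor restores the overflow guarantee for multi-channel input: one block feeds the int accumulator blockSize * cn absolute values, each at most 255 for 8-bit data, so the old bound counted only per-channel elements. In the NORM_L1 worst case (a hedged bound, assuming 8-bit input):

$$ \mathrm{blockSize} \cdot cn \cdot 255 \;\le\; 2^{23} \cdot 255 \;=\; 2139095040 \;<\; 2^{31}-1, $$

so dividing the nominal block size by cn keeps isum inside int range.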

View File

@ -76,8 +76,11 @@
#undef CV__ALLOCATOR_STATS_LOG
#define CV_OPENCL_ALWAYS_SHOW_BUILD_LOG 0
#define CV_OPENCL_SHOW_BUILD_OPTIONS 0
#define CV_OPENCL_SHOW_BUILD_KERNELS 0
#define CV_OPENCL_SHOW_RUN_KERNELS 0
#define CV_OPENCL_SYNC_RUN_KERNELS 0
#define CV_OPENCL_TRACE_CHECK 0
#define CV_OPENCL_VALIDATE_BINARY_PROGRAMS 1
@ -1566,6 +1569,7 @@ struct Device::Impl
version_ = getStrProp(CL_DEVICE_VERSION);
extensions_ = getStrProp(CL_DEVICE_EXTENSIONS);
doubleFPConfig_ = getProp<cl_device_fp_config, int>(CL_DEVICE_DOUBLE_FP_CONFIG);
halfFPConfig_ = getProp<cl_device_fp_config, int>(CL_DEVICE_HALF_FP_CONFIG);
hostUnifiedMemory_ = getBoolProp(CL_DEVICE_HOST_UNIFIED_MEMORY);
maxComputeUnits_ = getProp<cl_uint, int>(CL_DEVICE_MAX_COMPUTE_UNITS);
maxWorkGroupSize_ = getProp<size_t, size_t>(CL_DEVICE_MAX_WORK_GROUP_SIZE);
@ -1678,6 +1682,7 @@ struct Device::Impl
String version_;
std::string extensions_;
int doubleFPConfig_;
int halfFPConfig_;
bool hostUnifiedMemory_;
int maxComputeUnits_;
size_t maxWorkGroupSize_;
@ -1827,11 +1832,7 @@ int Device::singleFPConfig() const
{ return p ? p->getProp<cl_device_fp_config, int>(CL_DEVICE_SINGLE_FP_CONFIG) : 0; }
int Device::halfFPConfig() const
#ifdef CL_VERSION_1_2
{ return p ? p->getProp<cl_device_fp_config, int>(CL_DEVICE_HALF_FP_CONFIG) : 0; }
#else
{ CV_REQUIRE_OPENCL_1_2_ERROR; }
#endif
{ return p ? p->halfFPConfig_ : 0; }
bool Device::endianLittle() const
{ return p ? p->getBoolProp(CL_DEVICE_ENDIAN_LITTLE) : false; }
@ -2157,20 +2158,22 @@ static cl_device_id selectOpenCLDevice(const char* configuration = NULL)
platforms.resize(numPlatforms);
}
int selectedPlatform = -1;
if (platform.length() > 0)
{
for (size_t i = 0; i < platforms.size(); i++)
for (std::vector<cl_platform_id>::iterator currentPlatform = platforms.begin(); currentPlatform != platforms.end();)
{
std::string name;
CV_OCL_DBG_CHECK(getStringInfo(clGetPlatformInfo, platforms[i], CL_PLATFORM_NAME, name));
CV_OCL_DBG_CHECK(getStringInfo(clGetPlatformInfo, *currentPlatform, CL_PLATFORM_NAME, name));
if (name.find(platform) != std::string::npos)
{
selectedPlatform = (int)i;
break;
++currentPlatform;
}
else
{
currentPlatform = platforms.erase(currentPlatform);
}
}
if (selectedPlatform == -1)
if (platforms.size() == 0)
{
CV_LOG_ERROR(NULL, "OpenCL: Can't find OpenCL platform by name: " << platform);
goto not_found;
@ -2207,13 +2210,11 @@ static cl_device_id selectOpenCLDevice(const char* configuration = NULL)
goto not_found;
}
std::vector<cl_device_id> devices; // TODO Use clReleaseDevice to cleanup
for (int i = selectedPlatform >= 0 ? selectedPlatform : 0;
(selectedPlatform >= 0 ? i == selectedPlatform : true) && (i < (int)platforms.size());
i++)
std::vector<cl_device_id> devices;
for (std::vector<cl_platform_id>::iterator currentPlatform = platforms.begin(); currentPlatform != platforms.end(); ++currentPlatform)
{
cl_uint count = 0;
cl_int status = clGetDeviceIDs(platforms[i], deviceType, 0, NULL, &count);
cl_int status = clGetDeviceIDs(*currentPlatform, deviceType, 0, NULL, &count);
if (!(status == CL_SUCCESS || status == CL_DEVICE_NOT_FOUND))
{
CV_OCL_DBG_CHECK_RESULT(status, "clGetDeviceIDs get count");
@ -2222,7 +2223,7 @@ static cl_device_id selectOpenCLDevice(const char* configuration = NULL)
continue;
size_t base = devices.size();
devices.resize(base + count);
status = clGetDeviceIDs(platforms[i], deviceType, count, &devices[base], &count);
status = clGetDeviceIDs(*currentPlatform, deviceType, count, &devices[base], &count);
if (!(status == CL_SUCCESS || status == CL_DEVICE_NOT_FOUND))
{
CV_OCL_DBG_CHECK_RESULT(status, "clGetDeviceIDs get IDs");
@ -3455,19 +3456,33 @@ struct Kernel::Impl
void cleanupUMats()
{
bool exceptionOccurred = false;
for( int i = 0; i < MAX_ARRS; i++ )
{
if( u[i] )
{
if( CV_XADD(&u[i]->urefcount, -1) == 1 )
{
u[i]->flags |= UMatData::ASYNC_CLEANUP;
u[i]->currAllocator->deallocate(u[i]);
try
{
u[i]->currAllocator->deallocate(u[i]);
}
catch(const std::exception& exc)
{
// rethrowing here is not possible in pre-C++11 legacy code,
// so log the error and record that an exception occurred to
// report it via the assertion below
CV_LOG_ERROR(NULL, "OCL: Unexpected C++ exception in OpenCL Kernel::Impl::cleanupUMats(): " << exc.what());
exceptionOccurred = true;
}
}
u[i] = 0;
}
}
nu = 0;
haveTempDstUMats = false;
haveTempSrcUMats = false;
CV_Assert(!exceptionOccurred);
}
void addUMat(const UMat& m, bool dst)
@ -3498,8 +3513,16 @@ struct Kernel::Impl
void finit(cl_event e)
{
CV_UNUSED(e);
cleanupUMats();
isInProgress = false;
try
{
cleanupUMats();
}
catch(...)
{
release();
throw;
}
release();
}
@ -3657,6 +3680,10 @@ bool Kernel::empty() const
static cv::String dumpValue(size_t sz, const void* p)
{
if (!p)
return "NULL";
if (sz == 2)
return cv::format("%d / %uu / 0x%04x", *(short*)p, *(unsigned short*)p, *(short*)p);
if (sz == 4)
return cv::format("%d / %uu / 0x%08x / %g", *(int*)p, *(int*)p, *(int*)p, *(float*)p);
if (sz == 8)
@ -3829,6 +3856,14 @@ bool Kernel::run(int dims, size_t _globalsize[], size_t _localsize[],
}
bool Kernel::run_(int dims, size_t _globalsize[], size_t _localsize[],
bool sync, const Queue& q)
{
CV_Assert(p);
return p->run(dims, _globalsize, _localsize, sync, NULL, q);
}
static bool isRaiseErrorOnReuseAsyncKernel()
{
static bool initialized = false;
@ -3869,6 +3904,10 @@ bool Kernel::Impl::run(int dims, size_t globalsize[], size_t localsize[],
return false; // OpenCV 5.0: raise error
}
#if CV_OPENCL_SYNC_RUN_KERNELS
sync = true;
#endif
cl_command_queue qq = getQueue(q);
if (haveTempDstUMats)
sync = true;
@ -4316,7 +4355,28 @@ struct Program::Impl
if (!param_buildExtraOptions.empty())
buildflags = joinBuildOptions(buildflags, param_buildExtraOptions);
}
#if CV_OPENCL_SHOW_BUILD_OPTIONS
CV_LOG_INFO(NULL, "OpenCL program '" << sourceModule_ << "/" << sourceName_ << "' options:" << buildflags);
#endif
compile(ctx, src_, errmsg);
#if CV_OPENCL_SHOW_BUILD_KERNELS
if (handle)
{
size_t retsz = 0;
char kernels_buffer[4096] = {0};
cl_int result = clGetProgramInfo(handle, CL_PROGRAM_KERNEL_NAMES, sizeof(kernels_buffer), &kernels_buffer[0], &retsz);
CV_OCL_DBG_CHECK_RESULT(result, cv::format("clGetProgramInfo(CL_PROGRAM_KERNEL_NAMES: %s/%s)", sourceModule_.c_str(), sourceName_.c_str()).c_str());
if (result == CL_SUCCESS && retsz < sizeof(kernels_buffer))
{
kernels_buffer[retsz] = 0;
CV_LOG_INFO(NULL, "OpenCL program '" << sourceModule_ << "/" << sourceName_ << "' kernels: '" << kernels_buffer << "'");
}
else
{
CV_LOG_ERROR(NULL, "OpenCL program '" << sourceModule_ << "/" << sourceName_ << "' can't retrieve kernel names!");
}
}
#endif
}
bool compile(const Context& ctx, const ProgramSource::Impl* src_, String& errmsg)
@ -4548,7 +4608,6 @@ struct Program::Impl
CV_LOG_INFO(NULL, result << ": Kernels='" << kernels_buffer << "'");
}
#endif
}
return handle != NULL;
}
@ -6668,6 +6727,10 @@ void convertFromImage(void* cl_mem_image, UMat& dst)
depth = CV_32F;
break;
case CL_HALF_FLOAT:
depth = CV_16F;
break;
default:
CV_Error(cv::Error::OpenCLApiCallError, "Not supported image_channel_data_type");
}
@ -6676,9 +6739,23 @@ void convertFromImage(void* cl_mem_image, UMat& dst)
switch (fmt.image_channel_order)
{
case CL_R:
case CL_A:
case CL_INTENSITY:
case CL_LUMINANCE:
type = CV_MAKE_TYPE(depth, 1);
break;
case CL_RG:
case CL_RA:
type = CV_MAKE_TYPE(depth, 2);
break;
// CL_RGB has no mappings to OpenCV types because CL_RGB can only be used with
// CL_UNORM_SHORT_565, CL_UNORM_SHORT_555, or CL_UNORM_INT_101010.
/*case CL_RGB:
type = CV_MAKE_TYPE(depth, 3);
break;*/
case CL_RGBA:
case CL_BGRA:
case CL_ARGB:
@ -7068,6 +7145,13 @@ static std::string kerToStr(const Mat & k)
stream << "DIG(" << data[i] << "f)";
stream << "DIG(" << data[width] << "f)";
}
else if (depth == CV_16F)
{
stream.setf(std::ios_base::showpoint);
for (int i = 0; i < width; ++i)
stream << "DIG(" << (float)data[i] << "h)";
stream << "DIG(" << (float)data[width] << "h)";
}
else
{
for (int i = 0; i < width; ++i)
@ -7091,7 +7175,7 @@ String kernelToStr(InputArray _kernel, int ddepth, const char * name)
typedef std::string (* func_t)(const Mat &);
static const func_t funcs[] = { kerToStr<uchar>, kerToStr<char>, kerToStr<ushort>, kerToStr<short>,
kerToStr<int>, kerToStr<float>, kerToStr<double>, 0 };
kerToStr<int>, kerToStr<float>, kerToStr<double>, kerToStr<float16_t> };
const func_t func = funcs[ddepth];
CV_Assert(func != 0);
@ -7130,14 +7214,14 @@ int predictOptimalVectorWidth(InputArray src1, InputArray src2, InputArray src3,
int vectorWidths[] = { d.preferredVectorWidthChar(), d.preferredVectorWidthChar(),
d.preferredVectorWidthShort(), d.preferredVectorWidthShort(),
d.preferredVectorWidthInt(), d.preferredVectorWidthFloat(),
d.preferredVectorWidthDouble(), -1 };
d.preferredVectorWidthDouble(), d.preferredVectorWidthHalf() };
// if the device says don't use vectors
if (vectorWidths[0] == 1)
{
// it's heuristic
vectorWidths[CV_8U] = vectorWidths[CV_8S] = 4;
vectorWidths[CV_16U] = vectorWidths[CV_16S] = 2;
vectorWidths[CV_16U] = vectorWidths[CV_16S] = vectorWidths[CV_16F] = 2;
vectorWidths[CV_32S] = vectorWidths[CV_32F] = vectorWidths[CV_64F] = 1;
}
@ -7225,10 +7309,12 @@ struct Image2D::Impl
{
cl_image_format format;
static const int channelTypes[] = { CL_UNSIGNED_INT8, CL_SIGNED_INT8, CL_UNSIGNED_INT16,
CL_SIGNED_INT16, CL_SIGNED_INT32, CL_FLOAT, -1, -1 };
CL_SIGNED_INT16, CL_SIGNED_INT32, CL_FLOAT, -1, CL_HALF_FLOAT };
static const int channelTypesNorm[] = { CL_UNORM_INT8, CL_SNORM_INT8, CL_UNORM_INT16,
CL_SNORM_INT16, -1, -1, -1, -1 };
static const int channelOrders[] = { -1, CL_R, CL_RG, -1, CL_RGBA };
// CL_RGB has no mappings to OpenCV types because CL_RGB can only be used with
// CL_UNORM_SHORT_565, CL_UNORM_SHORT_555, or CL_UNORM_INT_101010.
static const int channelOrders[] = { -1, CL_R, CL_RG, /*CL_RGB*/ -1, CL_RGBA };
int channelType = norm ? channelTypesNorm[depth] : channelTypes[depth];
int channelOrder = channelOrders[cn];

Some files were not shown because too many files have changed in this diff.