Merge pull request #1 from Itseez/master

merge with itseez/opencv/master
Nuzhny007 2015-04-06 16:24:53 +03:00
commit 581923fa57
163 changed files with 8826 additions and 1445 deletions

.gitignore vendored
View File

@@ -9,7 +9,6 @@ Thumbs.db
tags
tegra/
bin/
CMakeFiles/
*.sdf
*.opensdf
*.obj
@@ -17,3 +16,9 @@ CMakeFiles/
*.depend
*.rule
*.tmp
*/debug
*/CMakeFiles
CMakeCache.txt
*.suo
*.log
*.tlog

View File

@@ -842,7 +842,6 @@ static int jas_cmshapmat_apply(jas_cmpxform_t *pxform, jas_cmreal_t *in,
*dst++ = a2;
}
} else {
assert(0);
while (--cnt >= 0) {
a0 = *src++;
src++;

View File

@@ -345,6 +345,7 @@ jas_stream_t *jas_stream_tmpfile()
{
jas_stream_t *stream;
jas_stream_fileobj_t *obj;
char *tmpname;
if (!(stream = jas_stream_create())) {
return 0;
@@ -365,10 +366,12 @@ jas_stream_t *jas_stream_tmpfile()
#ifdef _WIN32
/* Choose a file name. */
tmpnam(obj->pathname);
tmpname = tempnam(NULL, NULL);
strcpy(obj->pathname, tmpname);
free(tmpname);
/* Open the underlying file. */
if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY,
if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY | O_TEMPORARY | _O_SHORT_LIVED,
JAS_STREAM_PERMS)) < 0) {
jas_stream_destroy(stream);
return 0;

View File

@@ -15,6 +15,13 @@ else()
ocv_list_filterout(lib_srcs jmemnobs.c)
endif()
if(WINRT)
add_definitions(-DNO_GETENV)
get_directory_property( DirDefs COMPILE_DEFINITIONS )
message(STATUS "Adding NO_GETENV to compiler definitions for WINRT:")
message(STATUS " COMPILE_DEFINITIONS = ${DirDefs}")
endif()
# ----------------------------------------------------------------------------------
# Define the library target:
# ----------------------------------------------------------------------------------

View File

@@ -17,7 +17,7 @@ check_include_file(string.h HAVE_STRING_H)
check_include_file(sys/types.h HAVE_SYS_TYPES_H)
check_include_file(unistd.h HAVE_UNISTD_H)
if(WIN32)
if(WIN32 AND NOT WINRT)
set(USE_WIN32_FILEIO 1)
endif()
@@ -79,7 +79,7 @@ set(lib_srcs
"${CMAKE_CURRENT_BINARY_DIR}/tif_config.h"
)
if(WIN32)
if(WIN32 AND NOT WINRT)
list(APPEND lib_srcs tif_win32.c)
else()
list(APPEND lib_srcs tif_unix.c)

View File

@@ -244,6 +244,7 @@ OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too no
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ANDROID_EXAMPLES_WITH_LIBS "Build binaries of Android examples with native libraries" OFF IF ANDROID )
OCV_OPTION(ENABLE_IMPL_COLLECTION "Collect implementation data on function call" OFF )
OCV_OPTION(GENERATE_ABI_DESCRIPTOR "Generate XML file for abi_compliance_checker tool" OFF IF UNIX)
if(ENABLE_IMPL_COLLECTION)
add_definitions(-DCV_COLLECT_IMPL_DATA)
@@ -274,8 +275,6 @@ endif()
if(ANDROID OR WIN32)
set(OPENCV_DOC_INSTALL_PATH doc)
elseif(INSTALL_TO_MANGLED_PATHS)
set(OPENCV_DOC_INSTALL_PATH share/OpenCV-${OPENCV_VERSION}/doc)
else()
set(OPENCV_DOC_INSTALL_PATH share/OpenCV/doc)
endif()
@@ -309,6 +308,10 @@ if(NOT OPENCV_TEST_INSTALL_PATH)
set(OPENCV_TEST_INSTALL_PATH "${OPENCV_BIN_INSTALL_PATH}")
endif()
if (OPENCV_TEST_DATA_PATH)
get_filename_component(OPENCV_TEST_DATA_PATH ${OPENCV_TEST_DATA_PATH} ABSOLUTE)
endif()
if(OPENCV_TEST_DATA_PATH AND NOT OPENCV_TEST_DATA_INSTALL_PATH)
if(ANDROID)
set(OPENCV_TEST_DATA_INSTALL_PATH "sdk/etc/testdata")
@@ -327,9 +330,11 @@ if(ANDROID)
set(OPENCV_CONFIG_INSTALL_PATH sdk/native/jni)
set(OPENCV_INCLUDE_INSTALL_PATH sdk/native/jni/include)
set(OPENCV_SAMPLES_SRC_INSTALL_PATH samples/native)
set(OPENCV_OTHER_INSTALL_PATH sdk/etc)
else()
set(LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/lib")
set(3P_LIBRARY_OUTPUT_PATH "${OpenCV_BINARY_DIR}/3rdparty/lib${LIB_SUFFIX}")
if(WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
if(OpenCV_STATIC)
set(OPENCV_LIB_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib${LIB_SUFFIX}")
@@ -338,10 +343,14 @@ else()
endif()
set(OPENCV_3P_LIB_INSTALL_PATH "${OpenCV_INSTALL_BINARIES_PREFIX}staticlib${LIB_SUFFIX}")
set(OPENCV_SAMPLES_SRC_INSTALL_PATH samples/native)
set(OPENCV_JAR_INSTALL_PATH java)
set(OPENCV_OTHER_INSTALL_PATH etc)
else()
set(OPENCV_LIB_INSTALL_PATH lib${LIB_SUFFIX})
set(OPENCV_3P_LIB_INSTALL_PATH share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH})
set(OPENCV_SAMPLES_SRC_INSTALL_PATH share/OpenCV/samples)
set(OPENCV_JAR_INSTALL_PATH share/OpenCV/java)
set(OPENCV_OTHER_INSTALL_PATH share/OpenCV)
endif()
set(OPENCV_INCLUDE_INSTALL_PATH "include")
@@ -358,8 +367,16 @@ set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
if(INSTALL_TO_MANGLED_PATHS)
set(OPENCV_INCLUDE_INSTALL_PATH ${OPENCV_INCLUDE_INSTALL_PATH}/opencv-${OPENCV_VERSION})
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_3P_LIB_INSTALL_PATH "${OPENCV_3P_LIB_INSTALL_PATH}")
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_SAMPLES_SRC_INSTALL_PATH "${OPENCV_SAMPLES_SRC_INSTALL_PATH}")
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_CONFIG_INSTALL_PATH "${OPENCV_CONFIG_INSTALL_PATH}")
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_DOC_INSTALL_PATH "${OPENCV_DOC_INSTALL_PATH}")
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_JAR_INSTALL_PATH "${OPENCV_JAR_INSTALL_PATH}")
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_TEST_DATA_INSTALL_PATH "${OPENCV_TEST_DATA_INSTALL_PATH}")
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OPENCV_OTHER_INSTALL_PATH "${OPENCV_OTHER_INSTALL_PATH}")
endif()
if(WIN32)
# Postfix of DLLs:
set(OPENCV_DLLVERSION "${OPENCV_VERSION_MAJOR}${OPENCV_VERSION_MINOR}${OPENCV_VERSION_PATCH}")
@@ -623,6 +640,9 @@ include(cmake/OpenCVGenConfig.cmake)
# Generate Info.plist for the IOS framework
include(cmake/OpenCVGenInfoPlist.cmake)
# Generate ABI descriptor
include(cmake/OpenCVGenABI.cmake)
# Generate environment setup file
if(INSTALL_TESTS AND OPENCV_TEST_DATA_PATH)
if(ANDROID)
@@ -963,8 +983,9 @@ if(DEFINED WITH_V4L)
else()
set(HAVE_CAMV4L2_STR "NO")
endif()
status(" V4L/V4L2:" HAVE_LIBV4L THEN "Using libv4l (ver ${ALIASOF_libv4l1_VERSION})"
ELSE "${HAVE_CAMV4L_STR}/${HAVE_CAMV4L2_STR}")
status(" V4L/V4L2:" HAVE_LIBV4L
THEN "Using libv4l1 (ver ${ALIASOF_libv4l1_VERSION}) / libv4l2 (ver ${ALIASOF_libv4l2_VERSION})"
ELSE "${HAVE_CAMV4L_STR}/${HAVE_CAMV4L2_STR}")
endif(DEFINED WITH_V4L)
if(DEFINED WITH_DSHOW)

View File

@@ -135,7 +135,8 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
const CvCascadeParams& _cascadeParams,
const CvFeatureParams& _featureParams,
const CvCascadeBoostParams& _stageParams,
bool baseFormatSave )
bool baseFormatSave,
double acceptanceRatioBreakValue )
{
// Start recording clock ticks for training time output
const clock_t begin_time = clock();
@@ -185,6 +186,7 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
cout << "numStages: " << numStages << endl;
cout << "precalcValBufSize[Mb] : " << _precalcValBufSize << endl;
cout << "precalcIdxBufSize[Mb] : " << _precalcIdxBufSize << endl;
cout << "acceptanceRatioBreakValue : " << acceptanceRatioBreakValue << endl;
cascadeParams.printAttrs();
stageParams->printAttrs();
featureParams->printAttrs();
@@ -207,13 +209,18 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
if ( !updateTrainingSet( tempLeafFARate ) )
{
cout << "Train dataset for temp stage can not be filled. "
"Branch training terminated." << endl;
"Branch training terminated." << endl;
break;
}
if( tempLeafFARate <= requiredLeafFARate )
{
cout << "Required leaf false alarm rate achieved. "
"Branch training terminated." << endl;
"Branch training terminated." << endl;
break;
}
if( (tempLeafFARate <= acceptanceRatioBreakValue) && (acceptanceRatioBreakValue >= 0) ){
cout << "The required acceptanceRatio for the model has been reached to avoid overfitting of trainingdata. "
"Branch training terminated." << endl;
break;
}

View File

@@ -94,7 +94,8 @@ public:
const CvCascadeParams& _cascadeParams,
const CvFeatureParams& _featureParams,
const CvCascadeBoostParams& _stageParams,
bool baseFormatSave = false );
bool baseFormatSave = false,
double acceptanceRatioBreakValue = -1.0 );
private:
int predict( int sampleIdx );
void save( const std::string cascadeDirName, bool baseFormat = false );

View File

@@ -33,20 +33,12 @@ bool CvCascadeImageReader::NegReader::create( const string _filename, Size _winS
if ( !file.is_open() )
return false;
size_t pos = _filename.rfind('\\');
char dlmrt = '\\';
if (pos == string::npos)
{
pos = _filename.rfind('/');
dlmrt = '/';
}
dirname = pos == string::npos ? "" : _filename.substr(0, pos) + dlmrt;
while( !file.eof() )
{
std::getline(file, str);
if (str.empty()) break;
if (str.at(0) == '#' ) continue; /* comment */
imgFilenames.push_back(dirname + str);
imgFilenames.push_back(str);
}
file.close();

View File

@@ -12,9 +12,10 @@ int main( int argc, char* argv[] )
int numNeg = 1000;
int numStages = 20;
int numThreads = getNumThreads();
int precalcValBufSize = 256,
precalcIdxBufSize = 256;
int precalcValBufSize = 1024,
precalcIdxBufSize = 1024;
bool baseFormatSave = false;
double acceptanceRatioBreakValue = -1.0;
CvCascadeParams cascadeParams;
CvCascadeBoostParams stageParams;
@@ -36,6 +37,7 @@ int main( int argc, char* argv[] )
cout << " [-precalcIdxBufSize <precalculated_idxs_buffer_size_in_Mb = " << precalcIdxBufSize << ">]" << endl;
cout << " [-baseFormatSave]" << endl;
cout << " [-numThreads <max_number_of_threads = " << numThreads << ">]" << endl;
cout << " [-acceptanceRatioBreakValue <value> = " << acceptanceRatioBreakValue << ">]" << endl;
cascadeParams.printDefaults();
stageParams.printDefaults();
for( int fi = 0; fi < fc; fi++ )
@@ -86,6 +88,10 @@ int main( int argc, char* argv[] )
{
numThreads = atoi(argv[++i]);
}
else if( !strcmp( argv[i], "-acceptanceRatioBreakValue" ) )
{
acceptanceRatioBreakValue = atof(argv[++i]);
}
else if ( cascadeParams.scanAttr( argv[i], argv[i+1] ) ) { i++; }
else if ( stageParams.scanAttr( argv[i], argv[i+1] ) ) { i++; }
else if ( !set )
@@ -112,6 +118,7 @@ int main( int argc, char* argv[] )
cascadeParams,
*featureParams[cascadeParams.featureType],
stageParams,
baseFormatSave );
baseFormatSave,
acceptanceRatioBreakValue );
return 0;
}

View File

@@ -47,7 +47,7 @@ endif()
if(NOT DEFINED OpenCV_STATIC)
# look for global setting
if(NOT DEFINED BUILD_SHARED_LIBS OR BUILD_SHARED_LIBS)
if(BUILD_SHARED_LIBS)
set(OpenCV_STATIC OFF)
else()
set(OpenCV_STATIC ON)
@@ -89,7 +89,7 @@ elseif(MINGW)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
OUTPUT_VARIABLE OPENCV_GCC_TARGET_MACHINE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(CMAKE_OPENCV_GCC_TARGET_MACHINE MATCHES "64")
if(OPENCV_GCC_TARGET_MACHINE MATCHES "amd64|x86_64|AMD64")
set(MINGW64 1)
set(OpenCV_ARCH x64)
else()

View File

@@ -91,9 +91,9 @@ elseif(CMAKE_COMPILER_IS_GNUCXX)
if(WIN32)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
OUTPUT_VARIABLE CMAKE_OPENCV_GCC_TARGET_MACHINE
OUTPUT_VARIABLE OPENCV_GCC_TARGET_MACHINE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(CMAKE_OPENCV_GCC_TARGET_MACHINE MATCHES "amd64|x86_64|AMD64")
if(OPENCV_GCC_TARGET_MACHINE MATCHES "amd64|x86_64|AMD64")
set(MINGW64 1)
endif()
endif()
@@ -147,11 +147,7 @@ if(MSVC)
elseif(MINGW)
set(OpenCV_RUNTIME mingw)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
OUTPUT_VARIABLE OPENCV_GCC_TARGET_MACHINE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(CMAKE_OPENCV_GCC_TARGET_MACHINE MATCHES "64")
set(MINGW64 1)
if(MINGW64)
set(OpenCV_ARCH x64)
else()
set(OpenCV_ARCH x86)

View File

@@ -75,10 +75,10 @@ function(find_python preferred_version min_version library_env include_dir_env
if(NOT ANDROID AND NOT IOS)
ocv_check_environment_variables(${library_env} ${include_dir_env})
if(${${library_env}})
if(NOT ${${library_env}} EQUAL "")
set(PYTHON_LIBRARY "${${library_env}}")
endif()
if(${${include_dir_env}})
if(NOT ${${include_dir_env}} EQUAL "")
set(PYTHON_INCLUDE_DIR "${${include_dir_env}}")
endif()

View File

@@ -153,7 +153,13 @@ endif(WITH_XINE)
ocv_clear_vars(HAVE_LIBV4L HAVE_CAMV4L HAVE_CAMV4L2 HAVE_VIDEOIO)
if(WITH_V4L)
if(WITH_LIBV4L)
CHECK_MODULE(libv4l1 HAVE_LIBV4L)
CHECK_MODULE(libv4l1 HAVE_LIBV4L1)
CHECK_MODULE(libv4l2 HAVE_LIBV4L2)
if(HAVE_LIBV4L1 AND HAVE_LIBV4L2)
set(HAVE_LIBV4L YES)
else()
set(HAVE_LIBV4L NO)
endif()
endif()
CHECK_INCLUDE_FILE(linux/videodev.h HAVE_CAMV4L)
CHECK_INCLUDE_FILE(linux/videodev2.h HAVE_CAMV4L2)
@@ -262,7 +268,9 @@ endif(WITH_MSMF)
# --- Extra HighGUI and VideoIO libs on Windows ---
if(WIN32)
list(APPEND HIGHGUI_LIBRARIES comctl32 gdi32 ole32 setupapi ws2_32)
list(APPEND VIDEOIO_LIBRARIES vfw32)
if(HAVE_VFW)
list(APPEND VIDEOIO_LIBRARIES vfw32)
endif()
if(MINGW64)
list(APPEND VIDEOIO_LIBRARIES avifil32 avicap32 winmm msvfw32)
list(REMOVE_ITEM VIDEOIO_LIBRARIES vfw32)

cmake/OpenCVGenABI.cmake Normal file
View File

@@ -0,0 +1,49 @@
if (NOT GENERATE_ABI_DESCRIPTOR)
return()
endif()
set(filename "opencv_abi.xml")
set(path1 "${CMAKE_BINARY_DIR}/${filename}")
set(modules "${OPENCV_MODULES_PUBLIC}")
ocv_list_filterout(modules "opencv_ts")
message(STATUS "Generating ABI compliance checker configuration: ${filename}")
if (OPENCV_VCSVERSION AND NOT OPENCV_VCSVERSION STREQUAL "unknown")
set(OPENCV_ABI_VERSION "${OPENCV_VCSVERSION}")
else()
set(OPENCV_ABI_VERSION "${OPENCV_VERSION}")
endif()
# Headers
set(OPENCV_ABI_HEADERS "{RELPATH}/${OPENCV_INCLUDE_INSTALL_PATH}")
# Libraries
set(OPENCV_ABI_LIBRARIES "{RELPATH}/${OPENCV_LIB_INSTALL_PATH}")
set(OPENCV_ABI_SKIP_HEADERS "")
set(OPENCV_ABI_SKIP_LIBRARIES "")
foreach(mod ${OPENCV_MODULES_BUILD})
string(REGEX REPLACE "^opencv_" "" mod "${mod}")
if(NOT "${OPENCV_MODULE_opencv_${mod}_LOCATION}" STREQUAL "${OpenCV_SOURCE_DIR}/modules/${mod}")
# headers
foreach(h ${OPENCV_MODULE_opencv_${mod}_HEADERS})
file(RELATIVE_PATH h "${OPENCV_MODULE_opencv_${mod}_LOCATION}/include" "${h}")
list(APPEND OPENCV_ABI_SKIP_HEADERS "${h}")
endforeach()
# libraries
set(lib_name "")
get_target_property(lib_name opencv_${mod} LOCATION)
get_filename_component(lib_name "${lib_name}" NAME)
list(APPEND OPENCV_ABI_SKIP_LIBRARIES "${lib_name}")
endif()
endforeach()
string(REPLACE ";" "\n " OPENCV_ABI_SKIP_HEADERS "${OPENCV_ABI_SKIP_HEADERS}")
string(REPLACE ";" "\n " OPENCV_ABI_SKIP_LIBRARIES "${OPENCV_ABI_SKIP_LIBRARIES}")
# Options
set(OPENCV_ABI_GCC_OPTIONS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}")
string(REGEX REPLACE "([^ ]) +([^ ])" "\\1\\n \\2" OPENCV_ABI_GCC_OPTIONS "${OPENCV_ABI_GCC_OPTIONS}")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/opencv_abi.xml.in" "${path1}")
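The generated descriptor is intended for the abi_compliance_checker tool; assuming the tool's standard options, an ABI dump would typically be produced with something along the lines of `abi-compliance-checker -lib opencv -dump opencv_abi.xml`.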

View File

@@ -101,10 +101,7 @@ configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.
set(OpenCV_INCLUDE_DIRS_CONFIGCMAKE "\"\${OpenCV_INSTALL_PATH}/${OPENCV_INCLUDE_INSTALL_PATH}/opencv" "\${OpenCV_INSTALL_PATH}/${OPENCV_INCLUDE_INSTALL_PATH}\"")
set(OpenCV2_INCLUDE_DIRS_CONFIGCMAKE "\"\"")
if(INSTALL_TO_MANGLED_PATHS)
string(REPLACE "OpenCV" "OpenCV-${OPENCV_VERSION}" OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE "${OPENCV_3P_LIB_INSTALL_PATH}")
set(OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE "\"\${OpenCV_INSTALL_PATH}/${OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE}\"")
endif()
set(OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE "\"\${OpenCV_INSTALL_PATH}/${OPENCV_3P_LIB_INSTALL_PATH}\"")
if(UNIX) # ANDROID configuration is created here also
#http://www.vtk.org/Wiki/CMake/Tutorials/Packaging reference
@@ -114,23 +111,13 @@ if(UNIX) # ANDROID configuration is created here also
# <prefix>/(share|lib)/<name>*/ (U)
# <prefix>/(share|lib)/<name>*/(cmake|CMake)/ (U)
if(USE_IPPICV)
if(INSTALL_TO_MANGLED_PATHS)
file(RELATIVE_PATH INSTALL_PATH_RELATIVE_IPPICV "${CMAKE_INSTALL_PREFIX}/${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/" ${IPPICV_INSTALL_PATH})
else()
file(RELATIVE_PATH INSTALL_PATH_RELATIVE_IPPICV "${CMAKE_INSTALL_PREFIX}/${OPENCV_CONFIG_INSTALL_PATH}/" ${IPPICV_INSTALL_PATH})
endif()
file(RELATIVE_PATH INSTALL_PATH_RELATIVE_IPPICV "${CMAKE_INSTALL_PREFIX}/${OPENCV_CONFIG_INSTALL_PATH}/" ${IPPICV_INSTALL_PATH})
endif()
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig.cmake.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" @ONLY)
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCVConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake" @ONLY)
if(INSTALL_TO_MANGLED_PATHS)
install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ COMPONENT dev)
install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}-${OPENCV_VERSION}/ FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
else()
install(FILES "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
endif()
install(FILES "${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig.cmake" DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCVConfig-version.cmake DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ COMPONENT dev)
install(EXPORT OpenCVModules DESTINATION ${OPENCV_CONFIG_INSTALL_PATH}/ FILE OpenCVModules${modules_file_suffix}.cmake COMPONENT dev)
endif()
if(ANDROID)

View File

@@ -1,6 +0,0 @@
#include <wrl/client.h>
int main(int, char**)
{
return 0;
}

View File

@@ -0,0 +1,45 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
This file is auto-generated
-->
<descriptor>
<version>
@OPENCV_ABI_VERSION@
</version>
<headers>
@OPENCV_ABI_HEADERS@
</headers>
<libs>
@OPENCV_ABI_LIBRARIES@
</libs>
<skip_headers>
opencv2/core/cuda*
opencv2/core/private*
opencv/cxeigen.hpp
opencv2/core/eigen.hpp
opencv2/flann/hdf5.h
opencv2/imgcodecs/ios.h
opencv2/videoio/cap_ios.h
opencv2/ts.hpp
opencv2/ts/*
opencv2/xobjdetect/private.hpp
@OPENCV_ABI_SKIP_HEADERS@
</skip_headers>
<skip_libs>
@OPENCV_ABI_SKIP_LIBRARIES@
</skip_libs>
<gcc_options>
@OPENCV_ABI_GCC_OPTIONS@
</gcc_options>
</descriptor>

View File

@@ -1,14 +1,9 @@
file(GLOB HAAR_CASCADES haarcascades/*.xml)
file(GLOB LBP_CASCADES lbpcascades/*.xml)
if(ANDROID)
install(FILES ${HAAR_CASCADES} DESTINATION sdk/etc/haarcascades COMPONENT libs)
install(FILES ${LBP_CASCADES} DESTINATION sdk/etc/lbpcascades COMPONENT libs)
else()
install(FILES ${HAAR_CASCADES} DESTINATION share/OpenCV/haarcascades COMPONENT libs)
install(FILES ${LBP_CASCADES} DESTINATION share/OpenCV/lbpcascades COMPONENT libs)
endif()
install(FILES ${HAAR_CASCADES} DESTINATION ${OPENCV_OTHER_INSTALL_PATH}/haarcascades COMPONENT libs)
install(FILES ${LBP_CASCADES} DESTINATION ${OPENCV_OTHER_INSTALL_PATH}/lbpcascades COMPONENT libs)
if(INSTALL_TESTS AND OPENCV_TEST_DATA_PATH)
install(DIRECTORY "${OPENCV_TEST_DATA_PATH}/" DESTINATION "${OPENCV_TEST_DATA_INSTALL_PATH}" COMPONENT "tests")
endif()
endif()

View File

@@ -27,15 +27,6 @@ if(HAVE_DOC_GENERATOR)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching)
list(REMOVE_ITEM BASE_MODULES ${FIXED_ORDER_MODULES})
set(BASE_MODULES ${FIXED_ORDER_MODULES} ${BASE_MODULES})
set(DOC_LIST
"${OpenCV_SOURCE_DIR}/doc/opencv-logo.png"
"${OpenCV_SOURCE_DIR}/doc/opencv-logo2.png"
"${OpenCV_SOURCE_DIR}/doc/opencv-logo-white.png"
"${OpenCV_SOURCE_DIR}/doc/opencv.ico"
"${OpenCV_SOURCE_DIR}/doc/pattern.png"
"${OpenCV_SOURCE_DIR}/doc/acircles_pattern.png")
set(OPTIONAL_DOC_LIST "")
endif(HAVE_DOC_GENERATOR)
# ========= Doxygen docs =========
@@ -160,18 +151,8 @@ if(BUILD_DOCS AND DOXYGEN_FOUND)
COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps}
)
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen/html
DESTINATION "${OPENCV_DOC_INSTALL_PATH}"
COMPONENT "docs" OPTIONAL
)
endif()
if(HAVE_DOC_GENERATOR)
# installation
foreach(f ${DOC_LIST})
install(FILES "${f}" DESTINATION "${OPENCV_DOC_INSTALL_PATH}" COMPONENT docs)
endforeach()
foreach(f ${OPTIONAL_DOC_LIST})
install(FILES "${f}" DESTINATION "${OPENCV_DOC_INSTALL_PATH}" OPTIONAL COMPONENT docs)
endforeach()
# dummy targets
add_custom_target(docs)
add_custom_target(html_docs)
endif(HAVE_DOC_GENERATOR)

View File

@@ -243,11 +243,7 @@ PREDEFINED = __cplusplus=1 \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
CV_NEON=1 \
FLANN_DEPRECATED= \
"CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
"CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
FLANN_DEPRECATED=
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =

View File

@@ -17,6 +17,7 @@
<tab type="globals" visible="yes" title="Global objects" intro=""/>
</tab>
<tab type="examples" visible="yes" title="" intro=""/>
<tab type="user" url="/3.0-last-rst" title="Sphinx Documentation"/>
</navindex>
<!-- Layout definition for a class page -->

View File

@@ -30,7 +30,7 @@ y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy]\f]
So we have five distortion parameters which in OpenCV are presented as one row matrix with 5
columns:
\f[Distortion_{coefficients}=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3)\f]
\f[distortion\_coefficients=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3)\f]
Now for the unit conversion we use the following formula:
@@ -96,83 +96,30 @@ on how to do this you can find in the @ref tutorial_file_input_output_with_xml_y
Explanation
-----------
-# **Read the settings.**
@code{.cpp}
Settings s;
const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml";
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
if (!fs.isOpened())
{
cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
return -1;
}
fs["Settings"] >> s;
fs.release(); // close Settings file
-# **Read the settings**
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp file_read
if (!s.goodInput)
{
cout << "Invalid input detected. Application stopping. " << endl;
return -1;
}
@endcode
For this I've used the simple OpenCV class input operation. After reading the file I have an
additional post-processing function that checks the validity of the input. Only if all inputs are
good will the *goodInput* variable be true.
-# **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big
-# **Get next input, if it fails or we have enough of them - calibrate**
After this we have a big
loop where we do the following operations: get the next image from the image list, camera or
video file. If this fails or we have enough images, then we run the calibration process. In the
case of images we step out of the loop; otherwise the remaining frames will be undistorted (if the
option is set) by changing from *DETECTION* mode to the *CALIBRATED* one.
@code{.cpp}
for(int i = 0;;++i)
{
Mat view;
bool blinkOutput = false;
view = s.nextImage();
//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
{
if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If no more images then run calibration, save and stop loop.
{
if( imagePoints.size() > 0 )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
break;
imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp get_input
For some cameras we may need to flip the input image. Here we do this too.
-# **Find the pattern in the current input**. The formation of the equations I mentioned above aims
-# **Find the pattern in the current input**
The formation of the equations I mentioned above aims
at finding major patterns in the input: in the case of the chessboard these are the corners of the
squares and for the circles, well, the circles themselves. The positions of these will form the
result, which will be written into the *pointBuf* vector.
@code{.cpp}
vector<Point2f> pointBuf;
bool found;
switch( s.calibrationPattern ) // Find feature points on the input format
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
break;
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp find_pattern
Depending on the type of the input pattern you use either the @ref cv::findChessboardCorners or
the @ref cv::findCirclesGrid function. For both of them you pass the current image and the size
of the board and you'll get the positions of the patterns. Furthermore, they return a boolean
@@ -188,109 +135,27 @@ Explanation
*imagePoints* vector to collect all of the equations into a single container. Finally, for
visualization feedback purposes we will draw the found points on the input image using the @ref
cv::drawChessboardCorners function.
@code{.cpp}
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( TermCriteria::EPS+TermCriteria::MAX_ITER, 30, 0.1 ));
}
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp pattern_found
-# **Show state and result to the user, plus command line control of the application**
if( mode == CAPTURING && // For camera only take new samples after delay time
(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
{
imagePoints.push_back(pointBuf);
prevTimestamp = clock();
blinkOutput = s.inputCapture.isOpened();
}
// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
@endcode
-# **Show state and result to the user, plus command line control of the application**. This part
shows text output on the image.
@code{.cpp}
//----------------------------- Output Text ------------------------------------------------
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
if( mode == CAPTURING )
{
if(s.showUndistorsed)
msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
else
msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
}
putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
if( blinkOutput )
bitwise_not(view, view);
@endcode
This part shows text output on the image.
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp output_text
If we ran the calibration and got the camera matrix with the distortion coefficients, we may want to
correct the image using the @ref cv::undistort function:
@code{.cpp}
//------------------------- Video capture output undistorted ------------------------------
if( mode == CALIBRATED && s.showUndistorsed )
{
Mat temp = view.clone();
undistort(temp, view, cameraMatrix, distCoeffs);
}
//------------------------------ Show image and check for input commands -------------------
imshow("Image View", view);
@endcode
Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g*
we start again the detection process, and finally for the *ESC* key we quit the application:
@code{.cpp}
char key = waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
if( key == ESC_KEY )
break;
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp output_undistorted
Then we show the image and wait for an input key and if this is *u* we toggle the distortion removal,
if it is *g* we start again the detection process, and finally for the *ESC* key we quit the application:
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp await_input
-# **Show the distortion removal for the images too**
if( key == 'u' && mode == CALIBRATED )
s.showUndistorsed = !s.showUndistorsed;
if( s.inputCapture.isOpened() && key == 'g' )
{
mode = CAPTURING;
imagePoints.clear();
}
@endcode
-# **Show the distortion removal for the images too**. When you work with an image list it is not
When you work with an image list it is not
possible to remove the distortion inside the loop. Therefore, you must do this after the loop.
Taking advantage of this, I'll now expand the @ref cv::undistort function, which in fact first
calls @ref cv::initUndistortRectifyMap to find the transformation matrices and then performs the
transformation using the @ref cv::remap function. Because, after a successful calibration, the map
calculation needs to be done only once, using this expanded form may speed up your
application:
@code{.cpp}
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
for(int i = 0; i < (int)s.imageList.size(); i++ )
{
view = imread(s.imageList[i], 1);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
char c = waitKey();
if( c == ESC_KEY || c == 'q' || c == 'Q' )
break;
}
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp show_results
The calibration and save
------------------------
@@ -304,24 +169,7 @@ Therefore in the first function we just split up these two processes. Because we
of the calibration variables we'll create these variables here and pass on both of them to the
calibration and saving function. Again, I'll not show the saving part as that has little in common
with the calibration. Explore the source file in order to find out how and what:
@code{.cpp}
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
reprojErrs, totalAvgErr);
cout << (ok ? "Calibration succeeded" : "Calibration failed")
<< ". avg re projection error = " << totalAvgErr ;
if( ok ) // save only if the calibration was done with success
saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
imagePoints, totalAvgErr);
return ok;
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp run_and_save
We do the calibration with the help of the @ref cv::calibrateCamera function. It has the following
parameters:
@@ -331,29 +179,7 @@ parameters:
present. Because we use a single pattern for all the input images, we can calculate this just
once and multiply it for all the other input views. We calculate the corner points with the
*calcBoardCornerPositions* function as:
@code{.cpp}
void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
corners.clear();
switch(patternType)
{
case Settings::CHESSBOARD:
case Settings::CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i )
for( int j = 0; j < boardSize.width; ++j )
corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
break;
}
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp board_corners
And then multiply it as:
@code{.cpp}
vector<vector<Point3f> > objectPoints(1);
@@ -365,12 +191,8 @@ parameters:
circle pattern). We have already collected this from @ref cv::findChessboardCorners or @ref
cv::findCirclesGrid function. We just need to pass it on.
- The size of the image acquired from the camera, video file or the images.
- The camera matrix. If we used the fixed aspect ratio option we need to set the \f$f_x\f$ to zero:
@code{.cpp}
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;
@endcode
- The camera matrix. If we used the fixed aspect ratio option we need to set \f$f_x\f$:
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp fixed_aspect
- The distortion coefficient matrix. Initialize with zero.
@code{.cpp}
distCoeffs = Mat::zeros(8, 1, CV_64F);
@@ -393,33 +215,7 @@ double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
calculate the absolute norm between what we got with our transformation and the corner/circle
finding algorithm. To find the average error we calculate the arithmetical mean of the errors
calculated for all the calibration images.
@code{.cpp}
double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for( i = 0; i < (int)objectPoints.size(); ++i )
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix, // project
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2); // difference
int n = (int)objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n); // save for this view
totalErr += err*err; // sum it up
totalPoints += n;
}
return std::sqrt(totalErr/totalPoints); // calculate the arithmetical mean
}
@endcode
@snippet samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp compute_errors
Results
-------
@@ -461,20 +257,20 @@ the input. Here's how a detected pattern should look:
In both cases in the specified output XML/YAML file you'll find the camera and distortion
coefficients matrices:
@code{.xml}
<Camera_Matrix type_id="opencv-matrix">
<camera_matrix type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
6.5746697944293521e+002 0. 3.1950000000000000e+002 0.
6.5746697944293521e+002 2.3950000000000000e+002 0. 0. 1.</data></Camera_Matrix>
<Distortion_Coefficients type_id="opencv-matrix">
6.5746697944293521e+002 2.3950000000000000e+002 0. 0. 1.</data></camera_matrix>
<distortion_coefficients type_id="opencv-matrix">
<rows>5</rows>
<cols>1</cols>
<dt>d</dt>
<data>
-4.1802327176423804e-001 5.0715244063187526e-001 0. 0.
-5.7843597214487474e-001</data></Distortion_Coefficients>
-5.7843597214487474e-001</data></distortion_coefficients>
@endcode
Add these values as constants to your program, call the @ref cv::initUndistortRectifyMap and the
@ref cv::remap function to remove distortion and enjoy distortion free inputs for cheap and low

View File

@@ -74,7 +74,7 @@ available methods are 6:
-# **method=CV_TM_CCOEFF**
\f[R(x,y)= \sum _{x',y'} (T'(x',y') \cdot I(x+x',y+y'))\f]
\f[R(x,y)= \sum _{x',y'} (T'(x',y') \cdot I'(x+x',y+y'))\f]
where

View File

@@ -149,3 +149,9 @@ Additionally you can find very basic sample source code to introduce you to the
_Author:_ Maksim Shabunin
This tutorial describes the new documenting process and some useful Doxygen features.
- @subpage tutorial_transition_guide
_Author:_ Maksim Shabunin
This document describes some aspects of the 2.4 -> 3.0 transition process.

View File

@@ -0,0 +1,259 @@
Transition guide {#tutorial_transition_guide}
================
@tableofcontents
Changes overview {#tutorial_transition_overview}
================
This document is intended for software developers who want to migrate their code to OpenCV 3.0.
OpenCV 3.0 introduced many new algorithms and features compared to version 2.4. Some modules have been rewritten, some have been reorganized. Although most of the algorithms from 2.4 are still present, the interfaces can differ.
This section describes the most notable changes in general; all details and examples of transition actions are in the next part of the document.
##### Contrib repository
<https://github.com/Itseez/opencv_contrib>
This is a place for all new, experimental and non-free algorithms. It does not receive as much attention from the support team compared to the main repository, but the community makes an effort to keep it in good shape.
To build OpenCV with the _contrib_ repository, add the following option to your cmake command:
@code{.sh}
-DOPENCV_EXTRA_MODULES_PATH=<path-to-opencv_contrib>/modules
@endcode
##### Headers layout
In 2.4 all headers are located in the corresponding module subfolder (_opencv2/\<module\>/\<module\>.hpp_). In 3.0 there are top-level module headers containing most of the module functionality, _opencv2/\<module\>.hpp_, and all C-style API definitions have been moved to separate headers (for example opencv2/core/core_c.h).
##### Algorithm interfaces
The general algorithm usage pattern has changed: an algorithm instance must now be created on the heap, wrapped in the smart pointer cv::Ptr. Version 2.4 allowed both stack and heap allocations, directly or via smart pointer.
_get_ and _set_ methods have been removed from the cv::Algorithm class along with the _CV_INIT_ALGORITHM_ macro. In 3.0 all properties have been converted to pairs of _getProperty/setProperty_ pure virtual methods. As a result it is __not__ possible to create and use a cv::Algorithm instance by name (using the generic _Algorithm::create(String)_ method); one should call the corresponding factory method explicitly.
##### Changed modules
- _ml_ module has been rewritten
- _highgui_ module has been split into parts: _imgcodecs_, _videoio_ and _highgui_ itself
- _features2d_ module has been reorganized (some feature detectors have been moved to the _opencv_contrib/xfeatures2d_ module)
- _legacy_, _nonfree_ modules have been removed. Some algorithms have been moved to different locations and some have been completely rewritten or removed
- CUDA API has been updated (_gpu_ module -> several _cuda_ modules, namespace _gpu_ -> namespace _cuda_)
- OpenCL API has changed (_ocl_ module has been removed, separate _ocl::_ implementations -> Transparent API)
- Some other methods and classes have been relocated
Transition hints {#tutorial_transition_hints}
================
This section describes concrete actions with examples.
Prepare 2.4 {#tutorial_transition_hints_24}
-----------
Some changes made in the latest OpenCV 2.4.11 version allow you to prepare your current codebase for migration:
- cv::makePtr function is now available
- _opencv2/\<module\>.hpp_ headers have been created
New headers layout {#tutorial_transition_hints_headers}
------------------
__Note:__
Changes intended to ease the migration have been made in OpenCV 3.0, thus the following instructions are not necessary, but recommended.
1. Replace inclusions of old module headers:
@code{.cpp}
// old header
#include "opencv2/<module>/<module>.hpp"
// new header
#include "opencv2/<module>.hpp"
@endcode
2. If your code uses the C API (`cv*` functions, `Cv*` structures or `CV_*` enumerations), include the corresponding `*_c.h` headers. Although it is recommended to use the C++ API, most C functions are still accessible in separate header files (opencv2/core/core_c.h, opencv2/core/types_c.h, opencv2/imgproc/imgproc_c.h, etc.).
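For instance, the following minimal sketch (the image size and smoothing parameters are arbitrary) still compiles against 3.0 once the matching `*_c.h` headers are included:
@code{.cpp}
#include "opencv2/core/core_c.h"
#include "opencv2/imgproc/imgproc_c.h"

int main()
{
    /* Old-style C API: create, smooth and release an IplImage. */
    IplImage* img = cvCreateImage(cvSize(64, 64), IPL_DEPTH_8U, 1);
    cvSmooth(img, img, CV_GAUSSIAN, 5, 5, 0, 0);
    cvReleaseImage(&img);
    return 0;
}
@endcode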
Modern way to use algorithm {#tutorial_transition_algorithm}
---------------------------
1. Algorithm instances must be created with the cv::makePtr function or the corresponding static factory method, if available:
@code{.cpp}
// good ways
Ptr<SomeAlgo> algo = makePtr<SomeAlgo>(...);
Ptr<SomeAlgo> algo = SomeAlgo::create(...);
@endcode
Other ways are deprecated:
@code{.cpp}
// bad ways
Ptr<SomeAlgo> algo = new SomeAlgo(...);
SomeAlgo * algo = new SomeAlgo(...);
SomeAlgo algo(...);
Ptr<SomeAlgo> algo = Algorithm::create<SomeAlgo>("name");
@endcode
2. Algorithm properties should be accessed via the corresponding virtual methods, _getSomeProperty/setSomeProperty_; the generic _get/set_ methods have been removed:
@code{.cpp}
// good way
double clipLimit = clahe->getClipLimit();
clahe->setClipLimit(clipLimit);
// bad way
double clipLimit = clahe->getDouble("clipLimit");
clahe->set("clipLimit", clipLimit);
clahe->setDouble("clipLimit", clipLimit);
@endcode
3. Remove `initModule_<moduleName>()` calls
Machine learning module {#tutorial_transition_hints_ml}
-----------------------
Since this module has been rewritten, it will take some effort to adapt your software to it. All algorithms are located in the separate _ml_ namespace along with their base class _StatModel_. Separate _SomeAlgoParams_ classes have been replaced with sets of corresponding _getProperty/setProperty_ methods.
The following table illustrates the correspondence between 2.4 and 3.0 machine learning classes.
| 2.4 | 3.0 |
| --------- | --------- |
| CvStatModel | cv::ml::StatModel |
| CvNormalBayesClassifier | cv::ml::NormalBayesClassifier |
| CvKNearest | cv::ml::KNearest |
| CvSVM | cv::ml::SVM |
| CvDTree | cv::ml::DTrees |
| CvBoost | cv::ml::Boost |
| CvGBTrees | _Not implemented_ |
| CvRTrees | cv::ml::RTrees |
| CvERTrees | _Not implemented_ |
| EM | cv::ml::EM |
| CvANN_MLP | cv::ml::ANN_MLP |
| _Not implemented_ | cv::ml::LogisticRegression |
| CvMLData | cv::ml::TrainData |
Although the rewritten _ml_ algorithms in 3.0 allow you to load old trained models from an _xml/yml_ file, deviations in the prediction process are possible.
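For example, a model saved by an earlier version can typically be loaded through the new interface with the templated _StatModel::load_ method (a minimal sketch; the file name is hypothetical):
@code{.cpp}
#include "opencv2/ml.hpp"
using namespace cv;

int main()
{
    // Load a model trained and saved by an earlier version; the file name is hypothetical.
    Ptr<ml::SVM> svm = ml::StatModel::load<ml::SVM>("old_svm_model.yml");
    return svm.empty() ? 1 : 0;
}
@endcode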
The following code snippets from the `points_classifier.cpp` example illustrate the differences in the model training process:
@code{.cpp}
using namespace cv;
// ======== version 2.4 ========
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
CvBoost boost;
Mat var_types( 1, trainSamples.cols + 1, CV_8UC1, Scalar(CV_VAR_ORDERED) );
var_types.at<uchar>( trainSamples.cols ) = CV_VAR_CATEGORICAL;
CvBoostParams params( CvBoost::DISCRETE, // boost_type
100, // weak_count
0.95, // weight_trim_rate
2, // max_depth
false, //use_surrogates
0 // priors
);
boost.train( trainSamples, CV_ROW_SAMPLE, trainClasses, Mat(), Mat(), var_types, Mat(), params );
// ======== version 3.0 ========
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::DISCRETE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
boost->train(prepare_train_data()); // 'prepare_train_data' returns an instance of ml::TrainData class
@endcode
Features detect {#tutorial_transition_hints_features}
---------------
Some algorithms (FREAK, BRIEF, SIFT, SURF) have been moved to the _opencv_contrib_ repository, to the _xfeatures2d_ module and _xfeatures2d_ namespace. Their interface has also changed (they inherit from the `cv::Feature2D` base class).
List of _xfeatures2d_ module classes:
- cv::xfeatures2d::BriefDescriptorExtractor - Class for computing BRIEF descriptors (2.4 location: _features2d_)
- cv::xfeatures2d::FREAK - Class implementing the FREAK (Fast Retina Keypoint) keypoint descriptor (2.4 location: _features2d_)
- cv::xfeatures2d::StarDetector - The class implements the CenSurE detector (2.4 location: _features2d_)
- cv::xfeatures2d::SIFT - Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm (2.4 location: _nonfree_)
- cv::xfeatures2d::SURF - Class for extracting Speeded Up Robust Features from an image (2.4 location: _nonfree_)
The following steps are needed:
1. Add _opencv_contrib_ to the compilation process
2. Include the `opencv2/xfeatures2d.hpp` header
3. Use the namespace `xfeatures2d`
4. Replace `operator()` calls with `detect`, `compute` or `detectAndCompute` if needed
Some classes now use the general methods `detect`, `compute` or `detectAndCompute` provided by the `Feature2D` base class instead of a custom `operator()`.
The following code snippets illustrate the difference (from the `video_homography.cpp` example):
@code{.cpp}
using namespace cv;
// ====== 2.4 =======
#include "opencv2/features2d/features2d.hpp"
BriefDescriptorExtractor brief(32);
GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);
// ...
detector.detect(gray, query_kpts); //Find interest points
brief.compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location
// ====== 3.0 =======
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv::xfeatures2d;
Ptr<BriefDescriptorExtractor> brief = BriefDescriptorExtractor::create(32);
Ptr<FastFeatureDetector> detector = FastFeatureDetector::create(10, true);
// ...
detector->detect(gray, query_kpts); //Find interest points
brief->compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location
@endcode
OpenCL {#tutorial_transition_hints_opencl}
------
All specialized `ocl` implementations have been hidden behind the general C++ algorithm interface. Now the function execution path can be selected dynamically at runtime: CPU or OpenCL; this mechanism is also called "Transparent API".
The new class cv::UMat is intended to hide data exchange with the OpenCL device in a convenient way.
The following example illustrates the API modifications (from the [OpenCV site](http://opencv.org/platforms/opencl.html)):
- OpenCL-aware code OpenCV-2.x
@code{.cpp}
// initialization
VideoCapture vcap(...);
ocl::OclCascadeClassifier fd("haar_ff.xml");
ocl::oclMat frame, frameGray;
Mat frameCpu;
vector<Rect> faces;
for(;;){
// processing loop
vcap >> frameCpu;
frame = frameCpu;
ocl::cvtColor(frame, frameGray, BGR2GRAY);
ocl::equalizeHist(frameGray, frameGray);
fd.detectMultiScale(frameGray, faces, ...);
// draw rectangles …
// show image …
}
@endcode
- OpenCL-aware code OpenCV-3.x
@code{.cpp}
// initialization
VideoCapture vcap(...);
CascadeClassifier fd("haar_ff.xml");
UMat frame, frameGray; // the only change from plain CPU version
vector<Rect> faces;
for(;;){
// processing loop
vcap >> frame;
cvtColor(frame, frameGray, BGR2GRAY);
equalizeHist(frameGray, frameGray);
fd.detectMultiScale(frameGray, faces, ...);
// draw rectangles …
// show image …
}
@endcode
CUDA {#tutorial_transition_hints_cuda}
----
The _cuda_ module has been split into several smaller pieces:
- _cuda_ - @ref cuda
- _cudaarithm_ - @ref cudaarithm
- _cudabgsegm_ - @ref cudabgsegm
- _cudacodec_ - @ref cudacodec
- _cudafeatures2d_ - @ref cudafeatures2d
- _cudafilters_ - @ref cudafilters
- _cudaimgproc_ - @ref cudaimgproc
- _cudalegacy_ - @ref cudalegacy
- _cudaoptflow_ - @ref cudaoptflow
- _cudastereo_ - @ref cudastereo
- _cudawarping_ - @ref cudawarping
- _cudev_ - @ref cudev
The `gpu` namespace has been removed; use the cv::cuda namespace instead. Many classes have also been renamed, for example:
- `gpu::FAST_GPU` -> cv::cuda::FastFeatureDetector
- `gpu::createBoxFilter_GPU` -> cv::cuda::createBoxFilter
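A minimal sketch of the renamed API (assuming a CUDA-capable device and the _cudafeatures2d_ module are available; the threshold value is arbitrary):
@code{.cpp}
#include "opencv2/core.hpp"
#include "opencv2/cudafeatures2d.hpp"
using namespace cv;

int main()
{
    cuda::GpuMat img;
    img.upload(Mat::zeros(480, 640, CV_8UC1)); // dummy image; replace with real data

    // 2.4 equivalent: gpu::FAST_GPU fast(20); fast(img, gpu::GpuMat(), keypoints);
    Ptr<cuda::FastFeatureDetector> fast = cuda::FastFeatureDetector::create(20);
    std::vector<KeyPoint> keypoints;
    fast->detect(img, keypoints);
    return 0;
}
@endcode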
Documentation format {#tutorial_transition_docs}
--------------------
The documentation has been converted to Doxygen format. You can find the updated documentation writing guide in the _Tutorials_ section of the _OpenCV_ reference documentation (@ref tutorial_documentation).

View File

@@ -256,6 +256,12 @@ Command line arguments of opencv_traincascade application grouped by purposes:
Maximum number of threads to use during training. Notice that the actual number of used
threads may be lower, depending on your machine and compilation options.
- -acceptanceRatioBreakValue \<break_value\>
This argument determines how precisely your model should keep learning and when to stop.
A good guideline is to train no further than 10e-5, to ensure the model does not overtrain on your training data.
By default this value is set to -1 to disable the feature.
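For instance (the data paths and sample counts here are hypothetical placeholders), `opencv_traincascade -data cascade -vec samples.vec -bg negatives.txt -acceptanceRatioBreakValue 10e-5` stops a training branch as soon as the acceptance ratio drops below that value.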
-# Cascade parameters:
- -stageType \<BOOST(default)\>

View File

@@ -51,12 +51,6 @@
#include "opencv2/photo/photo_c.h"
#include "opencv2/video/tracking_c.h"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#endif

View File

@@ -314,7 +314,9 @@ public:
cameraId(0),
cameraCallback(callback),
userData(_userData),
emptyCameraCallbackReported(0)
emptyCameraCallbackReported(0),
width(),
height()
{
LOGD("Instantiated new CameraHandler (%p, %p)", callback, _userData);
void* params_buffer = operator new(sizeof(CameraParameters) + MAGIC_TAIL);
@@ -1122,7 +1124,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
if (handler == NULL) {
LOGE("ERROR in applyProperties --- cannot reinit camera");
handler=initCameraConnect(cameraCallback, cameraId, userData, NULL);
LOGD("CameraHandler::applyProperties(): repeate initCameraConnect after ERROR, handler=0x%x", (int)handler);
LOGD("CameraHandler::applyProperties(): repeat initCameraConnect after ERROR, handler=0x%x", (int)handler);
if (handler == NULL) {
LOGE("ERROR in applyProperties --- cannot reinit camera AGAIN --- cannot do anything else");
}

View File

@@ -184,7 +184,8 @@ namespace cv
//! type of the robust estimation algorithm
enum { LMEDS = 4, //!< least-median algorithm
RANSAC = 8 //!< RANSAC algorithm
RANSAC = 8, //!< RANSAC algorithm
RHO = 16 //!< RHO algorithm
};
enum { SOLVEPNP_ITERATIVE = 0,
@@ -265,8 +266,9 @@ a vector\<Point2f\> .
- **0** - a regular method using all the points
- **RANSAC** - RANSAC-based robust method
- **LMEDS** - Least-Median robust method
- **RHO** - PROSAC-based robust method
@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC method only). That is, if
(used in the RANSAC and RHO methods only). That is, if
\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \| > \texttt{ransacReprojThreshold}\f]
then the point \f$i\f$ is considered an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.
@@ -289,7 +291,7 @@ pairs to compute an initial homography estimate with a simple least-squares sche
However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective
transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
you can use one of the two robust methods. Both methods, RANSAC and LMeDS , try many different
you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
random subsets of the corresponding point pairs (of four pairs each), estimate the homography matrix
using this subset and a simple least-square algorithm, and then compute the quality/goodness of the
computed homography (which is the number of inliers for RANSAC or the median re-projection error for
@@ -300,7 +302,7 @@ Regardless of the method, robust or not, the computed homography matrix is refin
inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
re-projection error even more.
The method RANSAC can handle practically any ratio of outliers but it needs a threshold to
The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
noise is rather small, use the default method (method=0).
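A minimal sketch of selecting the RHO estimator (the point correspondences below are made up):
@code{.cpp}
#include "opencv2/calib3d.hpp"
#include <vector>
using namespace cv;

int main()
{
    // Four or more corresponding point pairs; the values below are made up.
    std::vector<Point2f> src, dst;
    src.push_back(Point2f(0, 0));     dst.push_back(Point2f(3, 2));
    src.push_back(Point2f(100, 0));   dst.push_back(Point2f(104, 1));
    src.push_back(Point2f(100, 100)); dst.push_back(Point2f(102, 105));
    src.push_back(Point2f(0, 100));   dst.push_back(Point2f(-1, 99));
    src.push_back(Point2f(50, 50));   dst.push_back(Point2f(52, 53));

    Mat mask;
    // Like RANSAC, RHO needs the reprojection threshold to separate inliers from outliers.
    Mat H = findHomography(src, dst, RHO, 3.0, mask);
    return H.empty() ? 1 : 0;
}
@endcode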
@@ -512,6 +514,16 @@ projections, as well as the camera matrix and the distortion coefficients.
@note
- An example of how to use solvePnP for planar augmented reality can be found at
opencv_source_code/samples/python2/plane_ar.py
- If you are using Python:
- Numpy array slices won't work as input because solvePnP requires contiguous
arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
modules/calib3d/src/solvepnp.cpp version 2.4.9)
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
which requires 2-channel information.
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
*/
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,

View File

@@ -19,8 +19,8 @@ typedef perf::TestBaseWithParam<int> PointsNum;
PERF_TEST_P(PointsNum_Algo, solvePnP,
testing::Combine(
testing::Values(4, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Values((int)SOLVEPNP_ITERATIVE, (int)SOLVEPNP_EPNP)
testing::Values(5, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Values((int)SOLVEPNP_ITERATIVE, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_UPNP, (int)SOLVEPNP_DLS)
)
)
{
@@ -64,13 +64,15 @@ PERF_TEST_P(PointsNum_Algo, solvePnP,
PERF_TEST_P(PointsNum_Algo, solvePnPSmallPoints,
testing::Combine(
testing::Values(4), //TODO: find why results on 4 points are too unstable
testing::Values((int)SOLVEPNP_P3P, (int)SOLVEPNP_DLS, (int)SOLVEPNP_UPNP)
testing::Values(5),
testing::Values((int)SOLVEPNP_P3P, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_DLS, (int)SOLVEPNP_UPNP)
)
)
{
int pointsNum = get<0>(GetParam());
pnpAlgo algo = get<1>(GetParam());
if( algo == SOLVEPNP_P3P )
pointsNum = 4;
vector<Point2f> points2d(pointsNum);
vector<Point3f> points3d(pointsNum);
@@ -92,7 +94,7 @@ PERF_TEST_P(PointsNum_Algo, solvePnPSmallPoints,
//add noise
Mat noise(1, (int)points2d.size(), CV_32FC2);
randu(noise, 0, 0.01);
randu(noise, -0.001, 0.001);
add(points2d, noise, points2d);
declare.in(points3d, points2d);
@@ -107,7 +109,7 @@ PERF_TEST_P(PointsNum_Algo, solvePnPSmallPoints,
SANITY_CHECK(tvec, 1e-2);
}
PERF_TEST_P(PointsNum, DISABLED_SolvePnPRansac, testing::Values(4, 3*9, 7*13))
PERF_TEST_P(PointsNum, DISABLED_SolvePnPRansac, testing::Values(5, 3*9, 7*13))
{
int count = GetParam();

View File

@@ -1595,7 +1595,10 @@ void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,
my = imgHeight / apertureHeight;
} else {
mx = 1.0;
my = *pasp;
if(pasp)
my = *pasp;
else
my = 1.0;
}
/* Calculate fovx and fovy. */

View File

@@ -2,7 +2,10 @@
#include "precomp.hpp"
#include "epnp.h"
epnp::epnp(const cv::Mat& cameraMatrix, const cv::Mat& opoints, const cv::Mat& ipoints)
namespace cv
{
epnp::epnp(const Mat& cameraMatrix, const Mat& opoints, const Mat& ipoints)
{
if (cameraMatrix.depth() == CV_32F)
init_camera_parameters<float>(cameraMatrix);
@@ -17,14 +20,14 @@ epnp::epnp(const cv::Mat& cameraMatrix, const cv::Mat& opoints, const cv::Mat& i
if (opoints.depth() == ipoints.depth())
{
if (opoints.depth() == CV_32F)
init_points<cv::Point3f,cv::Point2f>(opoints, ipoints);
init_points<Point3f,Point2f>(opoints, ipoints);
else
init_points<cv::Point3d,cv::Point2d>(opoints, ipoints);
init_points<Point3d,Point2d>(opoints, ipoints);
}
else if (opoints.depth() == CV_32F)
init_points<cv::Point3f,cv::Point2d>(opoints, ipoints);
init_points<Point3f,Point2d>(opoints, ipoints);
else
init_points<cv::Point3d,cv::Point2f>(opoints, ipoints);
init_points<Point3d,Point2f>(opoints, ipoints);
alphas.resize(4 * number_of_correspondences);
pcs.resize(3 * number_of_correspondences);
@@ -144,7 +147,7 @@ void epnp::compute_pcs(void)
}
}
void epnp::compute_pose(cv::Mat& R, cv::Mat& t)
void epnp::compute_pose(Mat& R, Mat& t)
{
choose_control_points();
compute_barycentric_coordinates();
@@ -189,8 +192,8 @@ void epnp::compute_pose(cv::Mat& R, cv::Mat& t)
if (rep_errors[2] < rep_errors[1]) N = 2;
if (rep_errors[3] < rep_errors[N]) N = 3;
cv::Mat(3, 1, CV_64F, ts[N]).copyTo(t);
cv::Mat(3, 3, CV_64F, Rs[N]).copyTo(R);
Mat(3, 1, CV_64F, ts[N]).copyTo(t);
Mat(3, 3, CV_64F, Rs[N]).copyTo(R);
}
void epnp::copy_R_and_t(const double R_src[3][3], const double t_src[3],
@@ -621,3 +624,5 @@ void epnp::qr_solve(CvMat * A, CvMat * b, CvMat * X)
pX[i] = (pb[i] - sum) / A2[i];
}
}
}

View File

@ -4,6 +4,9 @@
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
namespace cv
{
class epnp {
public:
epnp(const cv::Mat& cameraMatrix, const cv::Mat& opoints, const cv::Mat& ipoints);
@ -78,4 +81,6 @@ class epnp {
double * A1, * A2;
};
}
#endif

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "rho.h"
#include <iostream>
namespace cv
@ -69,20 +70,6 @@ static bool haveCollinearPoints( const Mat& m, int count )
}
template<typename T> int compressPoints( T* ptr, const uchar* mask, int mstep, int count )
{
int i, j;
for( i = j = 0; i < count; i++ )
if( mask[i*mstep] )
{
if( i > j )
ptr[j] = ptr[i];
j++;
}
return j;
}
class HomographyEstimatorCallback : public PointSetRegistrator::Callback
{
public:
@ -273,6 +260,85 @@ public:
}
namespace cv{
static bool createAndRunRHORegistrator(double confidence,
int maxIters,
double ransacReprojThreshold,
int npoints,
InputArray _src,
InputArray _dst,
OutputArray _H,
OutputArray _tempMask){
Mat src = _src.getMat();
Mat dst = _dst.getMat();
Mat tempMask;
bool result;
double beta = 0.35;/* 0.35 is a value that often works. */
/* Create temporary output matrix (RHO outputs a single-precision H only). */
Mat tmpH = Mat(3, 3, CV_32FC1);
/* Create output mask. */
tempMask = Mat(npoints, 1, CV_8U);
/**
* Make use of the RHO estimator API.
*
* This is where the math happens. A homography estimation context is
* initialized, used, then finalized.
*/
Ptr<RHO_HEST> p = rhoInit();
/**
* Optional. Ideally, the context would survive across calls to
* findHomography(), but no clean way appears to exist to do so. The price
* to pay is marginally more computational work than strictly needed.
*/
rhoEnsureCapacity(p, npoints, beta);
/**
* The critical call. All parameters are heavily documented in rhorefc.h.
*
* Currently, NR (Non-Randomness criterion) and Final Refinement (with
* internal, optimized Levenberg-Marquardt method) are enabled. However,
* while refinement seems to correctly smooth jitter most of the time, when
* refinement fails it tends to make the estimate visually very much worse.
* It may be necessary to remove the refinement flags in a future commit if
* this behaviour is too problematic.
*/
result = !!rhoHest(p,
(const float*)src.data,
(const float*)dst.data,
(char*) tempMask.data,
(unsigned) npoints,
(float) ransacReprojThreshold,
(unsigned) maxIters,
(unsigned) maxIters,
confidence,
4U,
beta,
RHO_FLAG_ENABLE_NR | RHO_FLAG_ENABLE_FINAL_REFINEMENT,
NULL,
(float*)tmpH.data);
/* Convert float homography to double precision. */
tmpH.convertTo(_H, CV_64FC1);
/* Maps non-zero mask elems to 1, for the sake of the testcase. */
for(int k=0;k<npoints;k++){
tempMask.data[k] = !!tempMask.data[k];
}
tempMask.copyTo(_tempMask);
return result;
}
}
cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
int method, double ransacReprojThreshold, OutputArray _mask,
const int maxIters, const double confidence)
@ -317,13 +383,15 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
result = createRANSACPointSetRegistrator(cb, 4, ransacReprojThreshold, confidence, maxIters)->run(src, dst, H, tempMask);
else if( method == LMEDS )
result = createLMeDSPointSetRegistrator(cb, 4, confidence, maxIters)->run(src, dst, H, tempMask);
else if( method == RHO )
result = createAndRunRHORegistrator(confidence, maxIters, ransacReprojThreshold, npoints, src, dst, H, tempMask);
else
CV_Error(Error::StsBadArg, "Unknown estimation method");
if( result && npoints > 4 )
if( result && npoints > 4 && method != RHO)
{
compressPoints( src.ptr<Point2f>(), tempMask.ptr<uchar>(), 1, npoints );
npoints = compressPoints( dst.ptr<Point2f>(), tempMask.ptr<uchar>(), 1, npoints );
compressElems( src.ptr<Point2f>(), tempMask.ptr<uchar>(), 1, npoints );
npoints = compressElems( dst.ptr<Point2f>(), tempMask.ptr<uchar>(), 1, npoints );
if( npoints > 0 )
{
Mat src1 = src.rowRange(0, npoints);

View File

@ -102,6 +102,19 @@ CV_EXPORTS Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<Po
CV_EXPORTS Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& cb,
int modelPoints, double confidence=0.99, int maxIters=1000 );
template<typename T> inline int compressElems( T* ptr, const uchar* mask, int mstep, int count )
{
int i, j;
for( i = j = 0; i < count; i++ )
if( mask[i*mstep] )
{
if( i > j )
ptr[j] = ptr[i];
j++;
}
return j;
}
}
#endif
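For intuition, compressElems performs a stable in-place compaction: it keeps each element whose mask byte is non-zero and returns the new logical length. A hypothetical call:

    cv::Point2f pts[5] = { cv::Point2f(0,0), cv::Point2f(1,1), cv::Point2f(2,2),
                           cv::Point2f(3,3), cv::Point2f(4,4) };
    uchar mask[5] = { 1, 0, 1, 0, 1 };
    int n = cv::compressElems(pts, mask, 1, 5);
    // n == 3; pts now starts with (0,0), (2,2), (4,4)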

2673
modules/calib3d/src/rho.cpp Normal file

File diff suppressed because it is too large

268
modules/calib3d/src/rho.h Normal file
View File

@ -0,0 +1,268 @@
/*
IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
BSD 3-Clause License
Copyright (C) 2014, Olexa Bilaniuk, Hamid Bazargani & Robert Laganiere, all rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistribution's of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistribution's in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of the copyright holders may not be used to endorse or promote products
derived from this software without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall the Intel Corporation or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
/**
* Bilaniuk, Olexa, Hamid Bazargani, and Robert Laganiere. "Fast Target
* Recognition on Mobile Devices: Revisiting Gaussian Elimination for the
* Estimation of Planar Homographies." In Computer Vision and Pattern
* Recognition Workshops (CVPRW), 2014 IEEE Conference on, pp. 119-125.
* IEEE, 2014.
*/
/* Include Guards */
#ifndef __OPENCV_RHO_H__
#define __OPENCV_RHO_H__
/* Includes */
#include <opencv2/core.hpp>
#include <stdint.h>
/* Defines */
/* Flags */
#ifndef RHO_FLAG_NONE
#define RHO_FLAG_NONE (0U<<0)
#endif
#ifndef RHO_FLAG_ENABLE_NR
#define RHO_FLAG_ENABLE_NR (1U<<0)
#endif
#ifndef RHO_FLAG_ENABLE_REFINEMENT
#define RHO_FLAG_ENABLE_REFINEMENT (1U<<1)
#endif
#ifndef RHO_FLAG_ENABLE_FINAL_REFINEMENT
#define RHO_FLAG_ENABLE_FINAL_REFINEMENT (1U<<2)
#endif
/* Namespace cv */
namespace cv{
/* Data structures */
/**
* Homography Estimation context.
*/
struct RHO_HEST;
typedef struct RHO_HEST RHO_HEST;
/* Functions */
/**
* Initialize the estimator context, by allocating the aligned buffers
* internally needed.
*
* @return A pointer to the context if successful; NULL if an error occurred.
*/
Ptr<RHO_HEST> rhoInit(void);
/**
* Ensure that the estimator context's internal table for non-randomness
* criterion is at least of the given size, and uses the given beta. The table
* should be larger than the maximum number of matches fed into the estimator.
*
* A value of N of 0 requests deallocation of the table.
*
* @param [in] p The initialized estimator context
* @param [in] N If 0, deallocate internal table. If > 0, ensure that the
* internal table is of at least this size, reallocating if
* necessary.
* @param [in] beta The beta-factor to use within the table.
* @return 0 if unsuccessful; non-zero otherwise.
*/
int rhoEnsureCapacity(Ptr<RHO_HEST> p, unsigned N, double beta);
/**
* Seeds the internal PRNG with the given seed.
*
* Although it is not required to call this function, since context
* initialization seeds itself with entropy from rand(), this function allows
* reproducible results by using a specified seed.
*
* @param [in] p The estimator context whose PRNG is to be seeded.
* @param [in] seed The 64-bit integer seed.
*/
void rhoSeed(Ptr<RHO_HEST> p, uint64_t seed);
/**
* Estimates the homography using the given context, matches and parameters to
* PROSAC.
*
* The given context must have been initialized.
*
* The matches are provided as two arrays of N single-precision, floating-point
* (x,y) points. Points with corresponding offsets in the two arrays constitute
* a match. The homography estimation attempts to find the 3x3 matrix H which
* best maps the homogeneous-coordinate points in the source array to their
* corresponding homogeneous-coordinate points in the destination array.
*
* Note: At least 4 matches must be provided (N >= 4).
* Note: A point in either array takes up 2 floats. The first of two stores
* the x-coordinate and the second of the two stores the y-coordinate.
* Thus, the arrays resemble this in memory:
*
* src = [x0, y0, x1, y1, x2, y2, x3, y3, x4, y4, ...]
* Matches: | | | | |
* dst = [x0, y0, x1, y1, x2, y2, x3, y3, x4, y4, ...]
* Note: The matches are expected to be provided sorted by quality, or at
* least not be worse-than-random in ordering.
*
* A pointer to the base of an array of N bytes can be provided. It serves as
* an output mask to indicate whether the corresponding match is an inlier to
* the returned homography, if any. A zero indicates an outlier; A non-zero
* value indicates an inlier.
*
* The PROSAC estimator requires a few parameters of its own. These are:
*
* - The maximum distance that a source point projected onto the destination
* plane can be from its putative match and still be considered an
* inlier. Must be non-negative.
* A sane default is 3.0.
* - The maximum number of PROSAC iterations. This corresponds to the
* largest number of samples that will be drawn and tested.
* A sane default is 2000.
* - The RANSAC convergence parameter. This corresponds to the number of
* iterations after which PROSAC will start sampling like RANSAC.
* A sane default is 2000.
* - The confidence threshold. This corresponds to the probability of
* finding a correct solution. Must be bounded by [0, 1].
* A sane default is 0.995.
* - The minimum number of inliers acceptable. Only a solution with at
* least this many inliers will be returned. The minimum is 4.
* A sane default is 10% of N.
* - The beta-parameter for the non-randomness termination criterion.
* Ignored if non-randomness criterion disabled, otherwise must be
* bounded by (0, 1).
* A sane default is 0.35.
* - Optional flags to control the estimation. Available flags are:
* HEST_FLAG_NONE:
* No special processing.
* HEST_FLAG_ENABLE_NR:
* Enable non-randomness criterion. If set, the beta parameter
* must also be set.
* HEST_FLAG_ENABLE_REFINEMENT:
* Enable refinement of each new best model, as they are found.
* HEST_FLAG_ENABLE_FINAL_REFINEMENT:
* Enable one final refinement of the best model found before
* returning it.
*
* The PROSAC estimator optionally accepts an extrinsic initial guess of H.
*
* The PROSAC estimator outputs a final estimate of H provided it was able to
* find one with a minimum of supporting inliers. If it was not, it outputs
* the all-zero matrix.
*
* The extrinsic guess at and final estimate of H are both in the same form:
* A 3x3 single-precision floating-point matrix with step 3. Thus, it is a
* 9-element array of floats, with the elements as follows:
*
* [ H00, H01, H02,
* H10, H11, H12,
* H20, H21, 1.0 ]
*
* Notice that the homography is normalized to H22 = 1.0.
*
* The function returns the number of inliers if it was able to find a
* homography with at least the minimum required support, and 0 if it was not.
*
*
* @param [in/out] p The context to use for homography estimation. Must
* be already initialized. Cannot be NULL.
* @param [in] src The pointer to the source points of the matches.
* Must be aligned to 4 bytes. Cannot be NULL.
* @param [in] dst The pointer to the destination points of the matches.
* Must be aligned to 4 bytes. Cannot be NULL.
* @param [out] inl The pointer to the output mask of inlier matches.
* Must be aligned to 4 bytes. May be NULL.
* @param [in] N The number of matches. Minimum 4.
* @param [in] maxD The maximum distance. Minimum 0.
* @param [in] maxI The maximum number of PROSAC iterations.
* @param [in] rConvg The RANSAC convergence parameter.
* @param [in] cfd The required confidence in the solution.
* @param [in] minInl The minimum required number of inliers. Minimum 4.
* @param [in] beta The beta-parameter for the non-randomness criterion.
* @param [in] flags A union of flags to fine-tune the estimation.
* @param [in] guessH An extrinsic guess at the solution H, or NULL if
* none provided.
* @param [out] finalH The final estimation of H, or the zero matrix if
* the minimum number of inliers was not met.
* Cannot be NULL.
* @return The number of inliers if the minimum number of
* inliers for acceptance was reached; 0 otherwise.
*/
unsigned rhoHest(Ptr<RHO_HEST> p, /* Homography estimation context. */
const float* src, /* Source points */
const float* dst, /* Destination points */
char* inl, /* Inlier mask */
unsigned N, /* = src.length = dst.length = inl.length */
float maxD, /* 3.0 */
unsigned maxI, /* 2000 */
unsigned rConvg, /* 2000 */
double cfd, /* 0.995 */
unsigned minInl, /* 4 */
double beta, /* 0.35 */
unsigned flags, /* 0 */
const float* guessH, /* Extrinsic guess, NULL if none provided */
float* finalH); /* Final result. */
/* End Namespace cv */
}
#endif
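Putting the declarations above together, and adding rhoSeed() for reproducibility (which the createAndRunRHORegistrator() wrapper earlier does not do), a hedged sketch of the intended calling sequence, using the documented default parameters and hypothetical input buffers srcPts/dstPts/inlMask of N matches:

    cv::Ptr<cv::RHO_HEST> p = cv::rhoInit();
    cv::rhoSeed(p, 42);                 /* Fixed PRNG seed -> reproducible estimates. */
    cv::rhoEnsureCapacity(p, N, 0.35);  /* Size the NR table for N matches, beta = 0.35. */
    float Hout[9];
    unsigned nInl = cv::rhoHest(p, srcPts, dstPts, inlMask, N,
                                3.0f,               /* maxD         */
                                2000, 2000,         /* maxI, rConvg */
                                0.995,              /* cfd          */
                                4U, 0.35,           /* minInl, beta */
                                RHO_FLAG_ENABLE_NR,
                                NULL,               /* no extrinsic guess */
                                Hout);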

View File

@ -48,41 +48,43 @@
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>
using namespace cv;
bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
namespace cv
{
bool solvePnP( InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
_rvec.create(3, 1, CV_64F);
_tvec.create(3, 1, CV_64F);
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(_cameraMatrix.getMat()), distCoeffs = Mat_<double>(_distCoeffs.getMat());
if (flags == SOLVEPNP_EPNP)
if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
{
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
epnp PnP(cameraMatrix, opoints, undistortedPoints);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
PnP.compute_pose(R, tvec);
cv::Rodrigues(R, rvec);
Rodrigues(R, rvec);
return true;
}
else if (flags == SOLVEPNP_P3P)
{
CV_Assert( npoints == 4);
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
p3p P3Psolver(cameraMatrix);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
bool result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
cv::Rodrigues(R, rvec);
Rodrigues(R, rvec);
return result;
}
else if (flags == SOLVEPNP_ITERATIVE)
@ -95,32 +97,32 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
&c_rvec, &c_tvec, useExtrinsicGuess );
return true;
}
else if (flags == SOLVEPNP_DLS)
/*else if (flags == SOLVEPNP_DLS)
{
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
dls PnP(opoints, undistortedPoints);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
bool result = PnP.compute_pose(R, tvec);
if (result)
cv::Rodrigues(R, rvec);
Rodrigues(R, rvec);
return result;
}
else if (flags == SOLVEPNP_UPNP)
{
upnp PnP(cameraMatrix, opoints, ipoints);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
double f = PnP.compute_pose(R, tvec);
cv::Rodrigues(R, rvec);
Rodrigues(R, rvec);
if(cameraMatrix.type() == CV_32F)
cameraMatrix.at<float>(0,0) = cameraMatrix.at<float>(1,1) = (float)f;
else
cameraMatrix.at<double>(0,0) = cameraMatrix.at<double>(1,1) = f;
return true;
}
}*/
else
CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
return false;
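As the dispatch above shows, the solver is selected solely by the flags argument. A hypothetical call through the public API (objectPoints, imagePoints, K and distCoeffs assumed prepared elsewhere):

    cv::Mat rvec, tvec;
    bool ok = cv::solvePnP(objectPoints, imagePoints, K, distCoeffs,
                           rvec, tvec, false /* useExtrinsicGuess */,
                           cv::SOLVEPNP_EPNP); // or SOLVEPNP_ITERATIVE / SOLVEPNP_P3P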
@ -131,7 +133,7 @@ class PnPRansacCallback : public PointSetRegistrator::Callback
public:
PnPRansacCallback(Mat _cameraMatrix=Mat(3,3,CV_64F), Mat _distCoeffs=Mat(4,1,CV_64F), int _flags=cv::SOLVEPNP_ITERATIVE,
PnPRansacCallback(Mat _cameraMatrix=Mat(3,3,CV_64F), Mat _distCoeffs=Mat(4,1,CV_64F), int _flags=SOLVEPNP_ITERATIVE,
bool _useExtrinsicGuess=false, Mat _rvec=Mat(), Mat _tvec=Mat() )
: cameraMatrix(_cameraMatrix), distCoeffs(_distCoeffs), flags(_flags), useExtrinsicGuess(_useExtrinsicGuess),
rvec(_rvec), tvec(_tvec) {}
@ -142,12 +144,11 @@ public:
{
Mat opoints = _m1.getMat(), ipoints = _m2.getMat();
bool correspondence = cv::solvePnP( _m1, _m2, cameraMatrix, distCoeffs,
bool correspondence = solvePnP( _m1, _m2, cameraMatrix, distCoeffs,
rvec, tvec, useExtrinsicGuess, flags );
Mat _local_model;
cv::hconcat(rvec, tvec, _local_model);
hconcat(rvec, tvec, _local_model);
_local_model.copyTo(_model);
return correspondence;
@ -166,7 +167,7 @@ public:
Mat projpoints(count, 2, CV_32FC1);
cv::projectPoints(opoints, _rvec, _tvec, cameraMatrix, distCoeffs, projpoints);
projectPoints(opoints, _rvec, _tvec, cameraMatrix, distCoeffs, projpoints);
const Point2f* ipoints_ptr = ipoints.ptr<Point2f>();
const Point2f* projpoints_ptr = projpoints.ptr<Point2f>();
@ -175,7 +176,7 @@ public:
float* err = _err.getMat().ptr<float>();
for ( i = 0; i < count; ++i)
err[i] = (float)cv::norm( ipoints_ptr[i] - projpoints_ptr[i] );
err[i] = (float)norm( ipoints_ptr[i] - projpoints_ptr[i] );
}
@ -188,7 +189,7 @@ public:
Mat tvec;
};
bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
int iterationsCount, float reprojectionError, double confidence,
@ -214,23 +215,45 @@ bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, flags, useExtrinsicGuess, rvec, tvec);
int model_points = 5;
int ransac_kernel_method = SOLVEPNP_EPNP;
int model_points = 4; // minimum number of model points
if( flags == cv::SOLVEPNP_ITERATIVE ) model_points = 6;
else if( flags == cv::SOLVEPNP_UPNP ) model_points = 6;
else if( flags == cv::SOLVEPNP_EPNP ) model_points = 5;
if( npoints == 4 )
{
model_points = 4;
ransac_kernel_method = SOLVEPNP_P3P;
}
Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, ransac_kernel_method, useExtrinsicGuess, rvec, tvec);
double param1 = reprojectionError; // reprojection error
double param2 = confidence; // confidence
int param3 = iterationsCount; // number maximum iterations
cv::Mat _local_model(3, 2, CV_64FC1);
cv::Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);
Mat _local_model(3, 2, CV_64FC1);
Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);
// call Ransac
int result = createRANSACPointSetRegistrator(cb, model_points, param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);
int result = createRANSACPointSetRegistrator(cb, model_points,
param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);
if( result > 0 )
{
vector<Point3d> opoints_inliers;
vector<Point2d> ipoints_inliers;
opoints.convertTo(opoints_inliers, CV_64F);
ipoints.convertTo(ipoints_inliers, CV_64F);
const uchar* mask = _mask_local_inliers.ptr<uchar>();
int npoints1 = compressElems(&opoints_inliers[0], mask, 1, npoints);
compressElems(&ipoints_inliers[0], mask, 1, npoints);
opoints_inliers.resize(npoints1);
ipoints_inliers.resize(npoints1);
result = solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix,
distCoeffs, rvec, tvec, false, flags == SOLVEPNP_P3P ? SOLVEPNP_EPNP : flags) ? 1 : -1;
}
if( result <= 0 || _local_model.rows <= 0)
{
@ -260,3 +283,5 @@ bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
}
return true;
}
}
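A hedged sketch of driving the RANSAC wrapper above (hypothetical inputs; note that with exactly four correspondences the kernel silently switches to P3P, per the npoints == 4 branch):

    cv::Mat rvec, tvec, inliers;
    bool ok = cv::solvePnPRansac(objectPoints, imagePoints, K, cv::noArray(),
                                 rvec, tvec, false,
                                 100,    /* iterationsCount   */
                                 8.0f,   /* reprojectionError */
                                 0.99,   /* confidence        */
                                 inliers, cv::SOLVEPNP_EPNP);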

View File

@ -62,10 +62,10 @@
#define MAX_COUNT_OF_POINTS 303
#define COUNT_NORM_TYPES 3
#define METHODS_COUNT 3
#define METHODS_COUNT 4
int NORM_TYPE[COUNT_NORM_TYPES] = {cv::NORM_L1, cv::NORM_L2, cv::NORM_INF};
int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS};
int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS, cv::RHO};
using namespace cv;
using namespace std;
@ -94,12 +94,12 @@ private:
void print_information_1(int j, int N, int method, const Mat& H);
void print_information_2(int j, int N, int method, const Mat& H, const Mat& H_res, int k, double diff);
void print_information_3(int j, int N, const Mat& mask);
void print_information_3(int method, int j, int N, const Mat& mask);
void print_information_4(int method, int j, int N, int k, int l, double diff);
void print_information_5(int method, int j, int N, int l, double diff);
void print_information_6(int j, int N, int k, double diff, bool value);
void print_information_7(int j, int N, int k, double diff, bool original_value, bool found_value);
void print_information_8(int j, int N, int k, int l, double diff);
void print_information_6(int method, int j, int N, int k, double diff, bool value);
void print_information_7(int method, int j, int N, int k, double diff, bool original_value, bool found_value);
void print_information_8(int method, int j, int N, int k, int l, double diff);
};
CV_HomographyTest::CV_HomographyTest() : max_diff(1e-2f), max_2diff(2e-2f)
@ -144,7 +144,7 @@ void CV_HomographyTest::print_information_1(int j, int N, int _method, const Mat
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;
cout << "Count of points: " << N << endl; cout << endl;
cout << "Method: "; if (_method == 0) cout << 0; else if (_method == 8) cout << "RANSAC"; else cout << "LMEDS"; cout << endl;
cout << "Method: "; if (_method == 0) cout << 0; else if (_method == 8) cout << "RANSAC"; else if (_method == cv::RHO) cout << "RHO"; else cout << "LMEDS"; cout << endl;
cout << "Homography matrix:" << endl; cout << endl;
cout << H << endl; cout << endl;
cout << "Number of rows: " << H.rows << " Number of cols: " << H.cols << endl; cout << endl;
@ -156,7 +156,7 @@ void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;
cout << "Count of points: " << N << endl; cout << endl;
cout << "Method: "; if (_method == 0) cout << 0; else if (_method == 8) cout << "RANSAC"; else cout << "LMEDS"; cout << endl;
cout << "Method: "; if (_method == 0) cout << 0; else if (_method == 8) cout << "RANSAC"; else if (_method == cv::RHO) cout << "RHO"; else cout << "LMEDS"; cout << endl;
cout << "Original matrix:" << endl; cout << endl;
cout << H << endl; cout << endl;
cout << "Found matrix:" << endl; cout << endl;
@ -166,13 +166,13 @@ void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat
cout << "Maximum allowed difference: " << max_diff << endl; cout << endl;
}
void CV_HomographyTest::print_information_3(int j, int N, const Mat& mask)
void CV_HomographyTest::print_information_3(int _method, int j, int N, const Mat& mask)
{
cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;
cout << "Count of points: " << N << endl; cout << endl;
cout << "Method: RANSAC" << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
cout << "Found mask:" << endl; cout << endl;
cout << mask << endl; cout << endl;
cout << "Number of rows: " << mask.rows << " Number of cols: " << mask.cols << endl; cout << endl;
@ -205,10 +205,10 @@ void CV_HomographyTest::print_information_5(int _method, int j, int N, int l, do
cout << "Maxumum allowed difference: " << max_diff << endl; cout << endl;
}
void CV_HomographyTest::print_information_6(int j, int N, int k, double diff, bool value)
void CV_HomographyTest::print_information_6(int _method, int j, int N, int k, double diff, bool value)
{
cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl;
cout << "Method: RANSAC" << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;
cout << "Count of points: " << N << " " << endl;
@ -218,10 +218,10 @@ void CV_HomographyTest::print_information_6(int j, int N, int k, double diff, bo
cout << "Value of found mask: "<< value << endl; cout << endl;
}
void CV_HomographyTest::print_information_7(int j, int N, int k, double diff, bool original_value, bool found_value)
void CV_HomographyTest::print_information_7(int _method, int j, int N, int k, double diff, bool original_value, bool found_value)
{
cout << endl; cout << "Checking for inliers/outliers mask..." << endl; cout << endl;
cout << "Method: RANSAC" << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;
cout << "Count of points: " << N << " " << endl;
@ -231,10 +231,10 @@ void CV_HomographyTest::print_information_7(int j, int N, int k, double diff, bo
cout << "Value of original mask: "<< original_value << " Value of found mask: " << found_value << endl; cout << endl;
}
void CV_HomographyTest::print_information_8(int j, int N, int k, int l, double diff)
void CV_HomographyTest::print_information_8(int _method, int j, int N, int k, int l, double diff)
{
cout << endl; cout << "Checking for reprojection error of inlier..." << endl; cout << endl;
cout << "Method: RANSAC" << endl;
cout << "Method: "; if (_method == RANSAC) cout << "RANSAC" << endl; else if (_method == cv::RHO) cout << "RHO" << endl; else cout << _method << endl;
cout << "Sigma of normal noise: " << sigma << endl;
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;
@ -339,14 +339,15 @@ void CV_HomographyTest::run(int)
continue;
}
case cv::RHO:
case RANSAC:
{
cv::Mat mask [4]; double diff;
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask[0]),
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask[1]),
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask[2]),
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask[3]) };
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, method, reproj_threshold, mask[0]),
cv::findHomography(src_mat_2f, dst_vec, method, reproj_threshold, mask[1]),
cv::findHomography(src_vec, dst_mat_2f, method, reproj_threshold, mask[2]),
cv::findHomography(src_vec, dst_vec, method, reproj_threshold, mask[3]) };
for (int j = 0; j < 4; ++j)
{
@ -370,7 +371,7 @@ void CV_HomographyTest::run(int)
if (code)
{
print_information_3(j, N, mask[j]);
print_information_3(method, j, N, mask[j]);
switch (code)
{
@ -466,14 +467,15 @@ void CV_HomographyTest::run(int)
continue;
}
case cv::RHO:
case RANSAC:
{
cv::Mat mask_res [4];
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask_res[0]),
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask_res[1]),
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask_res[2]),
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask_res[3]) };
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, method, reproj_threshold, mask_res[0]),
cv::findHomography(src_mat_2f, dst_vec, method, reproj_threshold, mask_res[1]),
cv::findHomography(src_vec, dst_mat_2f, method, reproj_threshold, mask_res[2]),
cv::findHomography(src_vec, dst_vec, method, reproj_threshold, mask_res[3]) };
for (int j = 0; j < 4; ++j)
{
@ -488,7 +490,7 @@ void CV_HomographyTest::run(int)
if (code)
{
print_information_3(j, N, mask_res[j]);
print_information_3(method, j, N, mask_res[j]);
switch (code)
{
@ -520,14 +522,14 @@ void CV_HomographyTest::run(int)
if (mask_res[j].at<bool>(k, 0) != (diff <= reproj_threshold))
{
print_information_6(j, N, k, diff, mask_res[j].at<bool>(k, 0));
print_information_6(method, j, N, k, diff, mask_res[j].at<bool>(k, 0));
CV_Error(CALIB3D_HOMOGRAPHY_ERROR_RANSAC_MASK, MESSAGE_RANSAC_MASK_4);
return;
}
if (mask.at<bool>(k, 0) && !mask_res[j].at<bool>(k, 0))
{
print_information_7(j, N, k, diff, mask.at<bool>(k, 0), mask_res[j].at<bool>(k, 0));
print_information_7(method, j, N, k, diff, mask.at<bool>(k, 0), mask_res[j].at<bool>(k, 0));
CV_Error(CALIB3D_HOMOGRAPHY_ERROR_RANSAC_MASK, MESSAGE_RANSAC_MASK_5);
return;
}
@ -547,7 +549,7 @@ void CV_HomographyTest::run(int)
if (diff - cv::norm(noise_2d, NORM_TYPE[l]) > max_2diff)
{
print_information_8(j, N, k, l, diff - cv::norm(noise_2d, NORM_TYPE[l]));
print_information_8(method, j, N, k, l, diff - cv::norm(noise_2d, NORM_TYPE[l]));
CV_Error(CALIB3D_HOMOGRAPHY_ERROR_RANSAC_DIFF, MESSAGE_RANSAC_DIFF);
return;
}

View File

@ -985,22 +985,131 @@ horizontal axis.
*/
CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx);
/** @brief concatenate matrices horizontally
@todo document
/** @brief Applies horizontal concatenation to given matrices.
The function horizontally concatenates two or more cv::Mat matrices (with the same number of rows).
@code{.cpp}
cv::Mat matArray[] = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
cv::Mat out;
cv::hconcat( matArray, 3, out );
//out:
//[1, 2, 3;
// 1, 2, 3;
// 1, 2, 3;
// 1, 2, 3]
@endcode
@param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.
@param nsrc number of matrices in src.
@param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
@sa cv::vconcat(const Mat*, size_t, OutputArray), @sa cv::vconcat(InputArrayOfArrays, OutputArray) and @sa cv::vconcat(InputArray, InputArray, OutputArray)
*/
CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);
/** @overload */
/** @overload
@code{.cpp}
cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 4,
2, 5,
3, 6);
cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 7, 10,
8, 11,
9, 12);
cv::Mat C;
cv::hconcat(A, B, C);
//C:
//[1, 4, 7, 10;
// 2, 5, 8, 11;
// 3, 6, 9, 12]
@endcode
@param src1 first input array to be considered for horizontal concatenation.
@param src2 second input array to be considered for horizontal concatenation.
@param dst output array. It has the same number of rows and depth as the src1 and src2, and the sum of cols of the src1 and src2.
*/
CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);
/** @overload */
/** @overload
@code{.cpp}
std::vector<cv::Mat> matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
cv::Mat out;
cv::hconcat( matrices, out );
//out:
//[1, 2, 3;
// 1, 2, 3;
// 1, 2, 3;
// 1, 2, 3]
@endcode
@param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.
@param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
*/
CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);
/** @brief concatenate matrices vertically
@todo document
/** @brief Applies vertical concatenation to given matrices.
The function vertically concatenates two or more cv::Mat matrices (with the same number of cols).
@code{.cpp}
cv::Mat matArray[] = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
cv::Mat out;
cv::vconcat( matArray, 3, out );
//out:
//[1, 1, 1, 1;
// 2, 2, 2, 2;
// 3, 3, 3, 3]
@endcode
@param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.
@param nsrc number of matrices in src.
@param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
@sa cv::hconcat(const Mat*, size_t, OutputArray), @sa cv::hconcat(InputArrayOfArrays, OutputArray) and @sa cv::hconcat(InputArray, InputArray, OutputArray)
*/
CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);
/** @overload */
/** @overload
@code{.cpp}
cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 7,
2, 8,
3, 9);
cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 4, 10,
5, 11,
6, 12);
cv::Mat C;
cv::vconcat(A, B, C);
//C:
//[1, 7;
// 2, 8;
// 3, 9;
// 4, 10;
// 5, 11;
// 6, 12]
@endcode
@param src1 first input array to be considered for vertical concatenation.
@param src2 second input array to be considered for vertical concatenation.
@param dst output array. It has the same number of cols and depth as the src1 and src2, and the sum of rows of the src1 and src2.
*/
CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);
/** @overload */
/** @overload
@code{.cpp}
std::vector<cv::Mat> matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
cv::Mat out;
cv::vconcat( matrices, out );
//out:
//[1, 1, 1, 1;
// 2, 2, 2, 2;
// 3, 3, 3, 3]
@endcode
@param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.
@param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
*/
CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
/** @brief computes bitwise conjunction of the two arrays (dst = src1 & src2)
@ -2821,41 +2930,6 @@ public:
virtual void read(const FileNode& fn) { (void)fn; }
};
// define properties
#define CV_PURE_PROPERTY(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(type val) = 0;
#define CV_PURE_PROPERTY_S(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(const type & val) = 0;
#define CV_PURE_PROPERTY_RO(type, name) \
CV_WRAP virtual type get##name() const = 0;
// basic property implementation
#define CV_IMPL_PROPERTY_RO(type, name, member) \
inline type get##name() const { return member; }
#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
CV_IMPL_PROPERTY_RO(r_type, name, member) \
inline void set##name(w_type val) { member = val; }
#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
r_type get##name() const { return internal_obj.get##internal_name(); } \
void set##name(w_type val) { internal_obj.set##internal_name(val); }
#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
struct Param {
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
UNSIGNED_INT=8, UINT64=9, UCHAR=11 };

View File

@ -442,6 +442,10 @@ template<typename _Tp> static inline _Tp saturate_cast(int v) { return _Tp(
template<typename _Tp> static inline _Tp saturate_cast(float v) { return _Tp(v); }
/** @overload */
template<typename _Tp> static inline _Tp saturate_cast(double v) { return _Tp(v); }
/** @overload */
template<typename _Tp> static inline _Tp saturate_cast(int64 v) { return _Tp(v); }
/** @overload */
template<typename _Tp> static inline _Tp saturate_cast(uint64 v) { return _Tp(v); }
//! @cond IGNORED
@ -452,6 +456,8 @@ template<> inline uchar saturate_cast<uchar>(short v) { return saturate_c
template<> inline uchar saturate_cast<uchar>(unsigned v) { return (uchar)std::min(v, (unsigned)UCHAR_MAX); }
template<> inline uchar saturate_cast<uchar>(float v) { int iv = cvRound(v); return saturate_cast<uchar>(iv); }
template<> inline uchar saturate_cast<uchar>(double v) { int iv = cvRound(v); return saturate_cast<uchar>(iv); }
template<> inline uchar saturate_cast<uchar>(int64 v) { return (uchar)((uint64)v <= (uint64)UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
template<> inline uchar saturate_cast<uchar>(uint64 v) { return (uchar)std::min(v, (uint64)UCHAR_MAX); }
template<> inline schar saturate_cast<schar>(uchar v) { return (schar)std::min((int)v, SCHAR_MAX); }
template<> inline schar saturate_cast<schar>(ushort v) { return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); }
@ -460,6 +466,8 @@ template<> inline schar saturate_cast<schar>(short v) { return saturate_c
template<> inline schar saturate_cast<schar>(unsigned v) { return (schar)std::min(v, (unsigned)SCHAR_MAX); }
template<> inline schar saturate_cast<schar>(float v) { int iv = cvRound(v); return saturate_cast<schar>(iv); }
template<> inline schar saturate_cast<schar>(double v) { int iv = cvRound(v); return saturate_cast<schar>(iv); }
template<> inline schar saturate_cast<schar>(int64 v) { return (schar)((uint64)((int64)v-SCHAR_MIN) <= (uint64)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN); }
template<> inline schar saturate_cast<schar>(uint64 v) { return (schar)std::min(v, (uint64)SCHAR_MAX); }
template<> inline ushort saturate_cast<ushort>(schar v) { return (ushort)std::max((int)v, 0); }
template<> inline ushort saturate_cast<ushort>(short v) { return (ushort)std::max((int)v, 0); }
@ -467,12 +475,16 @@ template<> inline ushort saturate_cast<ushort>(int v) { return (ushort)((
template<> inline ushort saturate_cast<ushort>(unsigned v) { return (ushort)std::min(v, (unsigned)USHRT_MAX); }
template<> inline ushort saturate_cast<ushort>(float v) { int iv = cvRound(v); return saturate_cast<ushort>(iv); }
template<> inline ushort saturate_cast<ushort>(double v) { int iv = cvRound(v); return saturate_cast<ushort>(iv); }
template<> inline ushort saturate_cast<ushort>(int64 v) { return (ushort)((uint64)v <= (uint64)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }
template<> inline ushort saturate_cast<ushort>(uint64 v) { return (ushort)std::min(v, (uint64)USHRT_MAX); }
template<> inline short saturate_cast<short>(ushort v) { return (short)std::min((int)v, SHRT_MAX); }
template<> inline short saturate_cast<short>(int v) { return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); }
template<> inline short saturate_cast<short>(unsigned v) { return (short)std::min(v, (unsigned)SHRT_MAX); }
template<> inline short saturate_cast<short>(float v) { int iv = cvRound(v); return saturate_cast<short>(iv); }
template<> inline short saturate_cast<short>(double v) { int iv = cvRound(v); return saturate_cast<short>(iv); }
template<> inline short saturate_cast<short>(int64 v) { return (short)((uint64)((int64)v - SHRT_MIN) <= (uint64)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); }
template<> inline short saturate_cast<short>(uint64 v) { return (short)std::min(v, (uint64)SHRT_MAX); }
template<> inline int saturate_cast<int>(float v) { return cvRound(v); }
template<> inline int saturate_cast<int>(double v) { return cvRound(v); }
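A few spot checks that follow directly from the new 64-bit specializations above (a sketch, not part of the patch; the expected values are derived from the clamping expressions):

    CV_Assert( cv::saturate_cast<uchar>((int64)300)   == 255 );   // above UCHAR_MAX
    CV_Assert( cv::saturate_cast<uchar>((int64)-5)    == 0 );     // negative clamps to 0
    CV_Assert( cv::saturate_cast<schar>((uint64)200)  == 127 );   // above SCHAR_MAX
    CV_Assert( cv::saturate_cast<short>((int64)70000) == 32767 ); // above SHRT_MAX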

View File

@ -480,16 +480,14 @@ CV_INLINE int cvRound( double value )
fistp t;
}
return t;
#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND(value);
#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND_DBL(value);
#elif defined CV_ICC || defined __GNUC__
# ifdef HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND(value);
# elif CV_VFP
# if CV_VFP
ARM_ROUND_DBL(value)
# else
# else
return (int)lrint(value);
# endif
# endif
#else
double intpart, fractpart;
fractpart = modf(value, &intpart);
@ -505,7 +503,9 @@ CV_INLINE int cvRound( double value )
/** @overload */
CV_INLINE int cvRound(float value)
{
#if CV_VFP && !defined HAVE_TEGRA_OPTIMIZATION
#if defined ANDROID && (defined CV_ICC || defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND_FLT(value);
#elif CV_VFP && !defined HAVE_TEGRA_OPTIMIZATION
ARM_ROUND_FLT(value)
#else
return cvRound((double)value);

View File

@ -1040,9 +1040,11 @@ static inline bool operator>= (const String& lhs, const char* rhs) { return lh
#ifndef OPENCV_NOSTL_TRANSITIONAL
namespace std
{
static inline void swap(cv::String& a, cv::String& b) { a.swap(b); }
}
#else
namespace cv
#endif
{
template<> inline
void swap<cv::String>(cv::String& a, cv::String& b)
@ -1050,6 +1052,7 @@ namespace cv
a.swap(b);
}
}
#endif
#include "opencv2/core/ptr.inl.hpp"

View File

@ -257,7 +257,7 @@ Here is how to read the file created by the code sample above:
cout << " " << (int)lbpval[i];
cout << ")" << endl;
}
fs.release();
fs2.release();
@endcode
Format specification {#format_spec}

View File

@ -172,6 +172,27 @@ namespace cv
CV_EXPORTS void scalarToRawData(const cv::Scalar& s, void* buf, int type, int unroll_to = 0);
}
// property implementation macros
#define CV_IMPL_PROPERTY_RO(type, name, member) \
inline type get##name() const { return member; }
#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
CV_IMPL_PROPERTY_RO(r_type, name, member) \
inline void set##name(w_type val) { member = val; }
#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
r_type get##name() const { return internal_obj.get##internal_name(); } \
void set##name(w_type val) { internal_obj.set##internal_name(val); }
#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
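For illustration, a hypothetical class using the relocated helpers (Foo and its members are invented for this sketch):

    class Foo
    {
    public:
        // expands to: int getIterations() const; void setIterations(int val);
        CV_IMPL_PROPERTY(int, Iterations, iterations_)
        // expands to: cv::Mat getMask() const; void setMask(const cv::Mat& val);
        CV_IMPL_PROPERTY_S(cv::Mat, Mask, mask_)
    private:
        int iterations_;
        cv::Mat mask_;
    };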
/****************************************************************************************\
* Structures and macros for integration with IPP *

View File

@ -133,7 +133,7 @@ typedef int CVStatus;
/** @see cv::Error::Code */
enum {
CV_StsOk= 0, /**< everithing is ok */
CV_StsOk= 0, /**< everything is ok */
CV_StsBackTrace= -1, /**< pseudo error for back trace */
CV_StsError= -2, /**< unknown /unspecified error */
CV_StsInternal= -3, /**< internal error (bad state) */
@ -143,28 +143,28 @@ enum {
CV_StsNoConv= -7, /**< iter. didn't converge */
CV_StsAutoTrace= -8, /**< tracing */
CV_HeaderIsNull= -9, /**< image header is NULL */
CV_BadImageSize= -10, /**< image size is invalid */
CV_BadOffset= -11, /**< offset is invalid */
CV_BadDataPtr= -12, /**/
CV_BadStep= -13, /**/
CV_BadModelOrChSeq= -14, /**/
CV_BadNumChannels= -15, /**/
CV_BadNumChannel1U= -16, /**/
CV_BadDepth= -17, /**/
CV_BadAlphaChannel= -18, /**/
CV_BadOrder= -19, /**/
CV_BadOrigin= -20, /**/
CV_BadAlign= -21, /**/
CV_BadCallBack= -22, /**/
CV_BadTileSize= -23, /**/
CV_BadCOI= -24, /**/
CV_BadROISize= -25, /**/
CV_MaskIsTiled= -26, /**/
CV_StsNullPtr= -27, /**< null pointer */
CV_StsVecLengthErr= -28, /**< incorrect vector length */
CV_StsFilterStructContentErr= -29, /**< incorr. filter structure content */
CV_StsKernelStructContentErr= -30, /**< incorr. transform kernel content */
CV_StsFilterOffsetErr= -31, /**< incorrect filter offset value */
CV_BadImageSize= -10, /**< image size is invalid */
CV_BadOffset= -11, /**< offset is invalid */
CV_BadDataPtr= -12, /**/
CV_BadStep= -13, /**/
CV_BadModelOrChSeq= -14, /**/
CV_BadNumChannels= -15, /**/
CV_BadNumChannel1U= -16, /**/
CV_BadDepth= -17, /**/
CV_BadAlphaChannel= -18, /**/
CV_BadOrder= -19, /**/
CV_BadOrigin= -20, /**/
CV_BadAlign= -21, /**/
CV_BadCallBack= -22, /**/
CV_BadTileSize= -23, /**/
CV_BadCOI= -24, /**/
CV_BadROISize= -25, /**/
CV_MaskIsTiled= -26, /**/
CV_StsNullPtr= -27, /**< null pointer */
CV_StsVecLengthErr= -28, /**< incorrect vector length */
CV_StsFilterStructContentErr= -29, /**< incorr. filter structure content */
CV_StsKernelStructContentErr= -30, /**< incorr. transform kernel content */
CV_StsFilterOffsetErr= -31, /**< incorrect filter offset value */
CV_StsBadSize= -201, /**< the input/output structure size is incorrect */
CV_StsDivByZero= -202, /**< division by zero */
CV_StsInplaceNotSupported= -203, /**< in-place operation is not supported */

View File

@ -61,7 +61,7 @@ CV_EXPORTS void addImpl(int flag, const char* func = 0); // add implementation a
// Each implementation entry corresponds to a function name entry, so you can find which implementation was executed in which function
CV_EXPORTS int getImpl(std::vector<int> &impl, std::vector<String> &funName);
CV_EXPORTS bool useCollection(); // return implementation colelction state
CV_EXPORTS bool useCollection(); // return implementation collection state
CV_EXPORTS void setUseCollection(bool flag); // set implementation collection state
#define CV_IMPL_PLAIN 0x01 // native CPU OpenCV implementation

View File

@ -0,0 +1,45 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
template <typename T>
static void CvRoundMat(const cv::Mat & src, cv::Mat & dst)
{
for (int y = 0; y < dst.rows; ++y)
{
const T * sptr = src.ptr<T>(y);
int * dptr = dst.ptr<int>(y);
for (int x = 0; x < dst.cols; ++x)
dptr[x] = cvRound(sptr[x]);
}
}
PERF_TEST_P(Size_MatType, CvRound_Float,
testing::Combine(testing::Values(TYPICAL_MAT_SIZES),
testing::Values(CV_32FC1, CV_64FC1)))
{
Size size = get<0>(GetParam());
int type = get<1>(GetParam()), depth = CV_MAT_DEPTH(type);
cv::Mat src(size, type), dst(size, CV_32SC1);
declare.in(src, WARMUP_RNG).out(dst);
if (depth == CV_32F)
{
TEST_CYCLE()
CvRoundMat<float>(src, dst);
}
else if (depth == CV_64F)
{
TEST_CYCLE()
CvRoundMat<double>(src, dst);
}
SANITY_CHECK_NOTHING();
}

View File

@ -2449,7 +2449,6 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows )
(DFTFunc)CCSIDFT_64f
};
AutoBuffer<uchar> buf;
void *spec = 0;
Mat src0 = _src0.getMat(), src = src0;
int prev_len = 0, stage = 0;
bool inv = (flags & DFT_INVERSE) != 0;
@ -2570,7 +2569,7 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows )
sz = 2*len*complex_elem_size;
}
spec = 0;
void *spec = 0;
#ifdef USE_IPP_DFT
if( CV_IPP_CHECK_COND && (len*count >= 64) ) // use IPP DFT if available
{

View File

@ -725,11 +725,11 @@ template<typename T1, typename T2, typename T3> static void
MatrAXPY( int m, int n, const T1* x, int dx,
const T2* a, int inca, T3* y, int dy )
{
int i, j;
int i;
for( i = 0; i < m; i++, x += dx, y += dy )
{
T2 s = a[i*inca];
j=0;
int j = 0;
#if CV_ENABLE_UNROLLED
for(; j <= n - 4; j += 4 )
{

View File

@ -3957,6 +3957,7 @@ public:
derived()._releaseBufferEntry(entry);
}
reservedEntries_.clear();
currentReservedSize = 0;
}
};

View File

@ -69,7 +69,7 @@
#define HAVE_GCD
#endif
#if defined _MSC_VER && _MSC_VER >= 1600 && !defined(WINRT)
#if defined _MSC_VER && _MSC_VER >= 1600
#define HAVE_CONCURRENCY
#endif
@ -78,7 +78,8 @@
2. HAVE_CSTRIPES - 3rdparty library, should be explicitly enabled
3. HAVE_OPENMP - integrated to compiler, should be explicitly enabled
4. HAVE_GCD - system wide, used automatically (APPLE only)
5. HAVE_CONCURRENCY - part of runtime, used automatically (Windows only - MSVS 10, MSVS 11)
5. WINRT - system wide, used automatically (Windows RT only)
6. HAVE_CONCURRENCY - part of runtime, used automatically (Windows only - MSVS 10, MSVS 11)
*/
#if defined HAVE_TBB
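Whichever backend the chain above selects, user code reaches it through the same entry point. A minimal sketch:

    struct Doubler : cv::ParallelLoopBody
    {
        int* data;
        explicit Doubler(int* d) : data(d) {}
        void operator()(const cv::Range& r) const
        {
            for (int i = r.start; i < r.end; i++)
                data[i] *= 2;  // each stripe is processed by one backend task
        }
    };

    int buf[1000] = {};
    cv::parallel_for_(cv::Range(0, 1000), Doubler(buf));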
@ -105,6 +106,8 @@
#elif defined HAVE_GCD
#include <dispatch/dispatch.h>
#include <pthread.h>
#elif defined WINRT
#include <ppltasks.h>
#elif defined HAVE_CONCURRENCY
#include <ppl.h>
#endif
@ -118,6 +121,8 @@
# define CV_PARALLEL_FRAMEWORK "openmp"
#elif defined HAVE_GCD
# define CV_PARALLEL_FRAMEWORK "gcd"
#elif defined WINRT
# define CV_PARALLEL_FRAMEWORK "winrt-concurrency"
#elif defined HAVE_CONCURRENCY
# define CV_PARALLEL_FRAMEWORK "ms-concurrency"
#endif
@ -179,7 +184,7 @@ namespace
ProxyLoopBody* ptr_body = static_cast<ProxyLoopBody*>(context);
(*ptr_body)(cv::Range((int)index, (int)index + 1));
}
#elif defined HAVE_CONCURRENCY
#elif defined WINRT || defined HAVE_CONCURRENCY
class ProxyLoopBody : public ParallelLoopBodyWrapper
{
public:
@ -206,7 +211,10 @@ static tbb::task_scheduler_init tbbScheduler(tbb::task_scheduler_init::deferred)
static int numThreadsMax = omp_get_max_threads();
#elif defined HAVE_GCD
// nothing for GCD
#elif defined WINRT
// nothing for WINRT
#elif defined HAVE_CONCURRENCY
class SchedPtr
{
Concurrency::Scheduler* sched_;
@ -224,6 +232,7 @@ public:
~SchedPtr() { *this = 0; }
};
static SchedPtr pplScheduler;
#endif
#endif // CV_PARALLEL_FRAMEWORK
@ -272,6 +281,10 @@ void cv::parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body,
dispatch_queue_t concurrent_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
dispatch_apply_f(stripeRange.end - stripeRange.start, concurrent_queue, &pbody, block_function);
#elif defined WINRT
Concurrency::parallel_for(stripeRange.start, stripeRange.end, pbody);
#elif defined HAVE_CONCURRENCY
if(!pplScheduler || pplScheduler->Id() == Concurrency::CurrentScheduler::Id())
@ -330,11 +343,15 @@ int cv::getNumThreads(void)
return 512; // the GCD thread pool limit
#elif defined WINRT
return 0;
#elif defined HAVE_CONCURRENCY
return 1 + (pplScheduler == 0
? Concurrency::CurrentScheduler::Get()->GetNumberOfVirtualProcessors()
: pplScheduler->GetNumberOfVirtualProcessors());
? Concurrency::CurrentScheduler::Get()->GetNumberOfVirtualProcessors()
: pplScheduler->GetNumberOfVirtualProcessors());
#else
@ -371,6 +388,10 @@ void cv::setNumThreads( int threads )
// unsupported
// there is only private dispatch_queue_set_width() and only for desktop
#elif defined WINRT
return;
#elif defined HAVE_CONCURRENCY
if (threads <= 0)
@ -407,6 +428,8 @@ int cv::getThreadNum(void)
return omp_get_thread_num();
#elif defined HAVE_GCD
return (int)(size_t)(void*)pthread_self(); // no zero-based indexing
#elif defined WINRT
return 0;
#elif defined HAVE_CONCURRENCY
return std::max(0, (int)Concurrency::Context::VirtualProcessorId()); // zero for master thread, unique number for others but not necessarily 1,2,3,...
#else

View File

@ -66,7 +66,7 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComp
{
Mat data = _data.getMat(), _mean = __mean.getMat();
int covar_flags = CV_COVAR_SCALE;
int i, len, in_count;
int len, in_count;
Size mean_sz;
CV_Assert( data.channels() == 1 );
@ -131,6 +131,7 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComp
eigenvectors = evects1;
// normalize eigenvectors
int i;
for( i = 0; i < out_count; i++ )
{
Mat vec = eigenvectors.row(i);
@ -202,7 +203,7 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, double reta
{
Mat data = _data.getMat(), _mean = __mean.getMat();
int covar_flags = CV_COVAR_SCALE;
int i, len, in_count;
int len, in_count;
Size mean_sz;
CV_Assert( data.channels() == 1 );
@ -266,6 +267,7 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, double reta
eigenvectors = evects1;
// normalize all eigenvectors
int i;
for( i = 0; i < eigenvectors.rows; i++ )
{
Mat vec = eigenvectors.row(i);

View File

@ -232,15 +232,30 @@ inline bool checkScalar(InputArray sc, int atype, int sckind, int akind)
void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize );
#ifdef CV_COLLECT_IMPL_DATA
struct ImplCollector
{
ImplCollector()
{
useCollection = false;
implFlags = 0;
}
bool useCollection; // enable/disable impl data collection
int implFlags;
std::vector<int> implCode;
std::vector<String> implFun;
cv::Mutex mutex;
};
#endif
struct CoreTLSData
{
CoreTLSData() : device(0), useOpenCL(-1), useIPP(-1), useCollection(false)
CoreTLSData() : device(0), useOpenCL(-1), useIPP(-1)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
useTegra = -1;
#endif
#ifdef CV_COLLECT_IMPL_DATA
implFlags = 0;
#endif
}
@ -251,13 +266,6 @@ struct CoreTLSData
int useIPP; // 1 - use, 0 - do not use, -1 - auto/not initialized
#ifdef HAVE_TEGRA_OPTIMIZATION
int useTegra; // 1 - use, 0 - do not use, -1 - auto/not initialized
#endif
bool useCollection; // enable/disable impl data collection
#ifdef CV_COLLECT_IMPL_DATA
int implFlags;
std::vector<int> implCode;
std::vector<String> implFun;
#endif
};

View File

@ -1163,47 +1163,56 @@ TLSData<CoreTLSData>& getCoreTlsData()
#ifdef CV_COLLECT_IMPL_DATA
ImplCollector& getImplData()
{
static ImplCollector *value = new ImplCollector();
return *value;
}
void setImpl(int flags)
{
CoreTLSData* data = getCoreTlsData().get();
data->implFlags = flags;
data->implCode.clear();
data->implFun.clear();
cv::AutoLock lock(getImplData().mutex);
getImplData().implFlags = flags;
getImplData().implCode.clear();
getImplData().implFun.clear();
}
void addImpl(int flag, const char* func)
{
CoreTLSData* data = getCoreTlsData().get();
data->implFlags |= flag;
cv::AutoLock lock(getImplData().mutex);
getImplData().implFlags |= flag;
if(func) // use lazy collection if name was not specified
{
size_t index = data->implCode.size();
if(!index || (data->implCode[index-1] != flag || data->implFun[index-1].compare(func))) // avoid duplicates
size_t index = getImplData().implCode.size();
if(!index || (getImplData().implCode[index-1] != flag || getImplData().implFun[index-1].compare(func))) // avoid duplicates
{
data->implCode.push_back(flag);
data->implFun.push_back(func);
getImplData().implCode.push_back(flag);
getImplData().implFun.push_back(func);
}
}
}
int getImpl(std::vector<int> &impl, std::vector<String> &funName)
{
CoreTLSData* data = getCoreTlsData().get();
impl = data->implCode;
funName = data->implFun;
return data->implFlags; // return actual flags for lazy collection
cv::AutoLock lock(getImplData().mutex);
impl = getImplData().implCode;
funName = getImplData().implFun;
return getImplData().implFlags; // return actual flags for lazy collection
}
bool useCollection()
{
CoreTLSData* data = getCoreTlsData().get();
return data->useCollection;
return getImplData().useCollection;
}
void setUseCollection(bool flag)
{
CoreTLSData* data = getCoreTlsData().get();
data->useCollection = flag;
cv::AutoLock lock(getImplData().mutex);
getImplData().useCollection = flag;
}
#endif

View File

@ -45,7 +45,7 @@
using namespace cv;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
#if !defined HAVE_CUDA || !defined(HAVE_OPENCV_CUDAARITHM) || defined(CUDA_DISABLER)
void cv::cuda::FastOpticalFlowBM::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, int, int, Stream&) { throw_no_cuda(); }

View File

@ -45,7 +45,7 @@
using namespace cv;
using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
#if !defined (HAVE_CUDA) || !defined(HAVE_OPENCV_CUDAIMGPROC) || defined (CUDA_DISABLER)
void cv::cuda::createOpticalFlowNeedleMap(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&) { throw_no_cuda(); }

View File

@ -1,7 +1,3 @@
if(WINRT)
ocv_module_disable(imgcodecs)
endif()
set(the_description "Image codecs")
ocv_add_module(imgcodecs opencv_imgproc WRAP java python)

View File

@ -185,13 +185,14 @@ compression parameters :
void createAlphaMat(Mat &mat)
{
CV_Assert(mat.channels() == 4);
for (int i = 0; i < mat.rows; ++i) {
for (int j = 0; j < mat.cols; ++j) {
Vec4b& rgba = mat.at<Vec4b>(i, j);
rgba[0] = UCHAR_MAX;
rgba[1] = saturate_cast<uchar>((float (mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX);
rgba[2] = saturate_cast<uchar>((float (mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX);
rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2]));
Vec4b& bgra = mat.at<Vec4b>(i, j);
bgra[0] = UCHAR_MAX; // Blue
bgra[1] = saturate_cast<uchar>((float (mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX); // Green
bgra[2] = saturate_cast<uchar>((float (mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX); // Red
bgra[3] = saturate_cast<uchar>(0.5 * (bgra[1] + bgra[2])); // Alpha
}
}
}

View File

@ -45,6 +45,7 @@
#ifdef HAVE_JASPER
#include "grfmt_jpeg2000.hpp"
#include "opencv2/imgproc.hpp"
#ifdef WIN32
#define JAS_WIN_MSVC_BUILD 1
@ -159,6 +160,21 @@ bool Jpeg2KDecoder::readData( Mat& img )
jas_stream_t* stream = (jas_stream_t*)m_stream;
jas_image_t* image = (jas_image_t*)m_image;
#ifndef WIN32
// At least on some Linux instances the
// system libjasper segfaults when
// converting color to grey.
// We do this conversion manually at the end.
Mat clr;
if (CV_MAT_CN(img.type()) < CV_MAT_CN(this->type()))
{
clr.create(img.size().height, img.size().width, this->type());
color = true;
data = clr.ptr();
step = (int)clr.step;
}
#endif
if( stream && image )
{
bool convert;
@ -171,7 +187,7 @@ bool Jpeg2KDecoder::readData( Mat& img )
else
{
convert = (jas_clrspc_fam( jas_image_clrspc( image ) ) != JAS_CLRSPC_FAM_GRAY);
colorspace = JAS_CLRSPC_SGRAY; // TODO GENGRAY or SGRAY?
colorspace = JAS_CLRSPC_SGRAY; // TODO GENGRAY or SGRAY? (GENGRAY fails on Win.)
}
// convert to the desired colorspace
@ -256,6 +272,13 @@ bool Jpeg2KDecoder::readData( Mat& img )
close();
#ifndef WIN32
if (!clr.empty())
{
cv::cvtColor(clr, img, COLOR_BGR2GRAY);
}
#endif
return result;
}

View File

@ -374,15 +374,8 @@ imreadmulti_(const String& filename, int flags, std::vector<Mat>& mats)
type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
}
// established the required input image size.
CvSize size;
size.width = decoder->width();
size.height = decoder->height();
Mat mat;
mat.create(size.height, size.width, type);
// read the image data
Mat mat(decoder->height(), decoder->width(), type);
if (!decoder->readData(mat))
{
break;

View File

@ -87,6 +87,9 @@ TEST(Imgcodecs_imread, regression)
{
const char* const filenames[] =
{
#ifdef HAVE_JASPER
"Rome.jp2",
#endif
"color_palette_alpha.png",
"multipage.tif",
"rle.hdr",
@ -99,16 +102,32 @@ TEST(Imgcodecs_imread, regression)
for (size_t i = 0; i < sizeof(filenames) / sizeof(filenames[0]); ++i)
{
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_UNCHANGED));
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_GRAYSCALE));
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_COLOR));
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_ANYDEPTH));
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_ANYCOLOR));
if (i != 2) // GDAL does not support hdr
ASSERT_TRUE(imread_compare(folder + string(filenames[i]), IMREAD_LOAD_GDAL));
const string path = folder + string(filenames[i]);
ASSERT_TRUE(imread_compare(path, IMREAD_UNCHANGED));
ASSERT_TRUE(imread_compare(path, IMREAD_GRAYSCALE));
ASSERT_TRUE(imread_compare(path, IMREAD_COLOR));
ASSERT_TRUE(imread_compare(path, IMREAD_ANYDEPTH));
ASSERT_TRUE(imread_compare(path, IMREAD_ANYCOLOR));
if (path.substr(path.length() - 3) != "hdr")
{
// GDAL does not support hdr
ASSERT_TRUE(imread_compare(path, IMREAD_LOAD_GDAL));
}
}
}
#ifdef HAVE_JASPER
TEST(Imgcodecs_jasper, regression)
{
const string folder = string(cvtest::TS::ptr()->get_data_path()) + "/readwrite/";
ASSERT_TRUE(imread_compare(folder + "Bretagne2.jp2", IMREAD_COLOR));
ASSERT_TRUE(imread_compare(folder + "Bretagne2.jp2", IMREAD_GRAYSCALE));
ASSERT_TRUE(imread_compare(folder + "Grey.jp2", IMREAD_COLOR));
ASSERT_TRUE(imread_compare(folder + "Grey.jp2", IMREAD_GRAYSCALE));
}
#endif
class CV_GrfmtWriteBigImageTest : public cvtest::BaseTest
{
public:

View File

@ -83,7 +83,6 @@ Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
// interpolated values
Mat yi = Mat::zeros(XI.size(), XI.type());
for(int i = 0; i < n; i++) {
int c = 0;
int low = 0;
int high = X.rows - 1;
// set bounds
@ -93,7 +92,7 @@ Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
low = high - 1;
// binary search
while((high-low)>1) {
c = low + ((high - low) >> 1);
const int c = low + ((high - low) >> 1);
if(XI.at<_Tp>(i,0) > X.at<_Tp>(c,0)) {
low = c;
} else {

View File

@ -1152,6 +1152,7 @@ float cv::EMD( InputArray _signature1, InputArray _signature2,
{
_flow.create(signature1.rows, signature2.rows, CV_32F);
flow = _flow.getMat();
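// Mat::create() does not initialize memory; zero the flow matrix so entries not assigned by EMD stay defined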
flow = Scalar::all(0);
_cflow = flow;
}

View File

@ -90,11 +90,8 @@ HoughLinesStandard( const Mat& img, float rho, float theta,
int width = img.cols;
int height = img.rows;
if (max_theta < 0 || max_theta > CV_PI ) {
CV_Error( CV_StsBadArg, "max_theta must fall between 0 and pi" );
}
if (min_theta < 0 || min_theta > max_theta ) {
CV_Error( CV_StsBadArg, "min_theta must fall between 0 and max_theta" );
if (max_theta < min_theta ) {
CV_Error( CV_StsBadArg, "max_theta must be greater than min_theta" );
}
int numangle = cvRound((max_theta - min_theta) / theta);
int numrho = cvRound(((width + height) * 2 + 1) / rho);
@ -178,7 +175,7 @@ HoughLinesStandard( const Mat& img, float rho, float theta,
int n = cvFloor(idx*scale) - 1;
int r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.angle = static_cast<float>(min_theta) + n * theta;
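// with the accumulator now starting at min_theta, bin n maps back to the absolute angle min_theta + n*theta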
lines.push_back(Vec2f(line.rho, line.angle));
}
}

View File

@ -318,12 +318,7 @@ else(ANDROID)
COMMENT "Generating ${JAR_NAME}"
)
if(WIN32)
set(JAR_INSTALL_DIR java)
else(WIN32)
set(JAR_INSTALL_DIR share/OpenCV/java)
endif(WIN32)
install(FILES ${JAR_FILE} OPTIONAL DESTINATION ${JAR_INSTALL_DIR} COMPONENT java)
install(FILES ${JAR_FILE} OPTIONAL DESTINATION ${OPENCV_JAR_INSTALL_PATH} COMPONENT java)
endif(ANDROID)
# step 5: build native part
@ -398,12 +393,12 @@ if(ANDROID)
else()
if(NOT INSTALL_CREATE_DISTRIB)
ocv_install_target(${the_module} OPTIONAL EXPORT OpenCVModules
RUNTIME DESTINATION ${JAR_INSTALL_DIR} COMPONENT java
LIBRARY DESTINATION ${JAR_INSTALL_DIR} COMPONENT java)
RUNTIME DESTINATION ${OPENCV_JAR_INSTALL_PATH} COMPONENT java
LIBRARY DESTINATION ${OPENCV_JAR_INSTALL_PATH} COMPONENT java)
else()
ocv_install_target(${the_module} OPTIONAL EXPORT OpenCVModules
RUNTIME DESTINATION ${JAR_INSTALL_DIR}/${OpenCV_ARCH} COMPONENT java
LIBRARY DESTINATION ${JAR_INSTALL_DIR}/${OpenCV_ARCH} COMPONENT java)
RUNTIME DESTINATION ${OPENCV_JAR_INSTALL_PATH}/${OpenCV_ARCH} COMPONENT java
LIBRARY DESTINATION ${OPENCV_JAR_INSTALL_PATH}/${OpenCV_ARCH} COMPONENT java)
endif()
endif()

View File

@ -82,7 +82,7 @@ public class OpenCVTestRunner extends InstrumentationTestRunner {
// Using OpenCV Manager for initialization;
Log("Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, getContext(), mLoaderCallback);
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_0_0, getContext(), mLoaderCallback);
synchronized (this) {
try {

View File

@ -75,7 +75,7 @@ const_ignore_list = (
"CV_CAP_PROP_CONVERT_RGB",
"CV_CAP_PROP_WHITE_BALANCE_BLUE_U",
"CV_CAP_PROP_RECTIFICATION",
"CV_CAP_PROP_MONOCROME",
"CV_CAP_PROP_MONOCHROME",
"CV_CAP_PROP_SHARPNESS",
"CV_CAP_PROP_AUTO_EXPOSURE",
"CV_CAP_PROP_GAMMA",

View File

@ -21,8 +21,9 @@ class AsyncServiceHelper
final LoaderCallbackInterface Callback)
{
AsyncServiceHelper helper = new AsyncServiceHelper(Version, AppContext, Callback);
if (AppContext.bindService(new Intent("org.opencv.engine.BIND"),
helper.mServiceConnection, Context.BIND_AUTO_CREATE))
Intent intent = new Intent("org.opencv.engine.BIND");
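// Make the bind intent explicit (Android 5.0+ rejects implicit service intents) by pinning it to the OpenCV Manager package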
intent.setPackage("org.opencv.engine");
if (AppContext.bindService(intent, helper.mServiceConnection, Context.BIND_AUTO_CREATE))
{
return true;
}

View File

@ -47,6 +47,22 @@ public class OpenCVLoader
*/
public static final String OPENCV_VERSION_2_4_9 = "2.4.9";
/**
* OpenCV Library version 2.4.10.
*/
public static final String OPENCV_VERSION_2_4_10 = "2.4.10";
/**
* OpenCV Library version 2.4.11.
*/
public static final String OPENCV_VERSION_2_4_11 = "2.4.11";
/**
* OpenCV Library version 3.0.0.
*/
public static final String OPENCV_VERSION_3_0_0 = "3.0.0";
/**
* Loads and initializes OpenCV library from current application package. Roughly, it's an analog of system.loadLibrary("opencv_java").
* @return Returns true if initialization of OpenCV was successful.

View File

@ -1,2 +1,2 @@
set(the_description "Machine Learning")
ocv_define_module(ml opencv_core)
ocv_define_module(ml opencv_core WRAP java python)

View File

@ -104,7 +104,7 @@ enum SampleTypes
It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate
being computed by cross-validation.
*/
class CV_EXPORTS_W_MAP ParamGrid
class CV_EXPORTS ParamGrid
{
public:
/** @brief Default constructor */
@ -112,8 +112,8 @@ public:
/** @brief Constructor with parameters */
ParamGrid(double _minVal, double _maxVal, double _logStep);
CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.
double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.
/** @brief Logarithmic step for iterating the statmodel parameter.
The grid determines the following iteration sequence of the statmodel parameter values:
@ -122,7 +122,7 @@ public:
\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]
The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
*/
CV_PROP_RW double logStep;
double logStep;
};
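// Illustrative sketch (not part of this patch): the values a ParamGrid
// produces are minVal, minVal*logStep, minVal*logStep^2, ... while the value
// stays below maxVal, matching the formula above; assumes <iostream>.
static void printGridValues(const cv::ml::ParamGrid& g)
{
    for( double v = g.minVal; v == g.minVal || v < g.maxVal; v *= g.logStep )
        std::cout << v << std::endl; // e.g. 1, 10 for minVal=1, maxVal=100, logStep=10
}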
/** @brief Class encapsulating training data.
@ -134,22 +134,22 @@ of this class into StatModel::train.
@sa @ref ml_intro_data
*/
class CV_EXPORTS TrainData
class CV_EXPORTS_W TrainData
{
public:
static inline float missingValue() { return FLT_MAX; }
virtual ~TrainData();
virtual int getLayout() const = 0;
virtual int getNTrainSamples() const = 0;
virtual int getNTestSamples() const = 0;
virtual int getNSamples() const = 0;
virtual int getNVars() const = 0;
virtual int getNAllVars() const = 0;
CV_WRAP virtual int getLayout() const = 0;
CV_WRAP virtual int getNTrainSamples() const = 0;
CV_WRAP virtual int getNTestSamples() const = 0;
CV_WRAP virtual int getNSamples() const = 0;
CV_WRAP virtual int getNVars() const = 0;
CV_WRAP virtual int getNAllVars() const = 0;
virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
virtual Mat getSamples() const = 0;
virtual Mat getMissing() const = 0;
CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
CV_WRAP virtual Mat getSamples() const = 0;
CV_WRAP virtual Mat getMissing() const = 0;
/** @brief Returns matrix of train samples
@ -163,7 +163,7 @@ public:
In the current implementation the function tries to avoid physical data copying and returns the
matrix stored inside TrainData (unless transposition or compression is needed).
*/
virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
bool compressSamples=true,
bool compressVars=true) const = 0;
@ -172,7 +172,7 @@ public:
The function returns ordered or the original categorical responses. Usually it's used in
regression algorithms.
*/
virtual Mat getTrainResponses() const = 0;
CV_WRAP virtual Mat getTrainResponses() const = 0;
/** @brief Returns the vector of normalized categorical responses
@ -180,38 +180,38 @@ public:
classes>-1`. The actual label value can then be retrieved from the class label vector, see
TrainData::getClassLabels.
*/
virtual Mat getTrainNormCatResponses() const = 0;
virtual Mat getTestResponses() const = 0;
virtual Mat getTestNormCatResponses() const = 0;
virtual Mat getResponses() const = 0;
virtual Mat getNormCatResponses() const = 0;
virtual Mat getSampleWeights() const = 0;
virtual Mat getTrainSampleWeights() const = 0;
virtual Mat getTestSampleWeights() const = 0;
virtual Mat getVarIdx() const = 0;
virtual Mat getVarType() const = 0;
virtual int getResponseType() const = 0;
virtual Mat getTrainSampleIdx() const = 0;
virtual Mat getTestSampleIdx() const = 0;
virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;
CV_WRAP virtual Mat getTestResponses() const = 0;
CV_WRAP virtual Mat getTestNormCatResponses() const = 0;
CV_WRAP virtual Mat getResponses() const = 0;
CV_WRAP virtual Mat getNormCatResponses() const = 0;
CV_WRAP virtual Mat getSampleWeights() const = 0;
CV_WRAP virtual Mat getTrainSampleWeights() const = 0;
CV_WRAP virtual Mat getTestSampleWeights() const = 0;
CV_WRAP virtual Mat getVarIdx() const = 0;
CV_WRAP virtual Mat getVarType() const = 0;
CV_WRAP virtual int getResponseType() const = 0;
CV_WRAP virtual Mat getTrainSampleIdx() const = 0;
CV_WRAP virtual Mat getTestSampleIdx() const = 0;
CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;
virtual Mat getDefaultSubstValues() const = 0;
CV_WRAP virtual Mat getDefaultSubstValues() const = 0;
virtual int getCatCount(int vi) const = 0;
CV_WRAP virtual int getCatCount(int vi) const = 0;
/** @brief Returns the vector of class labels
The function returns vector of unique labels occurred in the responses.
*/
virtual Mat getClassLabels() const = 0;
CV_WRAP virtual Mat getClassLabels() const = 0;
virtual Mat getCatOfs() const = 0;
virtual Mat getCatMap() const = 0;
CV_WRAP virtual Mat getCatOfs() const = 0;
CV_WRAP virtual Mat getCatMap() const = 0;
/** @brief Splits the training data into the training and test parts
@sa TrainData::setTrainTestSplitRatio
*/
virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;
CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;
/** @brief Splits the training data into the training and test parts
@ -221,10 +221,10 @@ public:
subset can be retrieved and processed as well.
@sa TrainData::setTrainTestSplit
*/
virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
virtual void shuffleTrainTest() = 0;
CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
CV_WRAP virtual void shuffleTrainTest() = 0;
static Mat getSubVector(const Mat& vec, const Mat& idx);
CV_WRAP static Mat getSubVector(const Mat& vec, const Mat& idx);
/** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.
@ -280,7 +280,7 @@ public:
<number_of_variables_in_responses>`, containing types of each input and output variable. See
ml::VariableTypes.
*/
static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
InputArray sampleWeights=noArray(), InputArray varType=noArray());
};
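// Illustrative sketch (not part of this patch): building TrainData from raw
// matrices and holding out 20% of the samples for testing; 'samples' and
// 'responses' are assumed CV_32F row-sample matrices.
static void trainDataExample(const cv::Mat& samples, const cv::Mat& responses)
{
    cv::Ptr<cv::ml::TrainData> data =
        cv::ml::TrainData::create(samples, cv::ml::ROW_SAMPLE, responses);
    data->setTrainTestSplitRatio(0.8, /*shuffle=*/true);
    // getTrainSamples()/getTestResponses() now reflect the 80/20 split
}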
@ -297,15 +297,15 @@ public:
COMPRESSED_INPUT=2,
PREPROCESSED_INPUT=4
};
virtual void clear();
CV_WRAP virtual void clear();
/** @brief Returns the number of variables in training samples */
virtual int getVarCount() const = 0;
CV_WRAP virtual int getVarCount() const = 0;
/** @brief Returns true if the model is trained */
virtual bool isTrained() const = 0;
CV_WRAP virtual bool isTrained() const = 0;
/** @brief Returns true if the model is classifier */
virtual bool isClassifier() const = 0;
CV_WRAP virtual bool isClassifier() const = 0;
/** @brief Trains the statistical model
@ -314,7 +314,7 @@ public:
@param flags optional flags, depending on the model. Some of the models can be updated with the
new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
*/
virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );
CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );
/** @brief Trains the statistical model
@ -322,7 +322,7 @@ public:
@param layout See ml::SampleTypes.
@param responses vector of responses associated with the training samples.
*/
virtual bool train( InputArray samples, int layout, InputArray responses );
CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );
/** @brief Computes error on the training or test dataset
@ -337,7 +337,7 @@ public:
The method uses StatModel::predict to compute the error. For regression models the error is
computed as RMS, for classifiers - as a percentage of misclassified samples (0%-100%).
*/
virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;
CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;
/** @brief Predicts response(s) for the provided sample(s)
@ -345,7 +345,7 @@ public:
@param results The optional output matrix of results.
@param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.
*/
virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
/** @brief Loads model from the file
@ -393,11 +393,11 @@ public:
/** Saves the model to a file.
In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */
virtual void save(const String& filename) const;
CV_WRAP virtual void save(const String& filename) const;
/** Returns model string identifier.
This string is used as top level xml/yml node tag when model is saved to a file or string. */
virtual String getDefaultModelName() const = 0;
CV_WRAP virtual String getDefaultModelName() const = 0;
};
/****************************************************************************************\
@ -419,12 +419,12 @@ public:
The vector outputProbs contains the output probabilities corresponding to each element of
result.
*/
virtual float predictProb( InputArray inputs, OutputArray outputs,
CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,
OutputArray outputProbs, int flags=0 ) const = 0;
/** Creates empty model
Use StatModel::train to train the model after creation. */
static Ptr<NormalBayesClassifier> create();
CV_WRAP static Ptr<NormalBayesClassifier> create();
};
/****************************************************************************************\
@ -440,16 +440,28 @@ class CV_EXPORTS_W KNearest : public StatModel
public:
/** Default number of neighbors to use in predict method. */
CV_PURE_PROPERTY(int, DefaultK)
/** @see setDefaultK */
CV_WRAP virtual int getDefaultK() const = 0;
/** @copybrief getDefaultK @see getDefaultK */
CV_WRAP virtual void setDefaultK(int val) = 0;
/** Whether classification or regression model should be trained. */
CV_PURE_PROPERTY(bool, IsClassifier)
/** @see setIsClassifier */
CV_WRAP virtual bool getIsClassifier() const = 0;
/** @copybrief getIsClassifier @see getIsClassifier */
CV_WRAP virtual void setIsClassifier(bool val) = 0;
/** Parameter for KDTree implementation. */
CV_PURE_PROPERTY(int, Emax)
/** @see setEmax */
CV_WRAP virtual int getEmax() const = 0;
/** @copybrief getEmax @see getEmax */
CV_WRAP virtual void setEmax(int val) = 0;
/** %Algorithm type, one of KNearest::Types. */
CV_PURE_PROPERTY(int, AlgorithmType)
/** @see setAlgorithmType */
CV_WRAP virtual int getAlgorithmType() const = 0;
/** @copybrief getAlgorithmType @see getAlgorithmType */
CV_WRAP virtual void setAlgorithmType(int val) = 0;
/** @brief Finds the neighbors and predicts responses for input vectors.
@ -477,7 +489,7 @@ public:
The function is parallelized with the TBB library.
*/
virtual float findNearest( InputArray samples, int k,
CV_WRAP virtual float findNearest( InputArray samples, int k,
OutputArray results,
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const = 0;
@ -494,7 +506,7 @@ public:
The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method.
*/
static Ptr<KNearest> create();
CV_WRAP static Ptr<KNearest> create();
};
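// Illustrative sketch (not part of this patch): the newly wrapped KNearest
// API in use; trainSamples, labels and query are assumed CV_32F row-sample matrices.
static void knnExample(const cv::Mat& trainSamples, const cv::Mat& labels, const cv::Mat& query)
{
    cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create();
    knn->setIsClassifier(true);
    knn->setDefaultK(3);
    knn->train(trainSamples, cv::ml::ROW_SAMPLE, labels);
    cv::Mat results, neighbors, dists;
    knn->findNearest(query, 3, results, neighbors, dists);
}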
/****************************************************************************************\
@ -518,52 +530,79 @@ public:
/** Type of a %SVM formulation.
See SVM::Types. Default value is SVM::C_SVC. */
CV_PURE_PROPERTY(int, Type)
/** @see setType */
CV_WRAP virtual int getType() const = 0;
/** @copybrief getType @see getType */
CV_WRAP virtual void setType(int val) = 0;
/** Parameter \f$\gamma\f$ of a kernel function.
For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */
CV_PURE_PROPERTY(double, Gamma)
/** @see setGamma */
CV_WRAP virtual double getGamma() const = 0;
/** @copybrief getGamma @see getGamma */
CV_WRAP virtual void setGamma(double val) = 0;
/** Parameter _coef0_ of a kernel function.
For SVM::POLY or SVM::SIGMOID. Default value is 0.*/
CV_PURE_PROPERTY(double, Coef0)
/** @see setCoef0 */
CV_WRAP virtual double getCoef0() const = 0;
/** @copybrief getCoef0 @see getCoef0 */
CV_WRAP virtual void setCoef0(double val) = 0;
/** Parameter _degree_ of a kernel function.
For SVM::POLY. Default value is 0. */
CV_PURE_PROPERTY(double, Degree)
/** @see setDegree */
CV_WRAP virtual double getDegree() const = 0;
/** @copybrief getDegree @see getDegree */
CV_WRAP virtual void setDegree(double val) = 0;
/** Parameter _C_ of a %SVM optimization problem.
For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */
CV_PURE_PROPERTY(double, C)
/** @see setC */
CV_WRAP virtual double getC() const = 0;
/** @copybrief getC @see getC */
CV_WRAP virtual void setC(double val) = 0;
/** Parameter \f$\nu\f$ of a %SVM optimization problem.
For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */
CV_PURE_PROPERTY(double, Nu)
/** @see setNu */
CV_WRAP virtual double getNu() const = 0;
/** @copybrief getNu @see getNu */
CV_WRAP virtual void setNu(double val) = 0;
/** Parameter \f$\epsilon\f$ of a %SVM optimization problem.
For SVM::EPS_SVR. Default value is 0. */
CV_PURE_PROPERTY(double, P)
/** @see setP */
CV_WRAP virtual double getP() const = 0;
/** @copybrief getP @see getP */
CV_WRAP virtual void setP(double val) = 0;
/** Optional weights in the SVM::C_SVC problem, assigned to particular classes.
They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus
these weights affect the misclassification penalty for different classes. The larger the weight,
the larger the penalty on misclassification of data from the corresponding class. Default value is
empty Mat. */
CV_PURE_PROPERTY_S(cv::Mat, ClassWeights)
/** @see setClassWeights */
CV_WRAP virtual cv::Mat getClassWeights() const = 0;
/** @copybrief getClassWeights @see getClassWeights */
CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0;
/** Termination criteria of the iterative %SVM training procedure which solves a partial
case of constrained quadratic optimization problem.
You can specify tolerance and/or the maximum number of iterations. Default value is
`TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */
CV_PURE_PROPERTY_S(cv::TermCriteria, TermCriteria)
/** @see setTermCriteria */
CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0;
/** @copybrief getTermCriteria @see getTermCriteria */
CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;
/** Type of a %SVM kernel.
See SVM::KernelTypes. Default value is SVM::RBF. */
virtual int getKernelType() const = 0;
CV_WRAP virtual int getKernelType() const = 0;
/** Initialize with one of predefined kernels.
See SVM::KernelTypes. */
virtual void setKernel(int kernelType) = 0;
CV_WRAP virtual void setKernel(int kernelType) = 0;
/** Initialize with custom kernel.
See SVM::Kernel class for implementation details */
@ -695,7 +734,7 @@ public:
The method returns rho parameter of the decision function, a scalar subtracted from the weighted
sum of kernel responses.
*/
virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;
CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;
/** @brief Generates a grid for %SVM parameters.
@ -710,7 +749,7 @@ public:
/** Creates empty model.
Use StatModel::train to train the model. Since %SVM has several parameters, you may want to
find the best parameters for your problem, it can be done with SVM::trainAuto. */
static Ptr<SVM> create();
CV_WRAP static Ptr<SVM> create();
};
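// Illustrative sketch (not part of this patch): minimal use of the wrapped
// setters above; 'samples' (CV_32F) and 'labels' (CV_32S) are assumed inputs.
static void svmExample(const cv::Mat& samples, const cv::Mat& labels)
{
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::C_SVC);
    svm->setKernel(cv::ml::SVM::RBF);
    svm->setC(1.0);
    svm->train(samples, cv::ml::ROW_SAMPLE, labels);
}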
/****************************************************************************************\
@ -755,29 +794,38 @@ public:
Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could
determine the optimal number of mixtures within a specified value range, but that is not the
case in ML yet. */
CV_PURE_PROPERTY(int, ClustersNumber)
/** @see setClustersNumber */
CV_WRAP virtual int getClustersNumber() const = 0;
/** @copybrief getClustersNumber @see getClustersNumber */
CV_WRAP virtual void setClustersNumber(int val) = 0;
/** Constraint on covariance matrices which defines type of matrices.
See EM::Types. */
CV_PURE_PROPERTY(int, CovarianceMatrixType)
/** @see setCovarianceMatrixType */
CV_WRAP virtual int getCovarianceMatrixType() const = 0;
/** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */
CV_WRAP virtual void setCovarianceMatrixType(int val) = 0;
/** The termination criteria of the %EM algorithm.
The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of
M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default
maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */
CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
/** @see setTermCriteria */
CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
/** @copybrief getTermCriteria @see getTermCriteria */
CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;
/** @brief Returns weights of the mixtures
Returns vector with the number of elements equal to the number of mixtures.
*/
virtual Mat getWeights() const = 0;
CV_WRAP virtual Mat getWeights() const = 0;
/** @brief Returns the cluster centers (means of the Gaussian mixture)
Returns matrix with the number of rows equal to the number of mixtures and number of columns
equal to the space dimensionality.
*/
virtual Mat getMeans() const = 0;
CV_WRAP virtual Mat getMeans() const = 0;
/** @brief Returns covariance matrices
Returns a vector of covariance matrices. The number of matrices is the number of Gaussian mixtures,
@ -797,7 +845,7 @@ public:
the sample. First element is an index of the most probable mixture component for the given
sample.
*/
CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;
CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;
/** @brief Estimate the Gaussian mixture parameters from a samples set.
@ -827,7 +875,7 @@ public:
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
CV_64FC1 type.
*/
virtual bool trainEM(InputArray samples,
CV_WRAP virtual bool trainEM(InputArray samples,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray()) = 0;
@ -859,7 +907,7 @@ public:
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
CV_64FC1 type.
*/
virtual bool trainE(InputArray samples, InputArray means0,
CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,
InputArray covs0=noArray(),
InputArray weights0=noArray(),
OutputArray logLikelihoods=noArray(),
@ -884,7 +932,7 @@ public:
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
CV_64FC1 type.
*/
virtual bool trainM(InputArray samples, InputArray probs0,
CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray()) = 0;
@ -893,7 +941,7 @@ public:
The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
can use one of the EM::train\* methods or load it from file using StatModel::load\<EM\>(filename).
*/
static Ptr<EM> create();
CV_WRAP static Ptr<EM> create();
};
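// Illustrative sketch (not part of this patch): the usual EM workflow with
// the wrapped API above; 'samples' is an assumed CV_64F row-sample matrix.
static void emExample(const cv::Mat& samples)
{
    cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
    em->setClustersNumber(3);
    em->trainEM(samples);
    cv::Vec2d res = em->predict2(samples.row(0), cv::noArray());
    // res[0]: likelihood logarithm, res[1]: index of the most probable mixture
}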
/****************************************************************************************\
@ -926,46 +974,70 @@ public:
values. In case of regression and 2-class classification the optimal split can be found
efficiently without employing clustering, thus the parameter is not used in these cases.
Default value is 10.*/
CV_PURE_PROPERTY(int, MaxCategories)
/** @see setMaxCategories */
CV_WRAP virtual int getMaxCategories() const = 0;
/** @copybrief getMaxCategories @see getMaxCategories */
CV_WRAP virtual void setMaxCategories(int val) = 0;
/** The maximum possible depth of the tree.
That is, the training algorithm attempts to split a node while its depth is less than maxDepth.
The root node has zero depth. The actual depth may be smaller if the other termination criteria
are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the
tree is pruned. Default value is INT_MAX.*/
CV_PURE_PROPERTY(int, MaxDepth)
/** @see setMaxDepth */
CV_WRAP virtual int getMaxDepth() const = 0;
/** @copybrief getMaxDepth @see getMaxDepth */
CV_WRAP virtual void setMaxDepth(int val) = 0;
/** If the number of samples in a node is less than this parameter then the node will not be split.
Default value is 10.*/
CV_PURE_PROPERTY(int, MinSampleCount)
/** @see setMinSampleCount */
CV_WRAP virtual int getMinSampleCount() const = 0;
/** @copybrief getMinSampleCount @see getMinSampleCount */
CV_WRAP virtual void setMinSampleCount(int val) = 0;
/** If CVFolds \> 1 then the algorithm prunes the built decision tree using a K-fold
cross-validation procedure where K is equal to CVFolds.
Default value is 10.*/
CV_PURE_PROPERTY(int, CVFolds)
/** @see setCVFolds */
CV_WRAP virtual int getCVFolds() const = 0;
/** @copybrief getCVFolds @see getCVFolds */
CV_WRAP virtual void setCVFolds(int val) = 0;
/** If true then surrogate splits will be built.
These splits make it possible to work with missing data and to compute variable importance correctly.
Default value is false.
@note currently it's not implemented.*/
CV_PURE_PROPERTY(bool, UseSurrogates)
/** @see setUseSurrogates */
CV_WRAP virtual bool getUseSurrogates() const = 0;
/** @copybrief getUseSurrogates @see getUseSurrogates */
CV_WRAP virtual void setUseSurrogates(bool val) = 0;
/** If true then pruning will be harsher.
This makes the tree more compact and more resistant to training data noise, but a bit less
accurate. Default value is true.*/
CV_PURE_PROPERTY(bool, Use1SERule)
/** @see setUse1SERule */
CV_WRAP virtual bool getUse1SERule() const = 0;
/** @copybrief getUse1SERule @see getUse1SERule */
CV_WRAP virtual void setUse1SERule(bool val) = 0;
/** If true then pruned branches are physically removed from the tree.
Otherwise they are retained and it is possible to get results from the original unpruned (or
pruned less aggressively) tree. Default value is true.*/
CV_PURE_PROPERTY(bool, TruncatePrunedTree)
/** @see setTruncatePrunedTree */
CV_WRAP virtual bool getTruncatePrunedTree() const = 0;
/** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */
CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0;
/** Termination criteria for regression trees.
If all absolute differences between an estimated value in a node and values of train samples
in this node are less than this parameter then the node will not be split further. Default
value is 0.01f*/
CV_PURE_PROPERTY(float, RegressionAccuracy)
/** @see setRegressionAccuracy */
CV_WRAP virtual float getRegressionAccuracy() const = 0;
/** @copybrief getRegressionAccuracy @see getRegressionAccuracy */
CV_WRAP virtual void setRegressionAccuracy(float val) = 0;
/** @brief The array of a priori class probabilities, sorted by the class label value.
@ -982,7 +1054,10 @@ public:
category is 1 and the weight of the second category is 10, then each mistake in predicting
the second category is equivalent to making 10 mistakes in predicting the first category.
Default value is empty Mat.*/
CV_PURE_PROPERTY_S(cv::Mat, Priors)
/** @see setPriors */
CV_WRAP virtual cv::Mat getPriors() const = 0;
/** @copybrief getPriors @see getPriors */
CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;
/** @brief The class represents a decision tree node.
*/
@ -1054,7 +1129,7 @@ public:
trained using train method (see StatModel::train). Alternatively, you can load the model from
file using StatModel::load\<DTrees\>(filename).
*/
static Ptr<DTrees> create();
CV_WRAP static Ptr<DTrees> create();
};
/****************************************************************************************\
@ -1071,13 +1146,19 @@ public:
/** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance.
Default value is false.*/
CV_PURE_PROPERTY(bool, CalculateVarImportance)
/** @see setCalculateVarImportance */
CV_WRAP virtual bool getCalculateVarImportance() const = 0;
/** @copybrief getCalculateVarImportance @see getCalculateVarImportance */
CV_WRAP virtual void setCalculateVarImportance(bool val) = 0;
/** The size of the randomly selected subset of features at each tree node that is used
to find the best split(s).
If you set it to 0 then the size will be set to the square root of the total number of
features. Default value is 0.*/
CV_PURE_PROPERTY(int, ActiveVarCount)
/** @see setActiveVarCount */
CV_WRAP virtual int getActiveVarCount() const = 0;
/** @copybrief getActiveVarCount @see getActiveVarCount */
CV_WRAP virtual void setActiveVarCount(int val) = 0;
/** The termination criteria that specifies when the training algorithm stops.
Either when the specified number of trees is trained and added to the ensemble or when
@ -1086,20 +1167,23 @@ public:
pass a certain number of trees. Keep in mind that the number of trees increases the
prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
TermCriteria::EPS, 50, 0.1)*/
CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
/** @see setTermCriteria */
CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
/** @copybrief getTermCriteria @see getTermCriteria */
CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;
/** Returns the variable importance array.
The method returns the variable importance vector, computed at the training stage when
CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is
returned.
*/
virtual Mat getVarImportance() const = 0;
CV_WRAP virtual Mat getVarImportance() const = 0;
/** Creates the empty model.
Use StatModel::train to create and train the model, or
StatModel::load to load a pre-trained model.
*/
static Ptr<RTrees> create();
CV_WRAP static Ptr<RTrees> create();
};
/****************************************************************************************\
@ -1115,16 +1199,25 @@ class CV_EXPORTS_W Boost : public DTrees
public:
/** Type of the boosting algorithm.
See Boost::Types. Default value is Boost::REAL. */
CV_PURE_PROPERTY(int, BoostType)
/** @see setBoostType */
CV_WRAP virtual int getBoostType() const = 0;
/** @copybrief getBoostType @see getBoostType */
CV_WRAP virtual void setBoostType(int val) = 0;
/** The number of weak classifiers.
Default value is 100. */
CV_PURE_PROPERTY(int, WeakCount)
/** @see setWeakCount */
CV_WRAP virtual int getWeakCount() const = 0;
/** @copybrief getWeakCount @see getWeakCount */
CV_WRAP virtual void setWeakCount(int val) = 0;
/** A threshold between 0 and 1 used to save computational time.
Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next*
iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
CV_PURE_PROPERTY(double, WeightTrimRate)
/** @see setWeightTrimRate */
CV_WRAP virtual double getWeightTrimRate() const = 0;
/** @copybrief getWeightTrimRate @see getWeightTrimRate */
CV_WRAP virtual void setWeightTrimRate(double val) = 0;
/** Boosting type.
Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
@ -1139,7 +1232,7 @@ public:
/** Creates the empty model.
Use StatModel::train to train the model, StatModel::load\<Boost\>(filename) to load the pre-trained model. */
static Ptr<Boost> create();
CV_WRAP static Ptr<Boost> create();
};
/****************************************************************************************\
@ -1189,7 +1282,7 @@ Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
@sa @ref ml_intro_ann
*/
class CV_EXPORTS_W ANN_MLP : public StatModel
class CV_EXPORTS ANN_MLP : public StatModel
{
public:
/** Available training methods */
@ -1232,37 +1325,61 @@ public:
You can specify the maximum number of iterations (maxCount) and/or how much the error could
change between the iterations to make the algorithm continue (epsilon). Default value is
TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
CV_PURE_PROPERTY(TermCriteria, TermCriteria)
/** @see setTermCriteria */
virtual TermCriteria getTermCriteria() const = 0;
/** @copybrief getTermCriteria @see getTermCriteria */
virtual void setTermCriteria(TermCriteria val) = 0;
/** BPROP: Strength of the weight gradient term.
The recommended value is about 0.1. Default value is 0.1.*/
CV_PURE_PROPERTY(double, BackpropWeightScale)
/** @see setBackpropWeightScale */
virtual double getBackpropWeightScale() const = 0;
/** @copybrief getBackpropWeightScale @see getBackpropWeightScale */
virtual void setBackpropWeightScale(double val) = 0;
/** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
This parameter provides some inertia to smooth the random fluctuations of the weights. It can
vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
Default value is 0.1.*/
CV_PURE_PROPERTY(double, BackpropMomentumScale)
/** @see setBackpropMomentumScale */
virtual double getBackpropMomentumScale() const = 0;
/** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */
virtual void setBackpropMomentumScale(double val) = 0;
/** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
Default value is 0.1.*/
CV_PURE_PROPERTY(double, RpropDW0)
/** @see setRpropDW0 */
virtual double getRpropDW0() const = 0;
/** @copybrief getRpropDW0 @see getRpropDW0 */
virtual void setRpropDW0(double val) = 0;
/** RPROP: Increase factor \f$\eta^+\f$.
It must be \>1. Default value is 1.2.*/
CV_PURE_PROPERTY(double, RpropDWPlus)
/** @see setRpropDWPlus */
virtual double getRpropDWPlus() const = 0;
/** @copybrief getRpropDWPlus @see getRpropDWPlus */
virtual void setRpropDWPlus(double val) = 0;
/** RPROP: Decrease factor \f$\eta^-\f$.
It must be \<1. Default value is 0.5.*/
CV_PURE_PROPERTY(double, RpropDWMinus)
/** @see setRpropDWMinus */
virtual double getRpropDWMinus() const = 0;
/** @copybrief getRpropDWMinus @see getRpropDWMinus */
virtual void setRpropDWMinus(double val) = 0;
/** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
It must be positive. Default value is FLT_EPSILON.*/
CV_PURE_PROPERTY(double, RpropDWMin)
/** @see setRpropDWMin */
virtual double getRpropDWMin() const = 0;
/** @copybrief getRpropDWMin @see getRpropDWMin */
virtual void setRpropDWMin(double val) = 0;
/** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
It must be \>1. Default value is 50.*/
CV_PURE_PROPERTY(double, RpropDWMax)
/** @see setRpropDWMax */
virtual double getRpropDWMax() const = 0;
/** @copybrief getRpropDWMax @see getRpropDWMax */
virtual void setRpropDWMax(double val) = 0;
/** possible activation functions */
enum ActivationFunctions {
@ -1313,29 +1430,47 @@ public:
@sa @ref ml_intro_lr
*/
class CV_EXPORTS LogisticRegression : public StatModel
class CV_EXPORTS_W LogisticRegression : public StatModel
{
public:
/** Learning rate. */
CV_PURE_PROPERTY(double, LearningRate)
/** @see setLearningRate */
CV_WRAP virtual double getLearningRate() const = 0;
/** @copybrief getLearningRate @see getLearningRate */
CV_WRAP virtual void setLearningRate(double val) = 0;
/** Number of iterations. */
CV_PURE_PROPERTY(int, Iterations)
/** @see setIterations */
CV_WRAP virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
CV_WRAP virtual void setIterations(int val) = 0;
/** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
CV_PURE_PROPERTY(int, Regularization)
/** @see setRegularization */
CV_WRAP virtual int getRegularization() const = 0;
/** @copybrief getRegularization @see getRegularization */
CV_WRAP virtual void setRegularization(int val) = 0;
/** Kind of training method used. See LogisticRegression::Methods. */
CV_PURE_PROPERTY(int, TrainMethod)
/** @see setTrainMethod */
CV_WRAP virtual int getTrainMethod() const = 0;
/** @copybrief getTrainMethod @see getTrainMethod */
CV_WRAP virtual void setTrainMethod(int val) = 0;
/** Specifies the number of training samples taken in each step of Mini-Batch Gradient
Descent. It is used only with the LogisticRegression::MINI_BATCH training algorithm. It
has to take values less than the total number of training samples. */
CV_PURE_PROPERTY(int, MiniBatchSize)
/** @see setMiniBatchSize */
CV_WRAP virtual int getMiniBatchSize() const = 0;
/** @copybrief getMiniBatchSize @see getMiniBatchSize */
CV_WRAP virtual void setMiniBatchSize(int val) = 0;
/** Termination criteria of the algorithm. */
CV_PURE_PROPERTY(TermCriteria, TermCriteria)
/** @see setTermCriteria */
CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
/** @copybrief getTermCriteria @see getTermCriteria */
CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;
//! Regularization kinds
enum RegKinds {
@ -1357,20 +1492,20 @@ public:
@param results Predicted labels as a column matrix of type CV_32S.
@param flags Not used.
*/
virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
/** @brief This function returns the trained parameters arranged across rows.
For a two-class classification problem, it returns a row matrix. It returns the learnt parameters of
the Logistic Regression as a matrix of type CV_32F.
*/
virtual Mat get_learnt_thetas() const = 0;
CV_WRAP virtual Mat get_learnt_thetas() const = 0;
/** @brief Creates empty model.
Creates Logistic Regression model with parameters given.
*/
static Ptr<LogisticRegression> create();
CV_WRAP static Ptr<LogisticRegression> create();
};
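// Illustrative sketch (not part of this patch): mini-batch logistic
// regression via the wrapped setters; 'samples' and 'labels' are assumed
// CV_32F matrices.
static void lrExample(const cv::Mat& samples, const cv::Mat& labels)
{
    cv::Ptr<cv::ml::LogisticRegression> lr = cv::ml::LogisticRegression::create();
    lr->setLearningRate(0.001);
    lr->setIterations(100);
    lr->setTrainMethod(cv::ml::LogisticRegression::MINI_BATCH);
    lr->setMiniBatchSize(10); // must be less than the number of training samples
    lr->train(samples, cv::ml::ROW_SAMPLE, labels);
    cv::Mat thetas = lr->get_learnt_thetas(); // learnt parameters, CV_32F
}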
/****************************************************************************************\

View File

@ -1669,13 +1669,13 @@ public:
Mat samples = data->getTrainSamples();
Mat responses;
bool is_classification = false;
Mat class_labels0 = class_labels;
int class_count = (int)class_labels.total();
if( svmType == C_SVC || svmType == NU_SVC )
{
responses = data->getTrainNormCatResponses();
class_labels = data->getClassLabels();
class_count = (int)class_labels.total();
is_classification = true;
vector<int> temp_class_labels;
@ -1755,8 +1755,9 @@ public:
Mat temp_train_responses(train_sample_count, 1, rtype);
Mat temp_test_responses;
// If grid.minVal == grid.maxVal, this will allow one and only one pass through the loop with params.var = grid.minVal.
#define FOR_IN_GRID(var, grid) \
for( params.var = grid.minVal; params.var == grid.minVal || params.var < grid.maxVal; params.var *= grid.logStep )
for( params.var = grid.minVal; params.var == grid.minVal || params.var < grid.maxVal; params.var = (grid.minVal == grid.maxVal) ? grid.maxVal + 1 : params.var * grid.logStep )
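// Worked example: with minVal == maxVal == 10 the first pass runs because
// params.var == grid.minVal holds; the update then sets params.var to
// maxVal + 1 = 11, both conditions fail, and exactly one value is tried.
// With minVal < maxVal the update reduces to the old multiplicative step.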
FOR_IN_GRID(C, C_grid)
FOR_IN_GRID(gamma, gamma_grid)
@ -1814,7 +1815,6 @@ public:
}
params = best_params;
class_labels = class_labels0;
return do_train( samples, responses );
}

View File

@ -286,7 +286,14 @@ int DTreesImpl::addTree(const vector<int>& sidx )
int ssize = getSubsetSize(split.varIdx);
split.subsetOfs = (int)subsets.size();
subsets.resize(split.subsetOfs + ssize);
memcpy(&subsets[split.subsetOfs], &w->wsubsets[wsplit.subsetOfs], ssize*sizeof(int));
// When ssize == 0 the resize() above does not actually grow the vector,
// so taking the address of subsets[split.subsetOfs] could be out of range.
// Guarding on ssize keeps the access safe and also skips a useless
// zero-length memcpy call.
if(ssize > 0)
{
memcpy(&subsets[split.subsetOfs], &w->wsubsets[wsplit.subsetOfs], ssize*sizeof(int));
}
}
node.split = (int)splits.size();
splits.push_back(split);

View File

@ -0,0 +1,89 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
using namespace cv;
using namespace std;
using cv::ml::SVM;
using cv::ml::TrainData;
//--------------------------------------------------------------------------------------------
class CV_SVMTrainAutoTest : public cvtest::BaseTest {
public:
CV_SVMTrainAutoTest() {}
protected:
virtual void run( int start_from );
};
void CV_SVMTrainAutoTest::run( int /*start_from*/ )
{
int datasize = 100;
cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 );
cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S );
RNG rng(0);
for (int i = 0; i < datasize; ++i)
{
int response = rng.uniform(0, 2); // Random from {0, 1}.
samples.at<float>( i, 0 ) = rng.uniform(0.f, 0.5f) + response * 0.5f;
samples.at<float>( i, 1 ) = rng.uniform(0.f, 0.5f) + response * 0.5f;
responses.at<int>( i, 0 ) = response;
}
cv::Ptr<TrainData> data = TrainData::create( samples, cv::ml::ROW_SAMPLE, responses );
cv::Ptr<SVM> svm = SVM::create();
svm->trainAuto( data, 10 ); // 2-fold cross validation.
float test_data0[2] = {0.25f, 0.25f};
cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 );
float result0 = svm->predict( test_point0 );
float test_data1[2] = {0.75f, 0.75f};
cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 );
float result1 = svm->predict( test_point1 );
if ( fabs( result0 - 0 ) > 0.001 || fabs( result1 - 1 ) > 0.001 )
{
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
}
}
TEST(ML_SVM, trainauto) { CV_SVMTrainAutoTest test; test.safe_run(); }

View File

@ -555,6 +555,7 @@ HaarEvaluator::HaarEvaluator()
localSize = Size(4, 2);
lbufSize = Size(0, 0);
nchannels = 0;
tofs = 0;
}
HaarEvaluator::~HaarEvaluator()
@ -617,8 +618,7 @@ Ptr<FeatureEvaluator> HaarEvaluator::clone() const
void HaarEvaluator::computeChannels(int scaleIdx, InputArray img)
{
const ScaleData& s = scaleData->at(scaleIdx);
tofs = (int)sbufSize.area();
sqofs = hasTiltedFeatures ? tofs*2 : tofs;
sqofs = hasTiltedFeatures ? sbufSize.area() * 2 : sbufSize.area();
if (img.isUMat())
{
@ -659,6 +659,9 @@ void HaarEvaluator::computeChannels(int scaleIdx, InputArray img)
void HaarEvaluator::computeOptFeatures()
{
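// tofs is the element offset of the tilted integral inside the shared
// buffer (plain sum at offset 0, squared sum at sqofs); it can only be
// computed here, once sbufSize is final, instead of per call in
// computeChannels() as before.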
if (hasTiltedFeatures)
tofs = sbufSize.area();
int sstep = sbufSize.width;
CV_SUM_OFS( nofs[0], nofs[1], nofs[2], nofs[3], 0, normrect, sstep );
@ -676,6 +679,10 @@ void HaarEvaluator::computeOptFeatures()
copyVectorToUMat(*optfeatures_lbuf, ufbuf);
}
bool HaarEvaluator::setImage(InputArray _image, const std::vector<float>& _scales){
tofs = 0;
return FeatureEvaluator::setImage(_image, _scales);
}
bool HaarEvaluator::setWindow( Point pt, int scaleIdx )
{
@ -1268,7 +1275,7 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
scales.push_back((float)factor);
}
if( !featureEvaluator->setImage(gray, scales) )
if( scales.size() == 0 || !featureEvaluator->setImage(gray, scales) )
return;
// OpenCL code

View File

@ -347,6 +347,7 @@ public:
virtual Ptr<FeatureEvaluator> clone() const;
virtual int getFeatureType() const { return FeatureEvaluator::HAAR; }
virtual bool setImage(InputArray _image, const std::vector<float>& _scales);
virtual bool setWindow(Point p, int scaleIdx);
Rect getNormRect() const;
int getSquaresOffset() const;

View File

@ -180,11 +180,11 @@ void runHaarClassifier(
int4 ofs = f->ofs[0];
sval = (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.x;
ofs = f->ofs[1];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.y;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.y, sval);
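// mad(a, b, c) computes a*b + c, letting the OpenCL compiler map the accumulation onto a fused multiply-add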
if( weight.z > 0 )
{
ofs = f->ofs[2];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.z;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.z, sval);
}
s += (sval < st.y*nf) ? st.z : st.w;
@ -204,11 +204,11 @@ void runHaarClassifier(
sval = (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.x;
ofs = f->ofs[1];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.y;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.y, sval);
if( weight.z > 0 )
{
ofs = f->ofs[2];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.z;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.z, sval);
}
idx = (sval < as_float(n.y)*nf) ? n.z : n.w;
@ -281,11 +281,12 @@ void runHaarClassifier(
int4 ofs = f->ofs[0];
float sval = (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.x;
ofs = f->ofs[1];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.y;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.y, sval);
//if( weight.z > 0 )
if( fabs(weight.z) > 0 )
{
ofs = f->ofs[2];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.z;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.z, sval);
}
partsum += (sval < st.y*nf) ? st.z : st.w;
@ -303,11 +304,11 @@ void runHaarClassifier(
float sval = (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.x;
ofs = f->ofs[1];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.y;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.y, sval);
if( weight.z > 0 )
{
ofs = f->ofs[2];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.z;
sval = mad((psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w]), weight.z, sval);
}
idx = (sval < as_float(n.y)*nf) ? n.z : n.w;

View File

@ -119,7 +119,7 @@ CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. Noise is expected to be Gaussian white noise
@param src Input 8-bit 1-channel, 2-channel or 3-channel image.
@param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@ -138,6 +138,35 @@ parameter.
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. Noise is expected to be Gaussian white noise
@param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. Big h value
perfectly removes noise but also removes image details, smaller h
value preserves details but also preserves some noise
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
This function is expected to be applied to grayscale images. For colored images look at
fastNlMeansDenoisingColored. An advanced usage of this function is manual denoising of a colored
image in a different colorspace. Such an approach is used in fastNlMeansDenoisingColored: the
image is converted to the CIELAB colorspace and the L and AB components are then denoised
separately with different h parameters.
*/
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst,
const std::vector<float>& h,
int templateWindowSize = 7, int searchWindowSize = 21,
int normType = NORM_L2);
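// Illustrative usage of the overload above (a sketch, not part of this patch):
// per-channel strengths on a 16-bit image, which requires NORM_L1 as documented;
// src16 is an assumed CV_16UC3 input.
static inline void denoise16uExample(const cv::Mat& src16, cv::Mat& dst)
{
    const std::vector<float> h(3, 10.0f); // one strength per channel
    cv::fastNlMeansDenoising(src16, dst, h, 7, 21, cv::NORM_L1);
}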
/** @brief Modification of fastNlMeansDenoising function for colored images
@param src Input 8-bit 3-channel image.
@ -165,7 +194,35 @@ captured in small period of time. For example video. This version of the functio
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
@param srcImgs Input 8-bit 1-channel, 2-channel or 3-channel images sequence. All images should
@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or
4-channel images sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: a greater searchWindowSize means greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A bigger h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
*/
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time, for example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel images sequence. All images should
have the same type and size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
@ -178,13 +235,17 @@ Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: a greater searchWindowSize means greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for luminance component. Bigger h value perfectly
removes noise but also removes image details, smaller h value preserves details but also preserves
some noise
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
*/
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
int imgToDenoiseIndex, int temporalWindowSize,
const std::vector<float>& h,
int templateWindowSize = 7, int searchWindowSize = 21,
int normType = NORM_L2);
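/* A minimal usage sketch (frame contents and h values are placeholders):
denoise frame 2 of a five-frame CV_8UC3 sequence with one h per channel.

    std::vector<cv::Mat> frames(5); // assumed: same-size CV_8UC3 frames
    cv::Mat dst;
    std::vector<float> h = { 3.0f, 4.0f, 4.0f };
    cv::fastNlMeansDenoisingMulti(frames, dst, 2, 5, h, 7, 21, cv::NORM_L2);
*/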
/** @brief Modification of fastNlMeansDenoisingMulti function for colored images sequences

View File

@ -45,42 +45,115 @@
#include "fast_nlmeans_multi_denoising_invoker.hpp"
#include "fast_nlmeans_denoising_opencl.hpp"
template<typename ST, typename IT, typename UIT, typename D>
static void fastNlMeansDenoising_( const Mat& src, Mat& dst, const std::vector<float>& h,
int templateWindowSize, int searchWindowSize)
{
int hn = (int)h.size();
switch (CV_MAT_CN(src.type())) {
case 1:
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<ST, IT, UIT, D, int>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
break;
case 2:
if (hn == 1)
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<Vec<ST, 2>, IT, UIT, D, int>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
else
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<Vec<ST, 2>, IT, UIT, D, Vec2i>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
break;
case 3:
if (hn == 1)
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<Vec<ST, 3>, IT, UIT, D, int>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
else
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<Vec<ST, 3>, IT, UIT, D, Vec3i>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
break;
case 4:
if (hn == 1)
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<Vec<ST, 4>, IT, UIT, D, int>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
else
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<Vec<ST, 4>, IT, UIT, D, Vec4i>(
src, dst, templateWindowSize, searchWindowSize, &h[0]));
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported number of channels! Only 1, 2, 3, and 4 are supported");
}
}
void cv::fastNlMeansDenoising( InputArray _src, OutputArray _dst, float h,
int templateWindowSize, int searchWindowSize)
{
fastNlMeansDenoising(_src, _dst, std::vector<float>(1, h),
templateWindowSize, searchWindowSize);
}
void cv::fastNlMeansDenoising( InputArray _src, OutputArray _dst, const std::vector<float>& h,
int templateWindowSize, int searchWindowSize, int normType)
{
int hn = (int)h.size(), type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
CV_Assert(hn == 1 || hn == cn);
Size src_size = _src.size();
CV_OCL_RUN(_src.dims() <= 2 && (_src.isUMat() || _dst.isUMat()) &&
src_size.width > 5 && src_size.height > 5, // low accuracy on small sizes
ocl_fastNlMeansDenoising(_src, _dst, h, templateWindowSize, searchWindowSize))
ocl_fastNlMeansDenoising(_src, _dst, &h[0], hn,
templateWindowSize, searchWindowSize, normType))
Mat src = _src.getMat();
_dst.create(src_size, src.type());
Mat dst = _dst.getMat();
switch (normType) {
case NORM_L2:
#ifdef HAVE_TEGRA_OPTIMIZATION
if(tegra::useTegra() && tegra::fastNlMeansDenoising(src, dst, h, templateWindowSize, searchWindowSize))
return;
if(hn == 1 && tegra::useTegra() &&
tegra::fastNlMeansDenoising(src, dst, h[0], templateWindowSize, searchWindowSize))
return;
#endif
switch (src.type()) {
case CV_8U:
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<uchar>(
src, dst, templateWindowSize, searchWindowSize, h));
switch (depth) {
case CV_8U:
fastNlMeansDenoising_<uchar, int, unsigned, DistSquared>(src, dst, h,
templateWindowSize,
searchWindowSize);
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported depth! Only CV_8U is supported for NORM_L2");
}
break;
case CV_8UC2:
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<cv::Vec2b>(
src, dst, templateWindowSize, searchWindowSize, h));
break;
case CV_8UC3:
parallel_for_(cv::Range(0, src.rows),
FastNlMeansDenoisingInvoker<cv::Vec3b>(
src, dst, templateWindowSize, searchWindowSize, h));
case NORM_L1:
switch (depth) {
case CV_8U:
fastNlMeansDenoising_<uchar, int, unsigned, DistAbs>(src, dst, h,
templateWindowSize,
searchWindowSize);
break;
case CV_16U:
fastNlMeansDenoising_<ushort, int64, uint64, DistAbs>(src, dst, h,
templateWindowSize,
searchWindowSize);
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported depth! Only CV_8U and CV_16U are supported for NORM_L1");
}
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported image format! Only CV_8UC1, CV_8UC2 and CV_8UC3 are supported");
"Unsupported norm type! Only NORM_L2 and NORM_L1 are supported");
}
}
@ -92,7 +165,7 @@ void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
Size src_size = _src.size();
if (type != CV_8UC3 && type != CV_8UC4)
{
CV_Error(Error::StsBadArg, "Type of input image should be CV_8UC3!");
CV_Error(Error::StsBadArg, "Type of input image should be CV_8UC3 or CV_8UC4!");
return;
}
@ -108,8 +181,8 @@ void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
Mat src_lab;
cvtColor(src, src_lab, COLOR_LBGR2Lab);
Mat l(src_size, CV_8U);
Mat ab(src_size, CV_8UC2);
Mat l(src_size, CV_MAKE_TYPE(depth, 1));
Mat ab(src_size, CV_MAKE_TYPE(depth, 2));
Mat l_ab[] = { l, ab };
int from_to[] = { 0,0, 1,1, 2,2 };
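// scatter the Lab planes: channel 0 goes to l, channels 1 and 2 to ab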
mixChannels(&src_lab, 1, l_ab, 2, from_to, 3);
@ -157,9 +230,76 @@ static void fastNlMeansDenoisingMultiCheckPreconditions(
}
}
template<typename ST, typename IT, typename UIT, typename D>
static void fastNlMeansDenoisingMulti_( const std::vector<Mat>& srcImgs, Mat& dst,
int imgToDenoiseIndex, int temporalWindowSize,
const std::vector<float>& h,
int templateWindowSize, int searchWindowSize)
{
int hn = (int)h.size();
switch (CV_MAT_CN(srcImgs[0].type()))
{
case 1:
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<ST, IT, UIT, D, int>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
break;
case 2:
if (hn == 1)
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<Vec<ST, 2>, IT, UIT, D, int>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
else
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<Vec<ST, 2>, IT, UIT, D, Vec2i>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
break;
case 3:
if (hn == 1)
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<Vec<ST, 3>, IT, UIT, D, int>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
else
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<Vec<ST, 3>, IT, UIT, D, Vec3i>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
break;
case 4:
if (hn == 1)
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<Vec<ST, 4>, IT, UIT, D, int>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
else
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<Vec<ST, 4>, IT, UIT, D, Vec4i>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, &h[0]));
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported image format! Only CV_8U, CV_8UC2, CV_8UC3 and CV_8UC4 are supported");
}
}
void cv::fastNlMeansDenoisingMulti( InputArrayOfArrays _srcImgs, OutputArray _dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h, int templateWindowSize, int searchWindowSize)
{
fastNlMeansDenoisingMulti(_srcImgs, _dst, imgToDenoiseIndex, temporalWindowSize,
std::vector<float>(1, h), templateWindowSize, searchWindowSize);
}
void cv::fastNlMeansDenoisingMulti( InputArrayOfArrays _srcImgs, OutputArray _dst,
int imgToDenoiseIndex, int temporalWindowSize,
const std::vector<float>& h,
int templateWindowSize, int searchWindowSize, int normType)
{
std::vector<Mat> srcImgs;
_srcImgs.getMatVector(srcImgs);
@ -168,32 +308,52 @@ void cv::fastNlMeansDenoisingMulti( InputArrayOfArrays _srcImgs, OutputArray _ds
srcImgs, imgToDenoiseIndex,
temporalWindowSize, templateWindowSize, searchWindowSize);
int hn = (int)h.size();
int type = srcImgs[0].type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
CV_Assert(hn == 1 || hn == cn);
_dst.create(srcImgs[0].size(), srcImgs[0].type());
Mat dst = _dst.getMat();
switch (srcImgs[0].type())
{
case CV_8U:
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<uchar>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, h));
switch (normType) {
case NORM_L2:
switch (depth) {
case CV_8U:
fastNlMeansDenoisingMulti_<uchar, int, unsigned,
DistSquared>(srcImgs, dst,
imgToDenoiseIndex, temporalWindowSize,
h,
templateWindowSize, searchWindowSize);
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported depth! Only CV_8U is supported for NORM_L2");
}
break;
case CV_8UC2:
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<cv::Vec2b>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, h));
break;
case CV_8UC3:
parallel_for_(cv::Range(0, srcImgs[0].rows),
FastNlMeansMultiDenoisingInvoker<cv::Vec3b>(
srcImgs, imgToDenoiseIndex, temporalWindowSize,
dst, templateWindowSize, searchWindowSize, h));
case NORM_L1:
switch (depth) {
case CV_8U:
fastNlMeansDenoisingMulti_<uchar, int, unsigned,
DistAbs>(srcImgs, dst,
imgToDenoiseIndex, temporalWindowSize,
h,
templateWindowSize, searchWindowSize);
break;
case CV_16U:
fastNlMeansDenoisingMulti_<ushort, int64, uint64,
DistAbs>(srcImgs, dst,
imgToDenoiseIndex, temporalWindowSize,
h,
templateWindowSize, searchWindowSize);
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported depth! Only CV_8U and CV_16U are supported for NORM_L1");
}
break;
default:
CV_Error(Error::StsBadArg,
"Unsupported matrix format! Only uchar, Vec2b, Vec3b are supported");
"Unsupported norm type! Only NORM_L2 and NORM_L1 are supported");
}
}
@ -212,9 +372,10 @@ void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputAr
_dst.create(srcImgs[0].size(), srcImgs[0].type());
Mat dst = _dst.getMat();
int type = srcImgs[0].type(), depth = CV_MAT_DEPTH(type);
int src_imgs_size = static_cast<int>(srcImgs.size());
if (srcImgs[0].type() != CV_8UC3)
if (type != CV_8UC3)
{
CV_Error(Error::StsBadArg, "Type of input images should be CV_8UC3!");
return;
@ -228,9 +389,9 @@ void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputAr
std::vector<Mat> ab(src_imgs_size);
for (int i = 0; i < src_imgs_size; i++)
{
src_lab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC3);
l[i] = Mat::zeros(srcImgs[0].size(), CV_8UC1);
ab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC2);
src_lab[i] = Mat::zeros(srcImgs[0].size(), type);
l[i] = Mat::zeros(srcImgs[0].size(), CV_MAKE_TYPE(depth, 1));
ab[i] = Mat::zeros(srcImgs[0].size(), CV_MAKE_TYPE(depth, 2));
cvtColor(srcImgs[i], src_lab[i], COLOR_LBGR2Lab);
Mat l_ab[] = { l[i], ab[i] };

View File

@ -50,13 +50,13 @@
using namespace cv;
template <typename T>
template <typename T, typename IT, typename UIT, typename D, typename WT>
struct FastNlMeansDenoisingInvoker :
public ParallelLoopBody
{
public:
FastNlMeansDenoisingInvoker(const Mat& src, Mat& dst,
int template_window_size, int search_window_size, const float h);
int template_window_size, int search_window_size, const float *h);
void operator() (const Range& range) const;
@ -75,9 +75,9 @@ private:
int template_window_half_size_;
int search_window_half_size_;
int fixed_point_mult_;
typename pixelInfo<WT>::sampleType fixed_point_mult_;
int almost_template_window_size_sq_bin_shift_;
std::vector<int> almost_dist2weight_;
std::vector<WT> almost_dist2weight_;
void calcDistSumsForFirstElementInRow(
int i, Array2d<int>& dist_sums,
@ -99,15 +99,15 @@ inline int getNearestPowerOf2(int value)
return p;
}
template <class T>
FastNlMeansDenoisingInvoker<T>::FastNlMeansDenoisingInvoker(
template <typename T, typename IT, typename UIT, typename D, typename WT>
FastNlMeansDenoisingInvoker<T, IT, UIT, D, WT>::FastNlMeansDenoisingInvoker(
const Mat& src, Mat& dst,
int template_window_size,
int search_window_size,
const float h) :
const float *h) :
src_(src), dst_(dst)
{
CV_Assert(src.channels() == sizeof(T)); //T is Vec1b or Vec2b or Vec3b
CV_Assert(src.channels() == pixelInfo<T>::channels);
template_window_half_size_ = template_window_size / 2;
search_window_half_size_ = search_window_size / 2;
@ -117,8 +117,10 @@ FastNlMeansDenoisingInvoker<T>::FastNlMeansDenoisingInvoker(
border_size_ = search_window_half_size_ + template_window_half_size_;
copyMakeBorder(src_, extended_src_, border_size_, border_size_, border_size_, border_size_, BORDER_DEFAULT);
const int max_estimate_sum_value = search_window_size_ * search_window_size_ * 255;
fixed_point_mult_ = std::numeric_limits<int>::max() / max_estimate_sum_value;
const IT max_estimate_sum_value =
(IT)search_window_size_ * (IT)search_window_size_ * (IT)pixelInfo<T>::sampleMax();
fixed_point_mult_ = (int)std::min<IT>(std::numeric_limits<IT>::max() / max_estimate_sum_value,
pixelInfo<WT>::sampleMax());
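// e.g. for CV_8U with a 21x21 search window: max_estimate_sum_value =
// 21*21*255 = 112455, so with WT = int the scale becomes
// fixed_point_mult_ = INT_MAX / 112455 = 19096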
// precalc weight for every possible l2 dist between blocks
// additional optimization of precalced weights to replace division(averaging) by binary shift
@ -127,30 +129,24 @@ FastNlMeansDenoisingInvoker<T>::FastNlMeansDenoisingInvoker(
almost_template_window_size_sq_bin_shift_ = getNearestPowerOf2(template_window_size_sq);
double almost_dist2actual_dist_multiplier = ((double)(1 << almost_template_window_size_sq_bin_shift_)) / template_window_size_sq;
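// e.g. a 7x7 template window gives template_window_size_sq = 49; the nearest
// power of two is 64 (bin shift 6), so the multiplier above is 64/49 and the
// averaging division is replaced by ">> 6" on the summed distances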
int max_dist = 255 * 255 * sizeof(T);
int max_dist = D::template maxDist<T>();
int almost_max_dist = (int)(max_dist / almost_dist2actual_dist_multiplier + 1);
almost_dist2weight_.resize(almost_max_dist);
const double WEIGHT_THRESHOLD = 0.001;
for (int almost_dist = 0; almost_dist < almost_max_dist; almost_dist++)
{
double dist = almost_dist * almost_dist2actual_dist_multiplier;
int weight = cvRound(fixed_point_mult_ * std::exp(-dist / (h * h * sizeof(T))));
if (weight < WEIGHT_THRESHOLD * fixed_point_mult_)
weight = 0;
almost_dist2weight_[almost_dist] = weight;
almost_dist2weight_[almost_dist] =
D::template calcWeight<T, WT>(dist, h, fixed_point_mult_);
}
CV_Assert(almost_dist2weight_[0] == fixed_point_mult_);
// additional optimization init end
if (dst_.empty())
dst_ = Mat::zeros(src_.size(), src_.type());
}
template <class T>
void FastNlMeansDenoisingInvoker<T>::operator() (const Range& range) const
template <typename T, typename IT, typename UIT, typename D, typename WT>
void FastNlMeansDenoisingInvoker<T, IT, UIT, D, WT>::operator() (const Range& range) const
{
int row_from = range.start;
int row_to = range.end - 1;
@ -215,7 +211,7 @@ void FastNlMeansDenoisingInvoker<T>::operator() (const Range& range) const
dist_sums_row[x] -= col_dist_sums_row[x];
int bx = start_bx + x;
col_dist_sums_row[x] = up_col_dist_sums_row[x] + calcUpDownDist(a_up, a_down, b_up_ptr[bx], b_down_ptr[bx]);
col_dist_sums_row[x] = up_col_dist_sums_row[x] + D::template calcUpDownDist<T>(a_up, a_down, b_up_ptr[bx], b_down_ptr[bx]);
dist_sums_row[x] += col_dist_sums_row[x];
up_col_dist_sums_row[x] = col_dist_sums_row[x];
@ -227,9 +223,11 @@ void FastNlMeansDenoisingInvoker<T>::operator() (const Range& range) const
}
// calc weights
int estimation[3], weights_sum = 0;
for (size_t channel_num = 0; channel_num < sizeof(T); channel_num++)
IT estimation[pixelInfo<T>::channels], weights_sum[pixelInfo<WT>::channels];
for (int channel_num = 0; channel_num < pixelInfo<T>::channels; channel_num++)
estimation[channel_num] = 0;
for (int channel_num = 0; channel_num < pixelInfo<WT>::channels; channel_num++)
weights_sum[channel_num] = 0;
for (int y = 0; y < search_window_size_; y++)
{
@ -238,24 +236,21 @@ void FastNlMeansDenoisingInvoker<T>::operator() (const Range& range) const
for (int x = 0; x < search_window_size_; x++)
{
int almostAvgDist = dist_sums_row[x] >> almost_template_window_size_sq_bin_shift_;
int weight = almost_dist2weight_[almostAvgDist];
weights_sum += weight;
WT weight = almost_dist2weight_[almostAvgDist];
T p = cur_row_ptr[border_size_ + search_window_x + x];
incWithWeight(estimation, weight, p);
incWithWeight<T, IT, WT>(estimation, weights_sum, weight, p);
}
}
for (size_t channel_num = 0; channel_num < sizeof(T); channel_num++)
estimation[channel_num] = ((unsigned)estimation[channel_num] + weights_sum/2) / weights_sum;
dst_.at<T>(i,j) = saturateCastFromArray<T>(estimation);
divByWeightsSum<IT, UIT, pixelInfo<T>::channels, pixelInfo<WT>::channels>(estimation,
weights_sum);
dst_.at<T>(i,j) = saturateCastFromArray<T, IT>(estimation);
}
}
}
template <class T>
inline void FastNlMeansDenoisingInvoker<T>::calcDistSumsForFirstElementInRow(
template <typename T, typename IT, typename UIT, typename D, typename WT>
inline void FastNlMeansDenoisingInvoker<T, IT, UIT, D, WT>::calcDistSumsForFirstElementInRow(
int i,
Array2d<int>& dist_sums,
Array3d<int>& col_dist_sums,
@ -276,7 +271,7 @@ inline void FastNlMeansDenoisingInvoker<T>::calcDistSumsForFirstElementInRow(
for (int ty = -template_window_half_size_; ty <= template_window_half_size_; ty++)
for (int tx = -template_window_half_size_; tx <= template_window_half_size_; tx++)
{
int dist = calcDist<T>(extended_src_,
int dist = D::template calcDist<T>(extended_src_,
border_size_ + i + ty, border_size_ + j + tx,
border_size_ + start_y + ty, border_size_ + start_x + tx);
@ -288,8 +283,8 @@ inline void FastNlMeansDenoisingInvoker<T>::calcDistSumsForFirstElementInRow(
}
}
template <class T>
inline void FastNlMeansDenoisingInvoker<T>::calcDistSumsForElementInFirstRow(
template <typename T, typename IT, typename UIT, typename D, typename WT>
inline void FastNlMeansDenoisingInvoker<T, IT, UIT, D, WT>::calcDistSumsForElementInFirstRow(
int i, int j, int first_col_num,
Array2d<int>& dist_sums,
Array3d<int>& col_dist_sums,
@ -312,7 +307,7 @@ inline void FastNlMeansDenoisingInvoker<T>::calcDistSumsForElementInFirstRow(
int by = start_by + y;
int bx = start_bx + x;
for (int ty = -template_window_half_size_; ty <= template_window_half_size_; ty++)
col_dist_sums[new_last_col_num][y][x] += calcDist<T>(extended_src_, ay + ty, ax, by + ty, bx);
col_dist_sums[new_last_col_num][y][x] += D::template calcDist<T>(extended_src_, ay + ty, ax, by + ty, bx);
dist_sums[y][x] += col_dist_sums[new_last_col_num][y][x];
up_col_dist_sums[j][y][x] = col_dist_sums[new_last_col_num][y][x];

View File

@ -44,118 +44,446 @@
using namespace cv;
template <typename T> static inline int calcDist(const T a, const T b);
// std::isnan is part of C++11 and is not supported by MSVS 2010/2012
#if defined _MSC_VER && _MSC_VER < 1800 /* MSVC 2013 */
#include <float.h>
namespace std {
template <typename T> bool isnan(T value) { return _isnan(value) != 0; }
}
#endif
template <> inline int calcDist(const uchar a, const uchar b)
template <typename T> struct pixelInfo_
{
return (a-b) * (a-b);
static const int channels = 1;
typedef T sampleType;
};
template <typename ET, int n> struct pixelInfo_<Vec<ET, n> >
{
static const int channels = n;
typedef ET sampleType;
};
template <typename T> struct pixelInfo: public pixelInfo_<T>
{
typedef typename pixelInfo_<T>::sampleType sampleType;
static inline sampleType sampleMax()
{
return std::numeric_limits<sampleType>::max();
}
static inline sampleType sampleMin()
{
return std::numeric_limits<sampleType>::min();
}
static inline size_t sampleBytes()
{
return sizeof(sampleType);
}
static inline size_t sampleBits()
{
return 8*sampleBytes();
}
};
class DistAbs
{
template <typename T> struct calcDist_
{
static inline int f(const T a, const T b)
{
return std::abs((int)(a-b));
}
};
template <typename ET> struct calcDist_<Vec<ET, 2> >
{
static inline int f(const Vec<ET, 2> a, const Vec<ET, 2> b)
{
return std::abs((int)(a[0]-b[0])) + std::abs((int)(a[1]-b[1]));
}
};
template <typename ET> struct calcDist_<Vec<ET, 3> >
{
static inline int f(const Vec<ET, 3> a, const Vec<ET, 3> b)
{
return
std::abs((int)(a[0]-b[0])) +
std::abs((int)(a[1]-b[1])) +
std::abs((int)(a[2]-b[2]));
}
};
template <typename ET> struct calcDist_<Vec<ET, 4> >
{
static inline int f(const Vec<ET, 4> a, const Vec<ET, 4> b)
{
return
std::abs((int)(a[0]-b[0])) +
std::abs((int)(a[1]-b[1])) +
std::abs((int)(a[2]-b[2])) +
std::abs((int)(a[3]-b[3]));
}
};
template <typename T, typename WT> struct calcWeight_
{
static inline WT f(double dist, const float *h, WT fixed_point_mult)
{
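// dist is an L1 (absolute) distance here, so it is squared before the
// Gaussian weighting; DistSquared::calcWeight_ below receives an
// already-squared distance and uses it directly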
double w = std::exp(-dist*dist / (h[0]*h[0] * pixelInfo<T>::channels));
if (std::isnan(w)) w = 1.0; // Handle h = 0.0
static const double WEIGHT_THRESHOLD = 0.001;
WT weight = (WT)cvRound(fixed_point_mult * w);
if (weight < WEIGHT_THRESHOLD * fixed_point_mult) weight = 0;
return weight;
}
};
template <typename T, typename ET, int n> struct calcWeight_<T, Vec<ET, n> >
{
static inline Vec<ET, n> f(double dist, const float *h, ET fixed_point_mult)
{
Vec<ET, n> res;
for (int i=0; i<n; i++)
res[i] = calcWeight<T, ET>(dist, &h[i], fixed_point_mult);
return res;
}
};
public:
template <typename T> static inline int calcDist(const T a, const T b)
{
return calcDist_<T>::f(a, b);
}
template <typename T>
static inline int calcDist(const Mat& m, int i1, int j1, int i2, int j2)
{
const T a = m.at<T>(i1, j1);
const T b = m.at<T>(i2, j2);
return calcDist<T>(a,b);
}
template <typename T>
static inline int calcUpDownDist(T a_up, T a_down, T b_up, T b_down)
{
return calcDist<T>(a_down, b_down) - calcDist<T>(a_up, b_up);
};
template <typename T, typename WT>
static inline WT calcWeight(double dist, const float *h,
typename pixelInfo<WT>::sampleType fixed_point_mult)
{
return calcWeight_<T, WT>::f(dist, h, fixed_point_mult);
}
template <typename T>
static inline int maxDist()
{
return (int)pixelInfo<T>::sampleMax() * pixelInfo<T>::channels;
}
};
class DistSquared
{
template <typename T> struct calcDist_
{
static inline int f(const T a, const T b)
{
return (int)(a-b) * (int)(a-b);
}
};
template <typename ET> struct calcDist_<Vec<ET, 2> >
{
static inline int f(const Vec<ET, 2> a, const Vec<ET, 2> b)
{
return (int)(a[0]-b[0])*(int)(a[0]-b[0]) + (int)(a[1]-b[1])*(int)(a[1]-b[1]);
}
};
template <typename ET> struct calcDist_<Vec<ET, 3> >
{
static inline int f(const Vec<ET, 3> a, const Vec<ET, 3> b)
{
return
(int)(a[0]-b[0])*(int)(a[0]-b[0]) +
(int)(a[1]-b[1])*(int)(a[1]-b[1]) +
(int)(a[2]-b[2])*(int)(a[2]-b[2]);
}
};
template <typename ET> struct calcDist_<Vec<ET, 4> >
{
static inline int f(const Vec<ET, 4> a, const Vec<ET, 4> b)
{
return
(int)(a[0]-b[0])*(int)(a[0]-b[0]) +
(int)(a[1]-b[1])*(int)(a[1]-b[1]) +
(int)(a[2]-b[2])*(int)(a[2]-b[2]) +
(int)(a[3]-b[3])*(int)(a[3]-b[3]);
}
};
template <typename T> struct calcUpDownDist_
{
static inline int f(T a_up, T a_down, T b_up, T b_down)
{
int A = a_down - b_down;
int B = a_up - b_up;
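// (A-B)*(A+B) == A*A - B*B, i.e. calcDist(a_down, b_down) - calcDist(a_up, b_up)
// for the sliding-window update, computed with a single multiplication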
return (A-B)*(A+B);
}
};
template <typename ET, int n> struct calcUpDownDist_<Vec<ET, n> >
{
private:
typedef Vec<ET, n> T;
public:
static inline int f(T a_up, T a_down, T b_up, T b_down)
{
return calcDist<T>(a_down, b_down) - calcDist<T>(a_up, b_up);
}
};
template <typename T, typename WT> struct calcWeight_
{
static inline WT f(double dist, const float *h, WT fixed_point_mult)
{
double w = std::exp(-dist / (h[0]*h[0] * pixelInfo<T>::channels));
if (std::isnan(w)) w = 1.0; // Handle h = 0.0
static const double WEIGHT_THRESHOLD = 0.001;
WT weight = (WT)cvRound(fixed_point_mult * w);
if (weight < WEIGHT_THRESHOLD * fixed_point_mult) weight = 0;
return weight;
}
};
template <typename T, typename ET, int n> struct calcWeight_<T, Vec<ET, n> >
{
static inline Vec<ET, n> f(double dist, const float *h, ET fixed_point_mult)
{
Vec<ET, n> res;
for (int i=0; i<n; i++)
res[i] = calcWeight<T, ET>(dist, &h[i], fixed_point_mult);
return res;
}
};
public:
template <typename T> static inline int calcDist(const T a, const T b)
{
return calcDist_<T>::f(a, b);
}
template <typename T>
static inline int calcDist(const Mat& m, int i1, int j1, int i2, int j2)
{
const T a = m.at<T>(i1, j1);
const T b = m.at<T>(i2, j2);
return calcDist<T>(a,b);
}
template <typename T>
static inline int calcUpDownDist(T a_up, T a_down, T b_up, T b_down)
{
return calcUpDownDist_<T>::f(a_up, a_down, b_up, b_down);
};
template <typename T, typename WT>
static inline WT calcWeight(double dist, const float *h,
typename pixelInfo<WT>::sampleType fixed_point_mult)
{
return calcWeight_<T, WT>::f(dist, h, fixed_point_mult);
}
template <typename T>
static inline int maxDist()
{
return (int)pixelInfo<T>::sampleMax() * (int)pixelInfo<T>::sampleMax() *
pixelInfo<T>::channels;
}
};
template <typename T, typename IT, typename WT> struct incWithWeight_
{
static inline void f(IT* estimation, IT* weights_sum, WT weight, T p)
{
estimation[0] += (IT)weight * p;
weights_sum[0] += (IT)weight;
}
};
template <typename ET, typename IT, typename WT> struct incWithWeight_<Vec<ET, 2>, IT, WT>
{
static inline void f(IT* estimation, IT* weights_sum, WT weight, Vec<ET, 2> p)
{
estimation[0] += (IT)weight * p[0];
estimation[1] += (IT)weight * p[1];
weights_sum[0] += (IT)weight;
}
};
template <typename ET, typename IT, typename WT> struct incWithWeight_<Vec<ET, 3>, IT, WT>
{
static inline void f(IT* estimation, IT* weights_sum, WT weight, Vec<ET, 3> p)
{
estimation[0] += (IT)weight * p[0];
estimation[1] += (IT)weight * p[1];
estimation[2] += (IT)weight * p[2];
weights_sum[0] += (IT)weight;
}
};
template <typename ET, typename IT, typename WT> struct incWithWeight_<Vec<ET, 4>, IT, WT>
{
static inline void f(IT* estimation, IT* weights_sum, WT weight, Vec<ET, 4> p)
{
estimation[0] += (IT)weight * p[0];
estimation[1] += (IT)weight * p[1];
estimation[2] += (IT)weight * p[2];
estimation[3] += (IT)weight * p[3];
weights_sum[0] += (IT)weight;
}
};
template <typename ET, typename IT, typename EW> struct incWithWeight_<Vec<ET, 2>, IT, Vec<EW, 2> >
{
static inline void f(IT* estimation, IT* weights_sum, Vec<EW, 2> weight, Vec<ET, 2> p)
{
estimation[0] += (IT)weight[0] * p[0];
estimation[1] += (IT)weight[1] * p[1];
weights_sum[0] += (IT)weight[0];
weights_sum[1] += (IT)weight[1];
}
};
template <typename ET, typename IT, typename EW> struct incWithWeight_<Vec<ET, 3>, IT, Vec<EW, 3> >
{
static inline void f(IT* estimation, IT* weights_sum, Vec<EW, 3> weight, Vec<ET, 3> p)
{
estimation[0] += (IT)weight[0] * p[0];
estimation[1] += (IT)weight[1] * p[1];
estimation[2] += (IT)weight[2] * p[2];
weights_sum[0] += (IT)weight[0];
weights_sum[1] += (IT)weight[1];
weights_sum[2] += (IT)weight[2];
}
};
template <typename ET, typename IT, typename EW> struct incWithWeight_<Vec<ET, 4>, IT, Vec<EW, 4> >
{
static inline void f(IT* estimation, IT* weights_sum, Vec<EW, 4> weight, Vec<ET, 4> p)
{
estimation[0] += (IT)weight[0] * p[0];
estimation[1] += (IT)weight[1] * p[1];
estimation[2] += (IT)weight[2] * p[2];
estimation[3] += (IT)weight[3] * p[3];
weights_sum[0] += (IT)weight[0];
weights_sum[1] += (IT)weight[1];
weights_sum[2] += (IT)weight[2];
weights_sum[3] += (IT)weight[3];
}
};
template <typename T, typename IT, typename WT>
static inline void incWithWeight(IT* estimation, IT* weights_sum, WT weight, T p)
{
return incWithWeight_<T, IT, WT>::f(estimation, weights_sum, weight, p);
}
template <> inline int calcDist(const Vec2b a, const Vec2b b)
template <typename IT, typename UIT, int nc, int nw> struct divByWeightsSum_
{
return (a[0]-b[0])*(a[0]-b[0]) + (a[1]-b[1])*(a[1]-b[1]);
static inline void f(IT* estimation, IT* weights_sum);
};
template <typename IT, typename UIT> struct divByWeightsSum_<IT, UIT, 1, 1>
{
static inline void f(IT* estimation, IT* weights_sum)
{
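// rounded integer division: adding weights_sum[0]/2 before dividing
// rounds the fixed-point estimate to the nearest integer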
estimation[0] = (static_cast<UIT>(estimation[0]) + weights_sum[0]/2) / weights_sum[0];
}
};
template <typename IT, typename UIT, int n> struct divByWeightsSum_<IT, UIT, n, 1>
{
static inline void f(IT* estimation, IT* weights_sum)
{
for (size_t i = 0; i < n; i++)
estimation[i] = (static_cast<UIT>(estimation[i]) + weights_sum[0]/2) / weights_sum[0];
}
};
template <typename IT, typename UIT, int n> struct divByWeightsSum_<IT, UIT, n, n>
{
static inline void f(IT* estimation, IT* weights_sum)
{
for (size_t i = 0; i < n; i++)
estimation[i] = (static_cast<UIT>(estimation[i]) + weights_sum[i]/2) / weights_sum[i];
}
};
template <typename IT, typename UIT, int nc, int nw>
static inline void divByWeightsSum(IT* estimation, IT* weights_sum)
{
return divByWeightsSum_<IT, UIT, nc, nw>::f(estimation, weights_sum);
}
template <> inline int calcDist(const Vec3b a, const Vec3b b)
template <typename T, typename IT> struct saturateCastFromArray_
{
return (a[0]-b[0])*(a[0]-b[0]) + (a[1]-b[1])*(a[1]-b[1]) + (a[2]-b[2])*(a[2]-b[2]);
}
static inline T f(IT* estimation)
{
return saturate_cast<T>(estimation[0]);
}
};
template <typename T> static inline int calcDist(const Mat& m, int i1, int j1, int i2, int j2)
template <typename ET, typename IT> struct saturateCastFromArray_<Vec<ET, 2>, IT>
{
const T a = m.at<T>(i1, j1);
const T b = m.at<T>(i2, j2);
return calcDist<T>(a,b);
}
static inline Vec<ET, 2> f(IT* estimation)
{
Vec<ET, 2> res;
res[0] = saturate_cast<ET>(estimation[0]);
res[1] = saturate_cast<ET>(estimation[1]);
return res;
}
};
template <typename T> static inline int calcUpDownDist(T a_up, T a_down, T b_up, T b_down)
template <typename ET, typename IT> struct saturateCastFromArray_<Vec<ET, 3>, IT>
{
return calcDist(a_down, b_down) - calcDist(a_up, b_up);
}
static inline Vec<ET, 3> f(IT* estimation)
{
Vec<ET, 3> res;
res[0] = saturate_cast<ET>(estimation[0]);
res[1] = saturate_cast<ET>(estimation[1]);
res[2] = saturate_cast<ET>(estimation[2]);
return res;
}
};
template <> inline int calcUpDownDist(uchar a_up, uchar a_down, uchar b_up, uchar b_down)
template <typename ET, typename IT> struct saturateCastFromArray_<Vec<ET, 4>, IT>
{
int A = a_down - b_down;
int B = a_up - b_up;
return (A-B)*(A+B);
}
static inline Vec<ET, 4> f(IT* estimation)
{
Vec<ET, 4> res;
res[0] = saturate_cast<ET>(estimation[0]);
res[1] = saturate_cast<ET>(estimation[1]);
res[2] = saturate_cast<ET>(estimation[2]);
res[3] = saturate_cast<ET>(estimation[3]);
return res;
}
};
template <typename T> static inline void incWithWeight(int* estimation, int weight, T p);
template <> inline void incWithWeight(int* estimation, int weight, uchar p)
template <typename T, typename IT> static inline T saturateCastFromArray(IT* estimation)
{
estimation[0] += weight * p;
}
template <> inline void incWithWeight(int* estimation, int weight, Vec2b p)
{
estimation[0] += weight * p[0];
estimation[1] += weight * p[1];
}
template <> inline void incWithWeight(int* estimation, int weight, Vec3b p)
{
estimation[0] += weight * p[0];
estimation[1] += weight * p[1];
estimation[2] += weight * p[2];
}
template <> inline void incWithWeight(int* estimation, int weight, int p)
{
estimation[0] += weight * p;
}
template <> inline void incWithWeight(int* estimation, int weight, Vec2i p)
{
estimation[0] += weight * p[0];
estimation[1] += weight * p[1];
}
template <> inline void incWithWeight(int* estimation, int weight, Vec3i p)
{
estimation[0] += weight * p[0];
estimation[1] += weight * p[1];
estimation[2] += weight * p[2];
}
template <typename T> static inline T saturateCastFromArray(int* estimation);
template <> inline uchar saturateCastFromArray(int* estimation)
{
return saturate_cast<uchar>(estimation[0]);
}
template <> inline Vec2b saturateCastFromArray(int* estimation)
{
Vec2b res;
res[0] = saturate_cast<uchar>(estimation[0]);
res[1] = saturate_cast<uchar>(estimation[1]);
return res;
}
template <> inline Vec3b saturateCastFromArray(int* estimation)
{
Vec3b res;
res[0] = saturate_cast<uchar>(estimation[0]);
res[1] = saturate_cast<uchar>(estimation[1]);
res[2] = saturate_cast<uchar>(estimation[2]);
return res;
}
template <> inline int saturateCastFromArray(int* estimation)
{
return estimation[0];
}
template <> inline Vec2i saturateCastFromArray(int* estimation)
{
estimation[1] = 0;
return Vec2i(estimation);
}
template <> inline Vec3i saturateCastFromArray(int* estimation)
{
return Vec3i(estimation);
return saturateCastFromArray_<T, IT>::f(estimation);
}
#endif

View File

@ -28,12 +28,16 @@ static int divUp(int a, int b)
return (a + b - 1) / b;
}
template <typename FT>
static bool ocl_calcAlmostDist2Weight(UMat & almostDist2Weight, int searchWindowSize, int templateWindowSize, FT h, int cn,
template <typename FT, typename ST, typename WT>
static bool ocl_calcAlmostDist2Weight(UMat & almostDist2Weight,
int searchWindowSize, int templateWindowSize,
const FT *h, int hn, int cn, int normType,
int & almostTemplateWindowSizeSqBinShift)
{
const int maxEstimateSumValue = searchWindowSize * searchWindowSize * 255;
int fixedPointMult = std::numeric_limits<int>::max() / maxEstimateSumValue;
const WT maxEstimateSumValue = searchWindowSize * searchWindowSize *
std::numeric_limits<ST>::max();
int fixedPointMult = (int)std::min<WT>(std::numeric_limits<WT>::max() / maxEstimateSumValue,
std::numeric_limits<int>::max());
int depth = DataType<FT>::depth;
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
@ -48,33 +52,44 @@ static bool ocl_calcAlmostDist2Weight(UMat & almostDist2Weight, int searchWindow
FT almostDist2ActualDistMultiplier = (FT)(1 << almostTemplateWindowSizeSqBinShift) / templateWindowSizeSq;
const FT WEIGHT_THRESHOLD = 1e-3f;
int maxDist = 255 * 255 * cn;
int maxDist = normType == NORM_L1 ? std::numeric_limits<ST>::max() * cn :
std::numeric_limits<ST>::max() * std::numeric_limits<ST>::max() * cn;
int almostMaxDist = (int)(maxDist / almostDist2ActualDistMultiplier + 1);
FT den = 1.0f / (h * h * cn);
FT den[4] = { 0 }; // zero-fill: when hn == 3 a fourth element is passed as alignment padding
CV_Assert(hn > 0 && hn <= 4);
for (int i=0; i<hn; i++)
den[i] = 1.0f / (h[i] * h[i] * cn);
almostDist2Weight.create(1, almostMaxDist, CV_32SC1);
almostDist2Weight.create(1, almostMaxDist, CV_32SC(hn == 3 ? 4 : hn));
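// a 3-channel LUT is allocated with 4 components, since OpenCL int3
// occupies the same storage as int4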
char buf[40];
ocl::Kernel k("calcAlmostDist2Weight", ocl::photo::nlmeans_oclsrc,
format("-D OP_CALC_WEIGHTS -D FT=%s%s", ocl::typeToStr(depth),
doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
format("-D OP_CALC_WEIGHTS -D FT=%s -D w_t=%s"
" -D wlut_t=%s -D convert_wlut_t=%s%s%s",
ocl::typeToStr(depth), ocl::typeToStr(CV_MAKE_TYPE(depth, hn)),
ocl::typeToStr(CV_32SC(hn)), ocl::convertTypeStr(depth, CV_32S, hn, buf),
doubleSupport ? " -D DOUBLE_SUPPORT" : "",
normType == NORM_L1 ? " -D ABS" : ""));
if (k.empty())
return false;
k.args(ocl::KernelArg::PtrWriteOnly(almostDist2Weight), almostMaxDist,
almostDist2ActualDistMultiplier, fixedPointMult, den, WEIGHT_THRESHOLD);
almostDist2ActualDistMultiplier, fixedPointMult,
ocl::KernelArg::Constant(den, (hn == 3 ? 4 : hn)*sizeof(FT)), WEIGHT_THRESHOLD);
size_t globalsize[1] = { almostMaxDist };
return k.run(1, globalsize, NULL, false);
}
static bool ocl_fastNlMeansDenoising(InputArray _src, OutputArray _dst, float h,
int templateWindowSize, int searchWindowSize)
static bool ocl_fastNlMeansDenoising(InputArray _src, OutputArray _dst, const float *h, int hn,
int templateWindowSize, int searchWindowSize, int normType)
{
int type = _src.type(), cn = CV_MAT_CN(type);
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int ctaSize = ocl::Device::getDefault().isIntel() ? CTA_SIZE_INTEL : CTA_SIZE_DEFAULT;
Size size = _src.size();
if ( type != CV_8UC1 && type != CV_8UC2 && type != CV_8UC4 )
if (cn < 1 || cn > 4 || ((normType != NORM_L2 || depth != CV_8U) &&
(normType != NORM_L1 || (depth != CV_8U && depth != CV_16U))))
return false;
int templateWindowHalfWize = templateWindowSize / 2;
@ -84,33 +99,68 @@ static bool ocl_fastNlMeansDenoising(InputArray _src, OutputArray _dst, float h,
int nblocksx = divUp(size.width, BLOCK_COLS), nblocksy = divUp(size.height, BLOCK_ROWS);
int almostTemplateWindowSizeSqBinShift = -1;
char cvt[2][40];
char buf[4][40];
String opts = format("-D OP_CALC_FASTNLMEANS -D TEMPLATE_SIZE=%d -D SEARCH_SIZE=%d"
" -D uchar_t=%s -D int_t=%s -D BLOCK_COLS=%d -D BLOCK_ROWS=%d"
" -D pixel_t=%s -D int_t=%s -D wlut_t=%s"
" -D weight_t=%s -D convert_weight_t=%s -D sum_t=%s -D convert_sum_t=%s"
" -D BLOCK_COLS=%d -D BLOCK_ROWS=%d"
" -D CTA_SIZE=%d -D TEMPLATE_SIZE2=%d -D SEARCH_SIZE2=%d"
" -D convert_int_t=%s -D cn=%d -D convert_uchar_t=%s",
templateWindowSize, searchWindowSize, ocl::typeToStr(type),
ocl::typeToStr(CV_32SC(cn)), BLOCK_COLS, BLOCK_ROWS, ctaSize,
templateWindowHalfWize, searchWindowHalfSize,
ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]), cn,
ocl::convertTypeStr(CV_32S, CV_8U, cn, cvt[1]));
" -D convert_int_t=%s -D cn=%d -D psz=%d -D convert_pixel_t=%s%s",
templateWindowSize, searchWindowSize,
ocl::typeToStr(type), ocl::typeToStr(CV_32SC(cn)),
ocl::typeToStr(CV_32SC(hn)),
depth == CV_8U ? ocl::typeToStr(CV_32SC(hn)) :
format("long%s", hn > 1 ? format("%d", hn).c_str() : "").c_str(),
depth == CV_8U ? ocl::convertTypeStr(CV_32S, CV_32S, hn, buf[0]) :
format("convert_long%s", hn > 1 ? format("%d", hn).c_str() : "").c_str(),
depth == CV_8U ? ocl::typeToStr(CV_32SC(cn)) :
format("long%s", cn > 1 ? format("%d", cn).c_str() : "").c_str(),
depth == CV_8U ? ocl::convertTypeStr(depth, CV_32S, cn, buf[1]) :
format("convert_long%s", cn > 1 ? format("%d", cn).c_str() : "").c_str(),
BLOCK_COLS, BLOCK_ROWS,
ctaSize, templateWindowHalfWize, searchWindowHalfSize,
ocl::convertTypeStr(depth, CV_32S, cn, buf[2]), cn,
(depth == CV_8U ? sizeof(uchar) : sizeof(ushort)) * (cn == 3 ? 4 : cn),
ocl::convertTypeStr(CV_32S, depth, cn, buf[3]),
normType == NORM_L1 ? " -D ABS" : "");
ocl::Kernel k("fastNlMeansDenoising", ocl::photo::nlmeans_oclsrc, opts);
if (k.empty())
return false;
UMat almostDist2Weight;
if (!ocl_calcAlmostDist2Weight<float>(almostDist2Weight, searchWindowSize, templateWindowSize, h, cn,
almostTemplateWindowSizeSqBinShift))
if ((depth == CV_8U &&
!ocl_calcAlmostDist2Weight<float, uchar, int>(almostDist2Weight,
searchWindowSize, templateWindowSize,
h, hn, cn, normType,
almostTemplateWindowSizeSqBinShift)) ||
(depth == CV_16U &&
!ocl_calcAlmostDist2Weight<float, ushort, int64>(almostDist2Weight,
searchWindowSize, templateWindowSize,
h, hn, cn, normType,
almostTemplateWindowSizeSqBinShift)))
return false;
CV_Assert(almostTemplateWindowSizeSqBinShift >= 0);
UMat srcex;
int borderSize = searchWindowHalfSize + templateWindowHalfWize;
copyMakeBorder(_src, srcex, borderSize, borderSize, borderSize, borderSize, BORDER_DEFAULT);
if (cn == 3) {
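// 3-channel input is repacked into a 4-channel UMat: the kernel addresses
// pixels in steps of psz bytes, which is padded to 4 samples when cn == 3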
srcex.create(size.height + 2*borderSize, size.width + 2*borderSize, CV_MAKE_TYPE(depth, 4));
UMat src(srcex, Rect(borderSize, borderSize, size.width, size.height));
int from_to[] = { 0,0, 1,1, 2,2 };
mixChannels(std::vector<UMat>(1, _src.getUMat()), std::vector<UMat>(1, src), from_to, 3);
copyMakeBorder(src, srcex, borderSize, borderSize, borderSize, borderSize,
BORDER_DEFAULT|BORDER_ISOLATED); // create borders in place
}
else
copyMakeBorder(_src, srcex, borderSize, borderSize, borderSize, borderSize, BORDER_DEFAULT);
_dst.create(size, type);
UMat dst = _dst.getUMat();
UMat dst;
if (cn == 3)
dst.create(size, CV_MAKE_TYPE(depth, 4));
else
dst = _dst.getUMat();
int searchWindowSizeSq = searchWindowSize * searchWindowSize;
Size upColSumSize(size.width, searchWindowSizeSq * nblocksy);
@ -123,7 +173,14 @@ static bool ocl_fastNlMeansDenoising(InputArray _src, OutputArray _dst, float h,
ocl::KernelArg::PtrReadOnly(buffer), almostTemplateWindowSizeSqBinShift);
size_t globalsize[2] = { nblocksx * ctaSize, nblocksy }, localsize[2] = { ctaSize, 1 };
return k.run(2, globalsize, localsize, false);
if (!k.run(2, globalsize, localsize, false)) return false;
if (cn == 3) {
int from_to[] = { 0,0, 1,1, 2,2 };
mixChannels(std::vector<UMat>(1, dst), std::vector<UMat>(1, _dst.getUMat()), from_to, 3);
}
return true;
}
static bool ocl_fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,

View File

@ -50,14 +50,14 @@
using namespace cv;
template <typename T>
template <typename T, typename IT, typename UIT, typename D, typename WT>
struct FastNlMeansMultiDenoisingInvoker :
ParallelLoopBody
{
public:
FastNlMeansMultiDenoisingInvoker(const std::vector<Mat>& srcImgs, int imgToDenoiseIndex,
int temporalWindowSize, Mat& dst, int template_window_size,
int search_window_size, const float h);
int search_window_size, const float *h);
void operator() (const Range& range) const;
@ -81,9 +81,9 @@ private:
int search_window_half_size_;
int temporal_window_half_size_;
int fixed_point_mult_;
typename pixelInfo<WT>::sampleType fixed_point_mult_;
int almost_template_window_size_sq_bin_shift;
std::vector<int> almost_dist2weight;
std::vector<WT> almost_dist2weight;
void calcDistSumsForFirstElementInRow(int i, Array3d<int>& dist_sums,
Array4d<int>& col_dist_sums,
@ -94,19 +94,19 @@ private:
Array4d<int>& up_col_dist_sums) const;
};
template <class T>
FastNlMeansMultiDenoisingInvoker<T>::FastNlMeansMultiDenoisingInvoker(
template <typename T, typename IT, typename UIT, typename D, typename WT>
FastNlMeansMultiDenoisingInvoker<T, IT, UIT, D, WT>::FastNlMeansMultiDenoisingInvoker(
const std::vector<Mat>& srcImgs,
int imgToDenoiseIndex,
int temporalWindowSize,
cv::Mat& dst,
int template_window_size,
int search_window_size,
const float h) :
const float *h) :
dst_(dst), extended_srcs_(srcImgs.size())
{
CV_Assert(srcImgs.size() > 0);
CV_Assert(srcImgs[0].channels() == sizeof(T));
CV_Assert(srcImgs[0].channels() == pixelInfo<T>::channels);
rows_ = srcImgs[0].rows;
cols_ = srcImgs[0].cols;
@ -125,8 +125,10 @@ FastNlMeansMultiDenoisingInvoker<T>::FastNlMeansMultiDenoisingInvoker(
border_size_, border_size_, border_size_, border_size_, cv::BORDER_DEFAULT);
main_extended_src_ = extended_srcs_[temporal_window_half_size_];
const int max_estimate_sum_value = temporal_window_size_ * search_window_size_ * search_window_size_ * 255;
fixed_point_mult_ = std::numeric_limits<int>::max() / max_estimate_sum_value;
const IT max_estimate_sum_value =
(IT)temporal_window_size_ * (IT)search_window_size_ * (IT)search_window_size_ * (IT)pixelInfo<T>::sampleMax();
fixed_point_mult_ = (int)std::min<IT>(std::numeric_limits<IT>::max() / max_estimate_sum_value,
pixelInfo<WT>::sampleMax());
// precalc weight for every possible l2 dist between blocks
// additional optimization of precalced weights to replace division(averaging) by binary shift
@ -138,30 +140,24 @@ FastNlMeansMultiDenoisingInvoker<T>::FastNlMeansMultiDenoisingInvoker(
int almost_template_window_size_sq = 1 << almost_template_window_size_sq_bin_shift;
double almost_dist2actual_dist_multiplier = (double) almost_template_window_size_sq / template_window_size_sq;
int max_dist = 255 * 255 * sizeof(T);
int almost_max_dist = (int) (max_dist / almost_dist2actual_dist_multiplier + 1);
int max_dist = D::template maxDist<T>();
int almost_max_dist = (int)(max_dist / almost_dist2actual_dist_multiplier + 1);
almost_dist2weight.resize(almost_max_dist);
const double WEIGHT_THRESHOLD = 0.001;
for (int almost_dist = 0; almost_dist < almost_max_dist; almost_dist++)
{
double dist = almost_dist * almost_dist2actual_dist_multiplier;
int weight = cvRound(fixed_point_mult_ * std::exp(-dist / (h * h * sizeof(T))));
if (weight < WEIGHT_THRESHOLD * fixed_point_mult_)
weight = 0;
almost_dist2weight[almost_dist] = weight;
almost_dist2weight[almost_dist] =
D::template calcWeight<T, WT>(dist, h, fixed_point_mult_);
}
CV_Assert(almost_dist2weight[0] == fixed_point_mult_);
// additional optimization init end
if (dst_.empty())
dst_ = Mat::zeros(srcImgs[0].size(), srcImgs[0].type());
}
template <class T>
void FastNlMeansMultiDenoisingInvoker<T>::operator() (const Range& range) const
template <typename T, typename IT, typename UIT, typename D, typename WT>
void FastNlMeansMultiDenoisingInvoker<T, IT, UIT, D, WT>::operator() (const Range& range) const
{
int row_from = range.start;
int row_to = range.end - 1;
@ -234,7 +230,7 @@ void FastNlMeansMultiDenoisingInvoker<T>::operator() (const Range& range) const
dist_sums_row[x] -= col_dist_sums_row[x];
col_dist_sums_row[x] = up_col_dist_sums_row[x] +
calcUpDownDist(a_up, a_down, b_up_ptr[start_bx + x], b_down_ptr[start_bx + x]);
D::template calcUpDownDist<T>(a_up, a_down, b_up_ptr[start_bx + x], b_down_ptr[start_bx + x]);
dist_sums_row[x] += col_dist_sums_row[x];
up_col_dist_sums_row[x] = col_dist_sums_row[x];
@ -247,11 +243,11 @@ void FastNlMeansMultiDenoisingInvoker<T>::operator() (const Range& range) const
}
// calc weights
int weights_sum = 0;
int estimation[3];
for (size_t channel_num = 0; channel_num < sizeof(T); channel_num++)
IT estimation[pixelInfo<T>::channels], weights_sum[pixelInfo<WT>::channels];
for (size_t channel_num = 0; channel_num < pixelInfo<T>::channels; channel_num++)
estimation[channel_num] = 0;
for (size_t channel_num = 0; channel_num < pixelInfo<WT>::channels; channel_num++)
weights_sum[channel_num] = 0;
for (int d = 0; d < temporal_window_size_; d++)
{
@ -266,26 +262,22 @@ void FastNlMeansMultiDenoisingInvoker<T>::operator() (const Range& range) const
{
int almostAvgDist = dist_sums_row[x] >> almost_template_window_size_sq_bin_shift;
int weight = almost_dist2weight[almostAvgDist];
weights_sum += weight;
WT weight = almost_dist2weight[almostAvgDist];
T p = cur_row_ptr[border_size_ + search_window_x + x];
incWithWeight(estimation, weight, p);
incWithWeight<T, IT, WT>(estimation, weights_sum, weight, p);
}
}
}
for (size_t channel_num = 0; channel_num < sizeof(T); channel_num++)
estimation[channel_num] = ((unsigned)estimation[channel_num] + weights_sum / 2) / weights_sum;
dst_.at<T>(i,j) = saturateCastFromArray<T>(estimation);
divByWeightsSum<IT, UIT, pixelInfo<T>::channels, pixelInfo<WT>::channels>(estimation,
weights_sum);
dst_.at<T>(i,j) = saturateCastFromArray<T, IT>(estimation);
}
}
}
template <class T>
inline void FastNlMeansMultiDenoisingInvoker<T>::calcDistSumsForFirstElementInRow(
template <typename T, typename IT, typename UIT, typename D, typename WT>
inline void FastNlMeansMultiDenoisingInvoker<T, IT, UIT, D, WT>::calcDistSumsForFirstElementInRow(
int i, Array3d<int>& dist_sums, Array4d<int>& col_dist_sums, Array4d<int>& up_col_dist_sums) const
{
int j = 0;
@ -310,7 +302,7 @@ inline void FastNlMeansMultiDenoisingInvoker<T>::calcDistSumsForFirstElementInRo
{
for (int ty = -template_window_half_size_; ty <= template_window_half_size_; ty++)
{
int dist = calcDist<T>(
int dist = D::template calcDist<T>(
main_extended_src_.at<T>(border_size_ + i + ty, border_size_ + j + tx),
cur_extended_src.at<T>(border_size_ + start_y + ty, border_size_ + start_x + tx));
@ -325,8 +317,8 @@ inline void FastNlMeansMultiDenoisingInvoker<T>::calcDistSumsForFirstElementInRo
}
}
template <class T>
inline void FastNlMeansMultiDenoisingInvoker<T>::calcDistSumsForElementInFirstRow(
template <typename T, typename IT, typename UIT, typename D, typename WT>
inline void FastNlMeansMultiDenoisingInvoker<T, IT, UIT, D, WT>::calcDistSumsForElementInFirstRow(
int i, int j, int first_col_num, Array3d<int>& dist_sums,
Array4d<int>& col_dist_sums, Array4d<int>& up_col_dist_sums) const
{
@ -353,7 +345,7 @@ inline void FastNlMeansMultiDenoisingInvoker<T>::calcDistSumsForElementInFirstRo
int* col_dist_sums_ptr = &col_dist_sums[new_last_col_num][d][y][x];
for (int ty = -template_window_half_size_; ty <= template_window_half_size_; ty++)
{
*col_dist_sums_ptr += calcDist<T>(
*col_dist_sums_ptr += D::template calcDist<T>(
main_extended_src_.at<T>(ay + ty, ax),
cur_extended_src.at<T>(by + ty, bx));
}

View File

@ -20,21 +20,23 @@
#ifdef OP_CALC_WEIGHTS
__kernel void calcAlmostDist2Weight(__global int * almostDist2Weight, int almostMaxDist,
__kernel void calcAlmostDist2Weight(__global wlut_t * almostDist2Weight, int almostMaxDist,
FT almostDist2ActualDistMultiplier, int fixedPointMult,
FT den, FT WEIGHT_THRESHOLD)
w_t den, FT WEIGHT_THRESHOLD)
{
int almostDist = get_global_id(0);
if (almostDist < almostMaxDist)
{
FT dist = almostDist * almostDist2ActualDistMultiplier;
int weight = convert_int_sat_rte(fixedPointMult * exp(-dist * den));
if (weight < WEIGHT_THRESHOLD * fixedPointMult)
weight = 0;
almostDist2Weight[almostDist] = weight;
#ifdef ABS
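// ABS is defined for NORM_L1: dist is an absolute-difference distance,
// so it is squared inside the exponent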
w_t w = exp((w_t)(-dist*dist) * den);
#else
w_t w = exp((w_t)(-dist) * den);
#endif
wlut_t weight = convert_wlut_t(fixedPointMult * (isnan(w) ? (w_t)1.0 : w));
almostDist2Weight[almostDist] =
weight < (wlut_t)(WEIGHT_THRESHOLD * fixedPointMult) ? (wlut_t)0 : weight;
}
}
@ -44,21 +46,35 @@ __kernel void calcAlmostDist2Weight(__global int * almostDist2Weight, int almost
#define SEARCH_SIZE_SQ (SEARCH_SIZE * SEARCH_SIZE)
inline int calcDist(uchar_t a, uchar_t b)
inline int calcDist(pixel_t a, pixel_t b)
{
#ifdef ABS
int_t retval = convert_int_t(abs_diff(a, b));
#else
int_t diff = convert_int_t(a) - convert_int_t(b);
int_t retval = diff * diff;
#endif
#if cn == 1
return retval;
#elif cn == 2
return retval.x + retval.y;
#elif cn == 3
return retval.x + retval.y + retval.z;
#elif cn == 4
return retval.x + retval.y + retval.z + retval.w;
#else
#error "cn should be either 1 or 2"
#error "cn should be either 1, 2, 3 or 4"
#endif
}
inline int calcDistUpDown(uchar_t down_value, uchar_t down_value_t, uchar_t up_value, uchar_t up_value_t)
#ifdef ABS
inline int calcDistUpDown(pixel_t down_value, pixel_t down_value_t, pixel_t up_value, pixel_t up_value_t)
{
return calcDist(down_value, down_value_t) - calcDist(up_value, up_value_t);
}
#else
inline int calcDistUpDown(pixel_t down_value, pixel_t down_value_t, pixel_t up_value, pixel_t up_value_t)
{
int_t A = convert_int_t(down_value) - convert_int_t(down_value_t);
int_t B = convert_int_t(up_value) - convert_int_t(up_value_t);
@ -68,10 +84,15 @@ inline int calcDistUpDown(uchar_t down_value, uchar_t down_value_t, uchar_t up_v
return retval;
#elif cn == 2
return retval.x + retval.y;
#elif cn == 3
return retval.x + retval.y + retval.z;
#elif cn == 4
return retval.x + retval.y + retval.z + retval.w;
#else
#error "cn should be either 1 or 2"
#error "cn should be either 1, 2, 3 or 4"
#endif
}
#endif
#define COND if (x == 0 && y == 0)
@ -87,9 +108,9 @@ inline void calcFirstElementInRow(__global const uchar * src, int src_step, int
{
int dist = 0, value;
__global const uchar_t * src_template = (__global const uchar_t *)(src +
mad24(sy + i / SEARCH_SIZE, src_step, mad24(cn, sx + i % SEARCH_SIZE, src_offset)));
__global const uchar_t * src_current = (__global const uchar_t *)(src + mad24(y, src_step, mad24(cn, x, src_offset)));
__global const pixel_t * src_template = (__global const pixel_t *)(src +
mad24(sy + i / SEARCH_SIZE, src_step, mad24(psz, sx + i % SEARCH_SIZE, src_offset)));
__global const pixel_t * src_current = (__global const pixel_t *)(src + mad24(y, src_step, mad24(psz, x, src_offset)));
__global int * col_dists_current = col_dists + i * TEMPLATE_SIZE;
#pragma unroll
@ -107,8 +128,8 @@ inline void calcFirstElementInRow(__global const uchar * src, int src_step, int
dist += value;
}
src_current = (__global const uchar_t *)((__global const uchar *)src_current + src_step);
src_template = (__global const uchar_t *)((__global const uchar *)src_template + src_step);
src_current = (__global const pixel_t *)((__global const uchar *)src_current + src_step);
src_template = (__global const pixel_t *)((__global const uchar *)src_template + src_step);
}
#pragma unroll
@ -130,9 +151,9 @@ inline void calcElementInFirstRow(__global const uchar * src, int src_step, int
for (int i = id; i < SEARCH_SIZE_SQ; i += CTA_SIZE)
{
__global const uchar_t * src_current = (__global const uchar_t *)(src + mad24(y, src_step, mad24(cn, x, src_offset)));
__global const uchar_t * src_template = (__global const uchar_t *)(src +
mad24(sy + i / SEARCH_SIZE, src_step, mad24(cn, sx + i % SEARCH_SIZE, src_offset)));
__global const pixel_t * src_current = (__global const pixel_t *)(src + mad24(y, src_step, mad24(psz, x, src_offset)));
__global const pixel_t * src_template = (__global const pixel_t *)(src +
mad24(sy + i / SEARCH_SIZE, src_step, mad24(psz, sx + i % SEARCH_SIZE, src_offset)));
__global int * col_dists_current = col_dists + TEMPLATE_SIZE * i;
int col_dist = 0;
@ -142,8 +163,8 @@ inline void calcElementInFirstRow(__global const uchar * src, int src_step, int
{
col_dist += calcDist(src_current[0], src_template[0]);
src_current = (__global const uchar_t *)((__global const uchar *)src_current + src_step);
src_template = (__global const uchar_t *)((__global const uchar *)src_template + src_step);
src_current = (__global const pixel_t *)((__global const uchar *)src_current + src_step);
src_template = (__global const pixel_t *)((__global const uchar *)src_template + src_step);
}
dists[i] += col_dist - col_dists_current[first];
@ -160,8 +181,8 @@ inline void calcElement(__global const uchar * src, int src_step, int src_offset
int sy_up = y - TEMPLATE_SIZE2 - 1;
int sy_down = y + TEMPLATE_SIZE2;
uchar_t up_value = *(__global const uchar_t *)(src + mad24(sy_up, src_step, mad24(cn, sx, src_offset)));
uchar_t down_value = *(__global const uchar_t *)(src + mad24(sy_down, src_step, mad24(cn, sx, src_offset)));
pixel_t up_value = *(__global const pixel_t *)(src + mad24(sy_up, src_step, mad24(psz, sx, src_offset)));
pixel_t down_value = *(__global const pixel_t *)(src + mad24(sy_down, src_step, mad24(psz, sx, src_offset)));
sx -= SEARCH_SIZE2;
sy_up -= SEARCH_SIZE2;
@ -171,8 +192,8 @@ inline void calcElement(__global const uchar * src, int src_step, int src_offset
{
int wx = i % SEARCH_SIZE, wy = i / SEARCH_SIZE;
uchar_t up_value_t = *(__global const uchar_t *)(src + mad24(sy_up + wy, src_step, mad24(cn, sx + wx, src_offset)));
uchar_t down_value_t = *(__global const uchar_t *)(src + mad24(sy_down + wy, src_step, mad24(cn, sx + wx, src_offset)));
pixel_t up_value_t = *(__global const pixel_t *)(src + mad24(sy_up + wy, src_step, mad24(psz, sx + wx, src_offset)));
pixel_t down_value_t = *(__global const pixel_t *)(src + mad24(sy_down + wy, src_step, mad24(psz, sx + wx, src_offset)));
__global int * col_dists_current = col_dists + mad24(i, TEMPLATE_SIZE, first);
__global int * up_col_dists_current = up_col_dists + mad24(x0, SEARCH_SIZE_SQ, i);
@ -186,24 +207,25 @@ inline void calcElement(__global const uchar * src, int src_step, int src_offset
}
inline void convolveWindow(__global const uchar * src, int src_step, int src_offset,
__local int * dists, __global const int * almostDist2Weight,
__local int * dists, __global const wlut_t * almostDist2Weight,
__global uchar * dst, int dst_step, int dst_offset,
int y, int x, int id, __local int * weights_local,
__local int_t * weighted_sum_local, int almostTemplateWindowSizeSqBinShift)
int y, int x, int id, __local weight_t * weights_local,
__local sum_t * weighted_sum_local, int almostTemplateWindowSizeSqBinShift)
{
int sx = x - SEARCH_SIZE2, sy = y - SEARCH_SIZE2, weights = 0;
int_t weighted_sum = (int_t)(0);
int sx = x - SEARCH_SIZE2, sy = y - SEARCH_SIZE2;
weight_t weights = (weight_t)0;
sum_t weighted_sum = (sum_t)0;
for (int i = id; i < SEARCH_SIZE_SQ; i += CTA_SIZE)
{
int src_index = mad24(sy + i / SEARCH_SIZE, src_step, mad24(i % SEARCH_SIZE + sx, cn, src_offset));
int_t src_value = convert_int_t(*(__global const uchar_t *)(src + src_index));
int src_index = mad24(sy + i / SEARCH_SIZE, src_step, mad24(i % SEARCH_SIZE + sx, psz, src_offset));
sum_t src_value = convert_sum_t(*(__global const pixel_t *)(src + src_index));
int almostAvgDist = dists[i] >> almostTemplateWindowSizeSqBinShift;
int weight = almostDist2Weight[almostAvgDist];
weight_t weight = convert_weight_t(almostDist2Weight[almostAvgDist]);
weights += weight;
weighted_sum += (int_t)(weight) * src_value;
weighted_sum += (sum_t)weight * src_value;
}
weights_local[id] = weights;
@@ -223,26 +245,27 @@ inline void convolveWindow(__global const uchar * src, int src_step, int src_off
if (id == 0)
{
int dst_index = mad24(y, dst_step, mad24(cn, x, dst_offset));
int_t weighted_sum_local_0 = weighted_sum_local[0] + weighted_sum_local[1] +
int dst_index = mad24(y, dst_step, mad24(psz, x, dst_offset));
sum_t weighted_sum_local_0 = weighted_sum_local[0] + weighted_sum_local[1] +
weighted_sum_local[2] + weighted_sum_local[3];
int weights_local_0 = weights_local[0] + weights_local[1] + weights_local[2] + weights_local[3];
weight_t weights_local_0 = weights_local[0] + weights_local[1] + weights_local[2] + weights_local[3];
*(__global uchar_t *)(dst + dst_index) = convert_uchar_t(weighted_sum_local_0 / (int_t)(weights_local_0));
*(__global pixel_t *)(dst + dst_index) = convert_pixel_t(weighted_sum_local_0 / (sum_t)weights_local_0);
}
}
__kernel void fastNlMeansDenoising(__global const uchar * src, int src_step, int src_offset,
__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,
__global const int * almostDist2Weight, __global uchar * buffer,
__global const wlut_t * almostDist2Weight, __global uchar * buffer,
int almostTemplateWindowSizeSqBinShift)
{
int block_x = get_group_id(0), nblocks_x = get_num_groups(0);
int block_y = get_group_id(1);
int id = get_local_id(0), first;
__local int dists[SEARCH_SIZE_SQ], weights[CTA_SIZE];
__local int_t weighted_sum[CTA_SIZE];
__local int dists[SEARCH_SIZE_SQ];
__local weight_t weights[CTA_SIZE];
__local sum_t weighted_sum[CTA_SIZE];
int x0 = block_x * BLOCK_COLS, x1 = min(x0 + BLOCK_COLS, dst_cols);
int y0 = block_y * BLOCK_ROWS, y1 = min(y0 + BLOCK_ROWS, dst_rows);
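Note on the kernel hunks above: the hard-coded uchar_t/int_t types and the channel count cn are replaced by abstract pixel_t, sum_t, weight_t and wlut_t types plus a pixel byte size psz, so one kernel source can be instantiated for several depths, channel counts and weight tables through OpenCL build options. A minimal sketch of that pattern, assuming a hypothetical helper whose option names simply mirror the typedefs the kernel expects (the real host code may differ):

#include <opencv2/core/ocl.hpp>
using namespace cv;

// Hypothetical helper (illustrative, not the actual host code): builds the
// -D options binding the kernel's abstract types for 8-bit images with cn
// channels, accumulating in int and using int weights.
static String nlMeansBuildOptions(int cn)
{
    CV_Assert(cn == 1 || cn == 2 || cn == 4); // vec3 loads would need their own layout
    String v = cn == 1 ? String("") : format("%d", cn); // OpenCL has no uchar1/int1
    return format("-D pixel_t=uchar%s -D sum_t=int%s -D weight_t=int -D wlut_t=int "
                  "-D convert_sum_t=convert_int%s -D convert_pixel_t=convert_uchar%s_sat "
                  "-D convert_weight_t=convert_int -D psz=%d", // psz: bytes per pixel for mad24() addressing
                  v.c_str(), v.c_str(), v.c_str(), v.c_str(), cn);
}

The fastNlMeansDenoising kernel above would then be compiled as, for example, ocl::Kernel("fastNlMeansDenoising", programSource, nlMeansBuildOptions(2)).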


@@ -13,11 +13,11 @@
namespace cvtest {
namespace ocl {
PARAM_TEST_CASE(FastNlMeansDenoisingTestBase, Channels, bool)
PARAM_TEST_CASE(FastNlMeansDenoisingTestBase, Channels, int, bool, bool)
{
int cn, templateWindowSize, searchWindowSize;
float h;
bool use_roi;
int cn, normType, templateWindowSize, searchWindowSize;
std::vector<float> h;
bool use_roi, use_image;
TEST_DECLARE_INPUT_PARAMETER(src);
TEST_DECLARE_OUTPUT_PARAMETER(dst);
@@ -25,29 +25,46 @@ PARAM_TEST_CASE(FastNlMeansDenoisingTestBase, Channels, bool)
virtual void SetUp()
{
cn = GET_PARAM(0);
use_roi = GET_PARAM(1);
normType = GET_PARAM(1);
use_roi = GET_PARAM(2);
use_image = GET_PARAM(3);
templateWindowSize = 7;
searchWindowSize = 21;
h = 3.0f;
h.resize(cn);
for (int i=0; i<cn; i++)
h[i] = 3.0f + 0.5f*i;
}
virtual void generateTestData()
{
const int type = CV_8UC(cn);
Mat image;
if (cn == 1)
{
image = readImage("denoising/lena_noised_gaussian_sigma=10.png", IMREAD_GRAYSCALE);
if (use_image) {
image = readImage("denoising/lena_noised_gaussian_sigma=10.png",
cn == 1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);
ASSERT_FALSE(image.empty());
}
const int type = CV_8UC(cn);
Size roiSize = cn == 1 ? image.size() : randomSize(1, MAX_VALUE);
Size roiSize = use_image ? image.size() : randomSize(1, MAX_VALUE);
Border srcBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
randomSubMat(src, src_roi, roiSize, srcBorder, type, 0, 255);
if (cn == 1)
image.copyTo(src_roi);
if (use_image) {
ASSERT_TRUE(cn > 0 && cn <= 4);
if (cn == 2) {
int from_to[] = { 0,0, 1,1 };
src_roi.create(roiSize, type);
mixChannels(&image, 1, &src_roi, 1, from_to, 2);
}
else if (cn == 4) {
int from_to[] = { 0,0, 1,1, 2,2, 1,3};
src_roi.create(roiSize, type);
mixChannels(&image, 1, &src_roi, 1, from_to, 4);
}
else image.copyTo(src_roi);
}
Border dstBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
randomSubMat(dst, dst_roi, roiSize, dstBorder, type, 0, 255);
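The from_to tables above map source channel to destination channel so that a 1- or 3-channel test image can populate 2- and 4-channel inputs. A self-contained illustration of the 4-channel case (a standalone sketch, not part of the test):

#include <opencv2/core.hpp>

int main()
{
    cv::Mat image(16, 16, CV_8UC3, cv::Scalar(10, 20, 30));
    cv::Mat src_roi(image.size(), CV_8UC4);
    // Pairs are (source channel, destination channel):
    // B->0, G->1, R->2, and channel 1 is reused as the 4th channel.
    int from_to[] = { 0,0, 1,1, 2,2, 1,3 };
    cv::mixChannels(&image, 1, &src_roi, 1, from_to, 4);
    return 0;
}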
@@ -65,8 +82,23 @@ OCL_TEST_P(FastNlMeansDenoising, Mat)
{
generateTestData();
OCL_OFF(cv::fastNlMeansDenoising(src_roi, dst_roi, h, templateWindowSize, searchWindowSize));
OCL_ON(cv::fastNlMeansDenoising(usrc_roi, udst_roi, h, templateWindowSize, searchWindowSize));
OCL_OFF(cv::fastNlMeansDenoising(src_roi, dst_roi, std::vector<float>(1, h[0]), templateWindowSize, searchWindowSize, normType));
OCL_ON(cv::fastNlMeansDenoising(usrc_roi, udst_roi, std::vector<float>(1, h[0]), templateWindowSize, searchWindowSize, normType));
OCL_EXPECT_MATS_NEAR(dst, 1);
}
}
typedef FastNlMeansDenoisingTestBase FastNlMeansDenoising_hsep;
OCL_TEST_P(FastNlMeansDenoising_hsep, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
OCL_OFF(cv::fastNlMeansDenoising(src_roi, dst_roi, h, templateWindowSize, searchWindowSize, normType));
OCL_ON(cv::fastNlMeansDenoising(usrc_roi, udst_roi, h, templateWindowSize, searchWindowSize, normType));
OCL_EXPECT_MATS_NEAR(dst, 1);
}
@@ -80,15 +112,21 @@ OCL_TEST_P(FastNlMeansDenoisingColored, Mat)
{
generateTestData();
OCL_OFF(cv::fastNlMeansDenoisingColored(src_roi, dst_roi, h, h, templateWindowSize, searchWindowSize));
OCL_ON(cv::fastNlMeansDenoisingColored(usrc_roi, udst_roi, h, h, templateWindowSize, searchWindowSize));
OCL_OFF(cv::fastNlMeansDenoisingColored(src_roi, dst_roi, h[0], h[0], templateWindowSize, searchWindowSize));
OCL_ON(cv::fastNlMeansDenoisingColored(usrc_roi, udst_roi, h[0], h[0], templateWindowSize, searchWindowSize));
OCL_EXPECT_MATS_NEAR(dst, 1);
}
}
OCL_INSTANTIATE_TEST_CASE_P(Photo, FastNlMeansDenoising, Combine(Values(1, 2), Bool()));
OCL_INSTANTIATE_TEST_CASE_P(Photo, FastNlMeansDenoisingColored, Combine(Values(3, 4), Bool()));
OCL_INSTANTIATE_TEST_CASE_P(Photo, FastNlMeansDenoising,
Combine(Values(1, 2, 3, 4), Values((int)NORM_L2, (int)NORM_L1),
Bool(), Values(true)));
OCL_INSTANTIATE_TEST_CASE_P(Photo, FastNlMeansDenoising_hsep,
Combine(Values(1, 2, 3, 4), Values((int)NORM_L2, (int)NORM_L1),
Bool(), Values(true)));
OCL_INSTANTIATE_TEST_CASE_P(Photo, FastNlMeansDenoisingColored,
Combine(Values(3, 4), Values((int)NORM_L2), Bool(), Values(false)));
} } // namespace cvtest::ocl
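For reference, the overload these tests exercise takes a per-channel filtering strength and a norm type; a minimal usage sketch on an 8-bit 3-channel image:

#include <opencv2/photo.hpp>
#include <vector>

void denoiseExample(const cv::Mat& noisy, cv::Mat& clean)
{
    // One h per channel, template window 7, search window 21, L1 distance.
    std::vector<float> h;
    h.push_back(3.0f);
    h.push_back(3.5f);
    h.push_back(4.0f);
    cv::fastNlMeansDenoising(noisy, clean, h, 7, 21, cv::NORM_L1);
}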


@ -50,7 +50,11 @@ ocv_add_library(${the_module} SHARED ${PYTHON_SOURCE_DIR}/src2/cv2.cpp ${cv2_gen
if(PYTHON_DEBUG_LIBRARIES AND NOT PYTHON_LIBRARIES MATCHES "optimized.*debug")
ocv_target_link_libraries(${the_module} debug ${PYTHON_DEBUG_LIBRARIES} optimized ${PYTHON_LIBRARIES})
else()
ocv_target_link_libraries(${the_module} ${PYTHON_LIBRARIES})
if(APPLE)
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
else()
ocv_target_link_libraries(${the_module} ${PYTHON_LIBRARIES})
endif()
endif()
ocv_target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS})


@@ -255,20 +255,24 @@ class ClassInfo(object):
self.methods = {}
self.props = []
self.consts = {}
self.base = None
customname = False
if decl:
self.bases = decl[1].split()[1:]
if len(self.bases) > 1:
bases = decl[1].split()[1:]
if len(bases) > 1:
print("Note: Class %s has more than 1 base class (not supported by Python C extensions)" % (self.name,))
print(" Bases: ", " ".join(self.bases))
print(" Bases: ", " ".join(bases))
print(" Only the first base class will be used")
self.bases = [self.bases[0].strip(",")]
#return sys.exit(-1)
if self.bases and self.bases[0].startswith("cv::"):
self.bases[0] = self.bases[0][4:]
if self.bases and self.bases[0] == "Algorithm":
self.isalgorithm = True
elif len(bases) == 1:
self.base = bases[0].strip(",")
if self.base.startswith("cv::"):
self.base = self.base[4:]
if self.base == "Algorithm":
self.isalgorithm = True
self.base = self.base.replace("::", "_")
for m in decl[2]:
if m.startswith("="):
self.wname = m[1:]
@@ -285,8 +289,8 @@ class ClassInfo(object):
def gen_map_code(self, all_classes):
code = "static bool pyopencv_to(PyObject* src, %s& dst, const char* name)\n{\n PyObject* tmp;\n bool ok;\n" % (self.cname)
code += "".join([gen_template_set_prop_from_map.substitute(propname=p.name,proptype=p.tp) for p in self.props])
if self.bases:
code += "\n return pyopencv_to(src, (%s&)dst, name);\n}\n" % all_classes[self.bases[0].replace("::", "_")].cname
if self.base:
code += "\n return pyopencv_to(src, (%s&)dst, name);\n}\n" % all_classes[self.base].cname
else:
code += "\n return true;\n}\n"
return code
@@ -330,8 +334,8 @@ class ClassInfo(object):
methods_inits.write(m.get_tab_entry())
baseptr = "NULL"
if self.bases and self.bases[0] in all_classes:
baseptr = "&pyopencv_" + all_classes[self.bases[0]].name + "_Type"
if self.base and self.base in all_classes:
baseptr = "&pyopencv_" + all_classes[self.base].name + "_Type"
code = gen_template_type_impl.substitute(name=self.name, wname=self.wname, cname=self.cname,
getset_code=getset_code.getvalue(), getset_inits=getset_inits.getvalue(),
@@ -753,17 +757,17 @@ class PythonWrapperGenerator(object):
sys.exit(-1)
self.classes[classinfo.name] = classinfo
if classinfo.bases:
chunks = classinfo.bases[0].split('::')
if classinfo.base:
chunks = classinfo.base.split('_')
base = '_'.join(chunks)
while base not in self.classes and len(chunks)>1:
del chunks[-2]
base = '_'.join(chunks)
if base not in self.classes:
print("Generator error: unable to resolve base %s for %s"
% (classinfo.bases[0], classinfo.name))
% (classinfo.base, classinfo.name))
sys.exit(-1)
classinfo.bases[0] = "::".join(chunks)
classinfo.base = base
classinfo.isalgorithm |= self.classes[base].isalgorithm
def split_decl_name(self, name):


@@ -145,6 +145,16 @@ class Hackathon244Tests(NewOpenCVTests):
self.check_close_pairs(mc, mc0, 5)
self.assertLessEqual(abs(mr - mr0), 5)
def test_inheritance(self):
bm = cv2.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from StereoMatcher
boost = cv2.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='run OpenCV python tests')
parser.add_argument('--repo', help='use sample image files from local git repository (path to folder), '


@@ -105,34 +105,64 @@ namespace cv
virtual void collectGarbage();
//! @brief Scale factor
CV_PURE_PROPERTY(int, Scale)
/** @see setScale */
virtual int getScale() const = 0;
/** @copybrief getScale @see getScale */
virtual void setScale(int val) = 0;
//! @brief Iterations count
CV_PURE_PROPERTY(int, Iterations)
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
//! @brief Asymptotic value of steepest descent method
CV_PURE_PROPERTY(double, Tau)
/** @see setTau */
virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
virtual void setTau(double val) = 0;
//! @brief Weight parameter to balance data term and smoothness term
CV_PURE_PROPERTY(double, Labmda)
/** @see setLabmda */
virtual double getLabmda() const = 0;
/** @copybrief getLabmda @see getLabmda */
virtual void setLabmda(double val) = 0;
//! @brief Parameter of spatial distribution in Bilateral-TV
CV_PURE_PROPERTY(double, Alpha)
/** @see setAlpha */
virtual double getAlpha() const = 0;
/** @copybrief getAlpha @see getAlpha */
virtual void setAlpha(double val) = 0;
//! @brief Kernel size of Bilateral-TV filter
CV_PURE_PROPERTY(int, KernelSize)
/** @see setKernelSize */
virtual int getKernelSize() const = 0;
/** @copybrief getKernelSize @see getKernelSize */
virtual void setKernelSize(int val) = 0;
//! @brief Gaussian blur kernel size
CV_PURE_PROPERTY(int, BlurKernelSize)
/** @see setBlurKernelSize */
virtual int getBlurKernelSize() const = 0;
/** @copybrief getBlurKernelSize @see getBlurKernelSize */
virtual void setBlurKernelSize(int val) = 0;
//! @brief Gaussian blur sigma
CV_PURE_PROPERTY(double, BlurSigma)
/** @see setBlurSigma */
virtual double getBlurSigma() const = 0;
/** @copybrief getBlurSigma @see getBlurSigma */
virtual void setBlurSigma(double val) = 0;
//! @brief Radius of the temporal search area
CV_PURE_PROPERTY(int, TemporalAreaRadius)
/** @see setTemporalAreaRadius */
virtual int getTemporalAreaRadius() const = 0;
/** @copybrief getTemporalAreaRadius @see getTemporalAreaRadius */
virtual void setTemporalAreaRadius(int val) = 0;
//! @brief Dense optical flow algorithm
CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)
/** @see setOpticalFlow */
virtual Ptr<cv::superres::DenseOpticalFlowExt> getOpticalFlow() const = 0;
/** @copybrief getOpticalFlow @see getOpticalFlow */
virtual void setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt> &val) = 0;
protected:
SuperResolution();
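The hunks in this header expand CV_PURE_PROPERTY / CV_PURE_PROPERTY_S macro invocations into explicit accessor pairs so that Doxygen can document each getter and setter individually. The macros being removed presumably had roughly this shape (a sketch under that assumption, not the verbatim definitions):

// Assumed shape of the property macros that the hunks above expand manually.
#define CV_PURE_PROPERTY(type, name)       \
    virtual type get##name() const = 0;    \
    virtual void set##name(type val) = 0;

// The _S variant passes the value by const reference, matching
// setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt>&) above.
#define CV_PURE_PROPERTY_S(type, name)     \
    virtual type get##name() const = 0;    \
    virtual void set##name(const type & val) = 0;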


@@ -64,13 +64,34 @@ namespace cv
class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
CV_PURE_PROPERTY(double, PyrScale)
CV_PURE_PROPERTY(int, LevelsNumber)
CV_PURE_PROPERTY(int, WindowSize)
CV_PURE_PROPERTY(int, Iterations)
CV_PURE_PROPERTY(int, PolyN)
CV_PURE_PROPERTY(double, PolySigma)
CV_PURE_PROPERTY(int, Flags)
/** @see setPyrScale */
virtual double getPyrScale() const = 0;
/** @copybrief getPyrScale @see getPyrScale */
virtual void setPyrScale(double val) = 0;
/** @see setLevelsNumber */
virtual int getLevelsNumber() const = 0;
/** @copybrief getLevelsNumber @see getLevelsNumber */
virtual void setLevelsNumber(int val) = 0;
/** @see setWindowSize */
virtual int getWindowSize() const = 0;
/** @copybrief getWindowSize @see getWindowSize */
virtual void setWindowSize(int val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
/** @see setPolyN */
virtual int getPolyN() const = 0;
/** @copybrief getPolyN @see getPolyN */
virtual void setPolyN(int val) = 0;
/** @see setPolySigma */
virtual double getPolySigma() const = 0;
/** @copybrief getPolySigma @see getPolySigma */
virtual void setPolySigma(double val) = 0;
/** @see setFlags */
virtual int getFlags() const = 0;
/** @copybrief getFlags @see getFlags */
virtual void setFlags(int val) = 0;
};
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
@@ -82,14 +103,38 @@ namespace cv
class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
{
public:
CV_PURE_PROPERTY(double, Tau)
CV_PURE_PROPERTY(double, Lambda)
CV_PURE_PROPERTY(double, Theta)
CV_PURE_PROPERTY(int, ScalesNumber)
CV_PURE_PROPERTY(int, WarpingsNumber)
CV_PURE_PROPERTY(double, Epsilon)
CV_PURE_PROPERTY(int, Iterations)
CV_PURE_PROPERTY(bool, UseInitialFlow)
/** @see setTau */
virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
virtual void setTau(double val) = 0;
/** @see setLambda */
virtual double getLambda() const = 0;
/** @copybrief getLambda @see getLambda */
virtual void setLambda(double val) = 0;
/** @see setTheta */
virtual double getTheta() const = 0;
/** @copybrief getTheta @see getTheta */
virtual void setTheta(double val) = 0;
/** @see setScalesNumber */
virtual int getScalesNumber() const = 0;
/** @copybrief getScalesNumber @see getScalesNumber */
virtual void setScalesNumber(int val) = 0;
/** @see setWarpingsNumber */
virtual int getWarpingsNumber() const = 0;
/** @copybrief getWarpingsNumber @see getWarpingsNumber */
virtual void setWarpingsNumber(int val) = 0;
/** @see setEpsilon */
virtual double getEpsilon() const = 0;
/** @copybrief getEpsilon @see getEpsilon */
virtual void setEpsilon(double val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
/** @see setUseInitialFlow */
virtual bool getUseInitialFlow() const = 0;
/** @copybrief getUseInitialFlow @see getUseInitialFlow */
virtual void setUseInitialFlow(bool val) = 0;
};
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
@@ -99,17 +144,35 @@ namespace cv
{
public:
//! @brief Flow smoothness
CV_PURE_PROPERTY(double, Alpha)
/** @see setAlpha */
virtual double getAlpha() const = 0;
/** @copybrief getAlpha @see getAlpha */
virtual void setAlpha(double val) = 0;
//! @brief Gradient constancy importance
CV_PURE_PROPERTY(double, Gamma)
/** @see setGamma */
virtual double getGamma() const = 0;
/** @copybrief getGamma @see getGamma */
virtual void setGamma(double val) = 0;
//! @brief Pyramid scale factor
CV_PURE_PROPERTY(double, ScaleFactor)
/** @see setScaleFactor */
virtual double getScaleFactor() const = 0;
/** @copybrief getScaleFactor @see getScaleFactor */
virtual void setScaleFactor(double val) = 0;
//! @brief Number of lagged non-linearity iterations (inner loop)
CV_PURE_PROPERTY(int, InnerIterations)
/** @see setInnerIterations */
virtual int getInnerIterations() const = 0;
/** @copybrief getInnerIterations @see getInnerIterations */
virtual void setInnerIterations(int val) = 0;
//! @brief Number of warping iterations (number of pyramid levels)
CV_PURE_PROPERTY(int, OuterIterations)
/** @see setOuterIterations */
virtual int getOuterIterations() const = 0;
/** @copybrief getOuterIterations @see getOuterIterations */
virtual void setOuterIterations(int val) = 0;
//! @brief Number of linear system solver iterations
CV_PURE_PROPERTY(int, SolverIterations)
/** @see setSolverIterations */
virtual int getSolverIterations() const = 0;
/** @copybrief getSolverIterations @see getSolverIterations */
virtual void setSolverIterations(int val) = 0;
};
CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
@@ -117,9 +180,18 @@ namespace cv
class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
CV_PURE_PROPERTY(int, WindowSize)
CV_PURE_PROPERTY(int, MaxLevel)
CV_PURE_PROPERTY(int, Iterations)
/** @see setWindowSize */
virtual int getWindowSize() const = 0;
/** @copybrief getWindowSize @see getWindowSize */
virtual void setWindowSize(int val) = 0;
/** @see setMaxLevel */
virtual int getMaxLevel() const = 0;
/** @copybrief getMaxLevel @see getMaxLevel */
virtual void setMaxLevel(int val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
};
CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();


@@ -850,12 +850,51 @@ def getRunArgs(args):
path = npath
return run_args
if hostos == "nt":
def moveTests(instance, destination):
src = os.path.dirname(instance.tests_dir)
# new binaries path
newBinPath = os.path.join(destination, "bin")
try:
# copy binaries and CMakeCache.txt to the specified destination
shutil.copytree(src, newBinPath)
shutil.copy(os.path.join(instance.path, "CMakeCache.txt"), os.path.join(destination, "CMakeCache.txt"))
except Exception, e:
print "Copying error occurred:", str(e)
exit(e.errno)
# pattern of CMakeCache.txt string to be replaced
replacePattern = re.compile("EXECUTABLE_OUTPUT_PATH:PATH=(.+)")
with open(os.path.join(destination, "CMakeCache.txt"), "r") as cachefile:
try:
cachedata = cachefile.read()
if hostos == 'nt':
# fix path slashes on nt systems
newBinPath = re.sub(r"\\", r"/", newBinPath)
# replace old binaries path in CMakeCache.txt
cachedata = re.sub(re.search(replacePattern, cachedata).group(1), newBinPath, cachedata)
except Exception, e:
print "Reading error occurred:", str(e)
exit(e.errno)
with open(os.path.join(destination, "CMakeCache.txt"), "w") as cachefile:
try:
cachefile.write(cachedata)
except Exception, e:
print "Writing error occurred:", str(e)
exit(e.errno)
exit()
if __name__ == "__main__":
test_args = [a for a in sys.argv if a.startswith("--perf_") or a.startswith("--gtest_")]
argv = [a for a in sys.argv if not(a.startswith("--perf_") or a.startswith("--gtest_"))]
parser = OptionParser()
parser = OptionParser(usage="run.py [options] [build_path]", description="Note: build_path is required if running not from CMake build directory")
parser.add_option("-t", "--tests", dest="tests", help="comma-separated list of modules to test", metavar="SUITS", default="")
if hostos == "nt":
parser.add_option("-m", "--move_tests", dest="move", help="location to move current tests build", metavar="PATH", default="")
parser.add_option("-w", "--cwd", dest="cwd", help="working directory for tests", metavar="PATH", default=".")
parser.add_option("-a", "--accuracy", dest="accuracy", help="look for accuracy tests instead of performance tests", action="store_true", default=False)
parser.add_option("-l", "--longname", dest="useLongNames", action="store_true", help="generate log files with long names", default=False)
@@ -880,6 +919,7 @@ if __name__ == "__main__":
if len(run_args) == 0:
print >> sys.stderr, "Usage:", os.path.basename(sys.argv[0]), "[options] [build_path]"
print >> sys.stderr, "Please specify build_path or run script from CMake build directory"
exit(1)
options.android_env = {}
@@ -906,6 +946,10 @@ if __name__ == "__main__":
test_list = []
for path in run_args:
suite = TestSuite(options, path)
if hostos == "nt":
if(options.move):
moveTests(suite, options.move)
#print vars(suite),"\n"
if options.list:
test_list.extend(suite.tests)


@@ -441,29 +441,65 @@ class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
//! @brief Time step of the numerical scheme
CV_PURE_PROPERTY(double, Tau)
/** @see setTau */
virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
virtual void setTau(double val) = 0;
//! @brief Weight parameter for the data term, attachment parameter
CV_PURE_PROPERTY(double, Lambda)
/** @see setLambda */
virtual double getLambda() const = 0;
/** @copybrief getLambda @see getLambda */
virtual void setLambda(double val) = 0;
//! @brief Weight parameter for (u - v)^2, tightness parameter
CV_PURE_PROPERTY(double, Theta)
/** @see setTheta */
virtual double getTheta() const = 0;
/** @copybrief getTheta @see getTheta */
virtual void setTheta(double val) = 0;
//! @brief coefficient for additional illumination variation term
CV_PURE_PROPERTY(double, Gamma)
/** @see setGamma */
virtual double getGamma() const = 0;
/** @copybrief getGamma @see getGamma */
virtual void setGamma(double val) = 0;
//! @brief Number of scales used to create the pyramid of images
CV_PURE_PROPERTY(int, ScalesNumber)
/** @see setScalesNumber */
virtual int getScalesNumber() const = 0;
/** @copybrief getScalesNumber @see getScalesNumber */
virtual void setScalesNumber(int val) = 0;
//! @brief Number of warpings per scale
CV_PURE_PROPERTY(int, WarpingsNumber)
/** @see setWarpingsNumber */
virtual int getWarpingsNumber() const = 0;
/** @copybrief getWarpingsNumber @see getWarpingsNumber */
virtual void setWarpingsNumber(int val) = 0;
//! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
CV_PURE_PROPERTY(double, Epsilon)
/** @see setEpsilon */
virtual double getEpsilon() const = 0;
/** @copybrief getEpsilon @see getEpsilon */
virtual void setEpsilon(double val) = 0;
//! @brief Inner iterations (between outlier filtering) used in the numerical scheme
CV_PURE_PROPERTY(int, InnerIterations)
/** @see setInnerIterations */
virtual int getInnerIterations() const = 0;
/** @copybrief getInnerIterations @see getInnerIterations */
virtual void setInnerIterations(int val) = 0;
//! @brief Outer iterations (number of inner loops) used in the numerical scheme
CV_PURE_PROPERTY(int, OuterIterations)
/** @see setOuterIterations */
virtual int getOuterIterations() const = 0;
/** @copybrief getOuterIterations @see getOuterIterations */
virtual void setOuterIterations(int val) = 0;
//! @brief Use initial flow
CV_PURE_PROPERTY(bool, UseInitialFlow)
/** @see setUseInitialFlow */
virtual bool getUseInitialFlow() const = 0;
/** @copybrief getUseInitialFlow @see getUseInitialFlow */
virtual void setUseInitialFlow(bool val) = 0;
//! @brief Step between scales (<1)
CV_PURE_PROPERTY(double, ScaleStep)
/** @see setScaleStep */
virtual double getScaleStep() const = 0;
/** @copybrief getScaleStep @see getScaleStep */
virtual void setScaleStep(double val) = 0;
//! @brief Median filter kernel size (1 = no filter) (3 or 5)
CV_PURE_PROPERTY(int, MedianFiltering)
/** @see setMedianFiltering */
virtual int getMedianFiltering() const = 0;
/** @copybrief getMedianFiltering @see getMedianFiltering */
virtual void setMedianFiltering(int val) = 0;
};
/** @brief Creates instance of cv::DenseOpticalFlow


@@ -67,5 +67,5 @@ PERF_TEST_P(TransformationType, findTransformECC, /*testing::ValuesIn(MotionType
findTransformECC(templateImage, img, warpMat, transform_type,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 5, -1));
}
SANITY_CHECK(warpMat, 1e-3);
SANITY_CHECK(warpMat, 3e-3);
}


@@ -465,6 +465,7 @@ double cv::findTransformECC(InputArray templateImage,
meanStdDev(templateFloat, tmpMean, tmpStd, imageMask);
subtract(imageWarped, imgMean, imageWarped, imageMask);//zero-mean input
templateZM = Mat::zeros(templateZM.rows, templateZM.cols, templateZM.type());
subtract(templateFloat, tmpMean, templateZM, imageMask);//zero-mean template
const double tmpNorm = std::sqrt(countNonZero(imageMask)*(tmpStd.val[0])*(tmpStd.val[0]));
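The subtract() added above makes the template zero-mean over the same mask as the warped image, which is what the tmpNorm expression assumes: with meanStdDev() computed over the N = countNonZero(imageMask) masked pixels, the standard-deviation identity gives

\[ \lVert t - \mu_t \rVert_2 = \sqrt{\sum_{i \in M} (t_i - \mu_t)^2} = \sqrt{N \sigma_t^2} = \sigma_t \sqrt{N}, \]

so the norm of the zero-mean template comes straight from the already computed statistics, without a second pass over the pixels.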


@@ -27,6 +27,8 @@ set(videoio_hdrs
set(videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_images.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_mjpeg_encoder.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_mjpeg_decoder.cpp
)
file(GLOB videoio_ext_hdrs


@@ -110,8 +110,7 @@ enum { CAP_PROP_POS_MSEC =0,
CAP_PROP_CONVERT_RGB =16,
CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CAP_PROP_RECTIFICATION =18,
CAP_PROP_MONOCROME =19,
CAP_PROP_MONOCHROME =CAP_PROP_MONOCROME,
CAP_PROP_MONOCHROME =19,
CAP_PROP_SHARPNESS =20,
CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust reference level using this feature
CAP_PROP_GAMMA =22,
@@ -217,7 +216,8 @@ enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast ma
CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, // Horizontal sub-sampling of the image
CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image
CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor
CAP_PROP_PVAPI_BINNINGY = 305 // Vertical binning factor
CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor
CAP_PROP_PVAPI_PIXELFORMAT = 306 // Pixel format
};
// PVAPI: FrameStartTriggerMode
@@ -235,6 +235,17 @@ enum { CAP_PVAPI_DECIMATION_OFF = 1, // Off
CAP_PVAPI_DECIMATION_2OUTOF16 = 8 // 2 out of 16 decimation
};
// PVAPI: PixelFormat
enum { CAP_PVAPI_PIXELFORMAT_MONO8 = 1, // Mono8
CAP_PVAPI_PIXELFORMAT_MONO16 = 2, // Mono16
CAP_PVAPI_PIXELFORMAT_BAYER8 = 3, // Bayer8
CAP_PVAPI_PIXELFORMAT_BAYER16 = 4, // Bayer16
CAP_PVAPI_PIXELFORMAT_RGB24 = 5, // Rgb24
CAP_PVAPI_PIXELFORMAT_BGR24 = 6, // Bgr24
CAP_PVAPI_PIXELFORMAT_RGBA32 = 7, // Rgba32
CAP_PVAPI_PIXELFORMAT_BGRA32 = 8, // Bgra32
};
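A minimal sketch of selecting one of these formats through the new CAP_PROP_PVAPI_PIXELFORMAT property (assumes a build with the PvAPI backend and a camera attached):

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(cv::CAP_PVAPI); // first camera on the PvAPI backend
    if (!cap.isOpened())
        return 1;
    // Request 8-bit Bayer frames using the newly added property and enum.
    cap.set(cv::CAP_PROP_PVAPI_PIXELFORMAT, cv::CAP_PVAPI_PIXELFORMAT_BAYER8);
    cv::Mat frame;
    cap >> frame;
    return frame.empty() ? 1 : 0;
}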
// Properties of cameras available through XIMEA SDK interface
enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
@@ -365,6 +376,9 @@ enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integ
CAP_INTELPERC_IMAGE = 3
};
enum { VIDEOWRITER_PROP_QUALITY = 1, // Quality (0..100%) of the videostream encoded
VIDEOWRITER_PROP_FRAMEBYTES = 2, // (Read-only): Size of just encoded video frame
};
class IVideoCapture;
@@ -575,10 +589,10 @@ public:
protected:
Ptr<CvCapture> cap;
Ptr<IVideoCapture> icap;
private:
static Ptr<IVideoCapture> createCameraCapture(int index);
};
class IVideoWriter;
/** @brief Video writer class.
*/
class CV_EXPORTS_W VideoWriter
@@ -631,6 +645,25 @@ public:
*/
CV_WRAP virtual void write(const Mat& image);
/** @brief Sets a property in the VideoWriter.
@param propId Property identifier. It can be one of the following:
- **VIDEOWRITER_PROP_QUALITY** Quality (0..100%) of the videostream encoded. Can be adjusted dynamically in some codecs.
@param value Value of the property.
*/
CV_WRAP virtual bool set(int propId, double value);
/** @brief Returns the specified VideoWriter property
@param propId Property identifier. It can be one of the following:
- **VIDEOWRITER_PROP_QUALITY** Current quality of the encoded videostream.
- **VIDEOWRITER_PROP_FRAMEBYTES** (Read-only) Size of just encoded video frame; note that the encoding order may be different from representation order.
**Note**: When querying a property that is not supported by the backend used by the VideoWriter
class, value 0 is returned.
*/
CV_WRAP virtual double get(int propId) const;
/** @brief Concatenates 4 chars to a fourcc code
This static method constructs the fourcc code of the codec to be used in the constructor
@@ -640,6 +673,10 @@ public:
protected:
Ptr<CvVideoWriter> writer;
Ptr<IVideoWriter> iwriter;
static Ptr<IVideoWriter> create(const String& filename, int fourcc, double fps,
Size frameSize, bool isColor = true);
};
template<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const;
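A minimal usage sketch for the new writer properties, assuming the built-in motion-JPEG writer added in this PR (cap_mjpeg_encoder.cpp) is the backend that honors them:

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoWriter writer("out.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'),
                           30.0, cv::Size(640, 480));
    if (!writer.isOpened())
        return 1;
    writer.set(cv::VIDEOWRITER_PROP_QUALITY, 90);  // 0..100, adjustable per frame
    writer.write(cv::Mat(480, 640, CV_8UC3, cv::Scalar::all(127)));
    // Read-only: size in bytes of the frame that was just encoded.
    double bytes = writer.get(cv::VIDEOWRITER_PROP_FRAMEBYTES);
    return bytes > 0 ? 0 : 1;
}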


@@ -160,7 +160,6 @@ enum
CV_CAP_PROP_CONVERT_RGB =16,
CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CV_CAP_PROP_RECTIFICATION =18,
CV_CAP_PROP_MONOCROME =19,
CV_CAP_PROP_MONOCHROME =19,
CV_CAP_PROP_SHARPNESS =20,
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
@@ -227,6 +226,7 @@ enum
CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image
CV_CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor
CV_CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor
CV_CAP_PROP_PVAPI_PIXELFORMAT = 306, // Pixel format
// Properties of cameras available through XIMEA SDK interface
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.


@@ -499,6 +499,67 @@ CV_IMPL void cvReleaseVideoWriter( CvVideoWriter** pwriter )
namespace cv
{
static Ptr<IVideoCapture> IVideoCapture_create(int index)
{
int domains[] =
{
#ifdef HAVE_DSHOW
CV_CAP_DSHOW,
#endif
#ifdef HAVE_INTELPERC
CV_CAP_INTELPERC,
#endif
-1, -1
};
// interpret preferred interface (0 = autodetect)
int pref = (index / 100) * 100;
if (pref)
{
domains[0]=pref;
index %= 100;
domains[1]=-1;
}
// try every possibly installed camera API
for (int i = 0; domains[i] >= 0; i++)
{
#if defined(HAVE_DSHOW) || \
defined(HAVE_INTELPERC) || \
(0)
Ptr<IVideoCapture> capture;
switch (domains[i])
{
#ifdef HAVE_DSHOW
case CV_CAP_DSHOW:
capture = makePtr<VideoCapture_DShow>(index);
break; // CV_CAP_DSHOW
#endif
#ifdef HAVE_INTELPERC
case CV_CAP_INTELPERC:
capture = makePtr<VideoCapture_IntelPerC>();
break; // CV_CAP_INTEL_PERC
#endif
}
if (capture && capture->isOpened())
return capture;
#endif
}
// failed open a camera
return Ptr<IVideoCapture>();
}
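As the (index / 100) * 100 arithmetic above shows, callers encode the preferred capture API in the hundreds digit of the device index; for example, on a Windows build with DirectShow:

#include <opencv2/videoio.hpp>

int main()
{
    // 700 == cv::CAP_DSHOW, so this opens camera 0 via DirectShow only,
    // instead of letting IVideoCapture_create() probe every compiled-in backend.
    cv::VideoCapture cap(cv::CAP_DSHOW + 0);
    return cap.isOpened() ? 0 : 1;
}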
static Ptr<IVideoWriter> IVideoWriter_create(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
Ptr<IVideoWriter> iwriter;
if( _fourcc == CV_FOURCC('M', 'J', 'P', 'G') )
iwriter = createMotionJpegWriter(filename, fps, frameSize, isColor);
return iwriter;
}
VideoCapture::VideoCapture()
{}
@@ -528,7 +589,7 @@ bool VideoCapture::open(const String& filename)
bool VideoCapture::open(int device)
{
if (isOpened()) release();
icap = createCameraCapture(device);
icap = IVideoCapture_create(device);
if (!icap.empty())
return true;
cap.reset(cvCreateCameraCapture(device));
@@ -609,59 +670,6 @@ double VideoCapture::get(int propId) const
return icvGetCaptureProperty(cap, propId);
}
Ptr<IVideoCapture> VideoCapture::createCameraCapture(int index)
{
int domains[] =
{
#ifdef HAVE_DSHOW
CV_CAP_DSHOW,
#endif
#ifdef HAVE_INTELPERC
CV_CAP_INTELPERC,
#endif
-1, -1
};
// interpret preferred interface (0 = autodetect)
int pref = (index / 100) * 100;
if (pref)
{
domains[0]=pref;
index %= 100;
domains[1]=-1;
}
// try every possibly installed camera API
for (int i = 0; domains[i] >= 0; i++)
{
#if defined(HAVE_DSHOW) || \
defined(HAVE_INTELPERC) || \
(0)
Ptr<IVideoCapture> capture;
switch (domains[i])
{
#ifdef HAVE_DSHOW
case CV_CAP_DSHOW:
capture = makePtr<VideoCapture_DShow>(index);
if (capture && capture.dynamicCast<VideoCapture_DShow>()->isOpened())
return capture;
break; // CV_CAP_DSHOW
#endif
#ifdef HAVE_INTELPERC
case CV_CAP_INTELPERC:
capture = makePtr<VideoCapture_IntelPerC>();
if (capture && capture.dynamicCast<VideoCapture_IntelPerC>()->isOpened())
return capture;
break; // CV_CAP_INTEL_PERC
#endif
}
#endif
}
// failed open a camera
return Ptr<IVideoCapture>();
}
VideoWriter::VideoWriter()
{}
@@ -673,6 +681,7 @@ VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size f
void VideoWriter::release()
{
iwriter.release();
writer.release();
}
@@ -683,19 +692,43 @@ bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size fra
bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
if (isOpened()) release();
iwriter = IVideoWriter_create(filename, _fourcc, fps, frameSize, isColor);
if (!iwriter.empty())
return true;
writer.reset(cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor));
return isOpened();
}
bool VideoWriter::isOpened() const
{
return !writer.empty();
return !iwriter.empty() || !writer.empty();
}
bool VideoWriter::set(int propId, double value)
{
if (!iwriter.empty())
return iwriter->setProperty(propId, value);
return false;
}
double VideoWriter::get(int propId) const
{
if (!iwriter.empty())
return iwriter->getProperty(propId);
return 0.;
}
void VideoWriter::write(const Mat& image)
{
IplImage _img = image;
cvWriteFrame(writer, &_img);
if( iwriter )
iwriter->write(image);
else
{
IplImage _img = image;
cvWriteFrame(writer, &_img);
}
}
VideoWriter& VideoWriter::operator << (const Mat& image)

Some files were not shown because too many files have changed in this diff.